diff --git a/Documentation/devicetree/bindings/bus/bcma.txt b/Documentation/devicetree/bindings/bus/bcma.txt
new file mode 100644
index 0000000000000000000000000000000000000000..62a48348ac1561935416f7d0bd4e4c5357b60429
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/bcma.txt
@@ -0,0 +1,32 @@
+Driver for ARM AXI Bus with Broadcom Plugins (bcma)
+
+Required properties:
+
+- compatible : brcm,bus-axi
+
+- reg : iomem address range of chipcommon core
+
+The cores on the AXI bus are automatically detected by bcma with the
+memory ranges they are using and they get registered afterwards.
+
+The top-level axi bus may contain children representing attached cores
+(devices). This is needed since some hardware details can't be auto
+detected (e.g. IRQ numbers). Also some of the cores may be responsible
+for extra things, e.g. ChipCommon providing access to the GPIO chip.
+
+Example:
+
+	axi@18000000 {
+		compatible = "brcm,bus-axi";
+		reg = <0x18000000 0x1000>;
+		ranges = <0x00000000 0x18000000 0x00100000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		chipcommon {
+			reg = <0x00000000 0x1000>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt b/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ab0bb4247d14950959044cc138e71e7c736f85b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt
@@ -0,0 +1,39 @@
+* Broadcom UniMAC MDIO bus controller
+
+Required properties:
+- compatible: should be one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2",
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio"
+- reg: address and length of the register set for the device; the first one is
+  the base register, and the second one is optional and used for indirect
+  accesses to MDIO transactions larger than 16 bits
+- reg-names: name(s) of the register set, must be "mdio" and optionally
+  "mdio_indir_rw"
+- #size-cells: must be 1
+- #address-cells: must be 0
+
+Optional properties:
+- interrupts: must be one if the interrupt is shared with the Ethernet MAC or
+  Ethernet switch this MDIO block is integrated from, or must be two if there
+  are two separate interrupts; the first one must be "mdio done" and the
+  second one "mdio error"
+- interrupt-names: must be "mdio_done_error" when there is a shared interrupt
+  fed to this hardware block, or must be "mdio_done" for the first interrupt
+  and "mdio_error" for the second when there are separate interrupts
+
+Child nodes of this MDIO bus controller node are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+mdio@403c0 {
+	compatible = "brcm,unimac-mdio";
+	reg = <0x403c0 0x8 0x40300 0x18>;
+	reg-names = "mdio", "mdio_indir_rw";
+	#size-cells = <1>;
+	#address-cells = <0>;
+
+	...
+	phy@0 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/broadcom-sf2.txt b/Documentation/devicetree/bindings/net/broadcom-sf2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..30d487597ecba545942eb78cecdf6c0d97f7c077
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-sf2.txt
@@ -0,0 +1,78 @@
+* Broadcom Starfighter 2 integrated switch
+
+Required properties:
+
+- compatible: should be "brcm,bcm7445-switch-v4.0"
+- reg: addresses and length of the register sets for the device, must be 6
+  pairs of register addresses and lengths
+- interrupts: interrupts for the devices, must be two interrupts
+- dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt
+- dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt
+- #size-cells: must be 0
+- #address-cells: must be 2, see dsa/dsa.txt
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt.
+
+Optional properties:
+
+- reg-names: literal names for the device base register addresses, when present
+  must be: "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb"
+
+- interrupt-names: literal names for the device interrupt lines, when present
+  must be: "switch_0" and "switch_1"
+
+- brcm,num-gphy: specify the maximum number of integrated gigabit PHYs in the
+  switch
+
+- brcm,num-rgmii-ports: specify the maximum number of RGMII interfaces supported
+  by the switch
+
+- brcm,fcb-pause-override: boolean property, if present indicates that the switch
+  supports Failover Control Block pause override capability
+
+- brcm,acb-packets-inflight: boolean property, if present indicates that the switch
+  Admission Control Block supports reporting the number of packets in-flight in a
+  switch queue
+
+Example:
+
+switch_top@f0b00000 {
+	compatible = "simple-bus";
+	#size-cells = <1>;
+	#address-cells = <1>;
+	ranges = <0 0xf0b00000 0x40804>;
+
+	ethernet_switch@0 {
+		compatible = "brcm,bcm7445-switch-v4.0";
+		#size-cells = <0>;
+		#address-cells = <2>;
+		reg = <0x0 0x40000
+			0x40000 0x110
+			0x40340 0x30
+			0x40380 0x30
+			0x40400 0x34
+			0x40600 0x208>;
+		interrupts = <0 0x18 0
+				0 0x19 0>;
+		brcm,num-gphy = <1>;
+		brcm,num-rgmii-ports = <2>;
+		brcm,fcb-pause-override;
+		brcm,acb-packets-inflight;
+
+		...
+		switch@0 {
+			reg = <0 0>;
+			#size-cells = <0>;
+			#address-cells = <1>;
+
+			port@0 {
+				label = "gphy";
+				reg = <0>;
+			};
+			...
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9e331777c2036ae6e2a9c685add8f32633aa77e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/m_can.txt
@@ -0,0 +1,67 @@
+Bosch MCAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible		: Should be "bosch,m_can" for M_CAN controllers
+- reg			: physical base address and size of the M_CAN
+			  registers map and Message RAM
+- reg-names		: Should be "m_can" and "message_ram"
+- interrupts		: Should be the interrupt number of M_CAN interrupt
+			  line 0 and line 1, which could be the same if sharing
+			  the same interrupt.
+- interrupt-names	: Should contain "int0" and "int1"
+- clocks		: Clocks used by controller, should be host clock
+			  and CAN clock.
+- clock-names		: Should contain "hclk" and "cclk"
+- pinctrl-<n>		: Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
+- pinctrl-names		: Names corresponding to the numbered pinctrl states
+- bosch,mram-cfg	: Message RAM configuration data.
+			  Multiple M_CAN instances can share the same Message
+			  RAM and the number of each element (e.g. Rx FIFO or
+			  Tx Buffer) in the Message RAM is also configurable,
+			  so this property tells the driver how the shared or
+			  private Message RAM is used by this M_CAN controller.
+
+			  The format should be as follows:
+			  <offset sidf_elems xidf_elems rxf0_elems rxf1_elems
+			   rxb_elems txe_elems txb_elems>
+
+			  The 'offset' is an address offset of the Message RAM
+			  where the following elements start from. This is
+			  usually set to 0x0 if you're using a private Message
+			  RAM. The remaining cells are used to specify how many
+			  elements are used for each FIFO/Buffer.
+
+			  M_CAN includes the following elements according to user manual:
+			  11-bit Filter	0-128 elements / 0-128 words
+			  29-bit Filter	0-64 elements / 0-128 words
+			  Rx FIFO 0	0-64 elements / 0-1152 words
+			  Rx FIFO 1	0-64 elements / 0-1152 words
+			  Rx Buffers	0-64 elements / 0-1152 words
+			  Tx Event FIFO	0-32 elements / 0-64 words
+			  Tx Buffers	0-32 elements / 0-576 words
+
+			  Please refer to 2.4.1 Message RAM Configuration in
+			  Bosch M_CAN user manual for details.
+
+Example:
+SoC dtsi:
+m_can1: can@020e8000 {
+	compatible = "bosch,m_can";
+	reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
+	reg-names = "m_can", "message_ram";
+	interrupts = <0 114 0x04>,
+		     <0 114 0x04>;
+	interrupt-names = "int0", "int1";
+	clocks = <&clks IMX6SX_CLK_CANFD>,
+		 <&clks IMX6SX_CLK_CANFD>;
+	clock-names = "hclk", "cclk";
+	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
+	status = "disabled";
+};
+
+Board dts:
+&m_can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_m_can1>;
+	status = "enabled";
+};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
new file mode 100644
index 0000000000000000000000000000000000000000..002d8440bf66f6a00054895b75c47e724cbcd5b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -0,0 +1,43 @@
+Renesas R-Car CAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible: "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
+	      "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
+	      "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
+	      "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC.
+- reg: physical base address and size of the R-Car CAN register map.
+- interrupts: interrupt specifier for the sole interrupt.
+- clocks: phandles and clock specifiers for 3 CAN clock inputs.
+- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- pinctrl-0: pin control group to be used for this controller.
+- pinctrl-names: must be "default".
+
+Optional properties:
+- renesas,can-clock-select: R-Car CAN Clock Source Select.
Valid values are: + <0x0> (default) : Peripheral clock (clkp1) + <0x1> : Peripheral clock (clkp2) + <0x3> : Externally input clock + +Example +------- + +SoC common .dtsi file: + + can0: can@e6e80000 { + compatible = "renesas,can-r8a7791"; + reg = <0 0xe6e80000 0 0x1000>; + interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mstp9_clks R8A7791_CLK_RCAN0>, + <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>; + clock-names = "clkp1", "clkp2", "can_clk"; + status = "disabled"; + }; + +Board specific .dts file: + +&can0 { + pinctrl-0 = <&can0_pins>; + pinctrl-names = "default"; + status = "okay"; +}; diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index ae2b8b7f9c38f0bb3d233588b1b2a4cc01f6c900..33fe8462edf4996eb81dace6618798be59d8c65a 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt @@ -24,15 +24,17 @@ Optional properties: - ti,hwmods : Must be "cpgmac0" - no_bd_ram : Must be 0 or 1 - dual_emac : Specifies Switch to act as Dual EMAC +- syscon : Phandle to the system control device node, which is + the control module device of the am33x Slave Properties: Required properties: - phy_id : Specifies slave phy id - phy-mode : See ethernet.txt file in the same directory -- mac-address : See ethernet.txt file in the same directory Optional properties: - dual_emac_res_vlan : Specifies VID to be used to segregate the ports +- mac-address : See ethernet.txt file in the same directory Note: "ti,hwmods" field is used to fetch the base address and irq resources from TI, omap hwmod data base during device registration. @@ -57,6 +59,7 @@ Examples: active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; + syscon = <&cm>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; @@ -85,6 +88,7 @@ Examples: active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; + syscon = <&cm>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index 49f4f7ae3f5145921a6def4250cc39fabb4fdafa..a62c889aafcaf070774bc3e9bc93c96ba9ea1992 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -39,6 +39,22 @@ Optionnal property: This property is only used when switches are being chained/cascaded together. +- phy-handle : Phandle to a PHY on an external MDIO bus, not the + switch internal one. See + Documentation/devicetree/bindings/net/ethernet.txt + for details. + +- phy-mode : String representing the connection to the designated + PHY node specified by the 'phy-handle' property. See + Documentation/devicetree/bindings/net/ethernet.txt + for details. + +Optional subnodes: +- fixed-link : Fixed-link subnode describing a link to a non-MDIO + managed entity. See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. 
+ Example: dsa@0 { @@ -58,6 +74,7 @@ Example: port@0 { reg = <0>; label = "lan1"; + phy-handle = <&phy0>; }; port@1 { diff --git a/Documentation/devicetree/bindings/net/emac_rockchip.txt b/Documentation/devicetree/bindings/net/emac_rockchip.txt new file mode 100644 index 0000000000000000000000000000000000000000..8dc1c79fef7fcad6b46470a05b716e1a761b4445 --- /dev/null +++ b/Documentation/devicetree/bindings/net/emac_rockchip.txt @@ -0,0 +1,50 @@ +* ARC EMAC 10/100 Ethernet platform driver for Rockchip Rk3066/RK3188 SoCs + +Required properties: +- compatible: Should be "rockchip,rk3066-emac" or "rockchip,rk3188-emac" + according to the target SoC. +- reg: Address and length of the register set for the device +- interrupts: Should contain the EMAC interrupts +- rockchip,grf: phandle to the syscon grf used to control speed and mode + for emac. +- phy: see ethernet.txt file in the same directory. +- phy-mode: see ethernet.txt file in the same directory. + +Optional properties: +- phy-supply: phandle to a regulator if the PHY needs one + +Clock handling: +- clocks: Must contain an entry for each entry in clock-names. +- clock-names: Shall be "hclk" for the host clock needed to calculate and set + polling period of EMAC and "macref" for the reference clock needed to transfer + data to and from the phy. + +Child nodes of the driver are the individual PHY devices connected to the +MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus. + +Examples: + +ethernet@10204000 { + compatible = "rockchip,rk3188-emac"; + reg = <0xc0fc2000 0x3c>; + interrupts = <6>; + mac-address = [ 00 11 22 33 44 55 ]; + + clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; + clock-names = "hclk", "macref"; + + pinctrl-names = "default"; + pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>; + + rockchip,grf = <&grf>; + + phy = <&phy0>; + phy-mode = "rmii"; + phy-supply = <&vcc_rmii>; + + #address-cells = <1>; + #size-cells = <0>; + phy0: ethernet-phy@0 { + reg = <1>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 8a2c7b55ec165b69ce1b6084fea32466f68ff483..0c8775c457983994dad116415f973093946e97c3 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -16,6 +16,12 @@ Optional properties: - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. Use instead of phy-handle. +- fsl,num-tx-queues : The property is valid for enet-avb IP, which supports + hw multi queues. Should specify the tx queue number, otherwise set tx queue + number to 1. +- fsl,num-rx-queues : The property is valid for enet-avb IP, which supports + hw multi queues. Should specify the rx queue number, otherwise set rx queue + number to 1. Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes diff --git a/Documentation/devicetree/bindings/net/marvell-pxa168.txt b/Documentation/devicetree/bindings/net/marvell-pxa168.txt new file mode 100644 index 0000000000000000000000000000000000000000..845a148a346e9265e92f93f966a2e091b39ed058 --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell-pxa168.txt @@ -0,0 +1,36 @@ +* Marvell PXA168 Ethernet Controller + +Required properties: +- compatible: should be "marvell,pxa168-eth". +- reg: address and length of the register set for the device. +- interrupts: interrupt for the device. 
+- clocks: pointer to the clock for the device.
+
+Optional properties:
+- port-id: Ethernet port number. Should be '0', '1' or '2'.
+- #address-cells: must be 1 when using sub-nodes.
+- #size-cells: must be 0 when using sub-nodes.
+- phy-handle: see ethernet.txt file in the same directory.
+- local-mac-address: see ethernet.txt file in the same directory.
+
+Sub-nodes:
+Each PHY can be represented as a sub-node. This is not mandatory.
+
+Sub-nodes required properties:
+- reg: the MDIO address of the PHY.
+
+Example:
+
+	eth0: ethernet@f7b90000 {
+		compatible = "marvell,pxa168-eth";
+		reg = <0xf7b90000 0x10000>;
+		clocks = <&chip CLKID_GETH0>;
+		interrupts = ;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		phy-handle = <&ethphy0>;
+
+		ethphy0: ethernet-phy@0 {
+			reg = <0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ec633d74a8a8f1c865cc1b87235434ae728a7d06
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
@@ -0,0 +1,25 @@
+* Amlogic Meson DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file net/stmmac.txt with the following changes.
+
+Required properties:
+
+- compatible: should be "amlogic,meson6-dwmac" along with "snps,dwmac"
+	      and any applicable more detailed version number
+	      described in net/stmmac.txt
+
+- reg: should contain a register range for the dwmac controller and
+       another one for the Amlogic specific configuration
+
+Example:
+
+	ethmac: ethernet@c9410000 {
+		compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+		reg = <0xc9410000 0x10000
+		       0xc1108108 0x4>;
+		interrupts = <0 8 1>;
+		interrupt-names = "macirq";
+		clocks = <&clk81>;
+		clock-names = "stmmaceth";
+	}
diff --git a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
index 3b58ae4803445da5c1066159dc67577bec9f6cb4..9005608cbbd1e7fe1683b15ce8fcc6bbd7b43586 100644
--- a/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st21nfcb.txt
@@ -26,7 +26,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
 		clock-frequency = <400000>;
 
 		interrupt-parent = <&gpio5>;
-		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
 
 		reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
 	};
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
index 1e436133685f91f470ba7f63e302503422bd818e..7c89ca290ced74c05600d59fa6abe7abaf7f8f58 100644
--- a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+++ b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
@@ -13,6 +13,11 @@ Optional SoC Specific Properties:
 - pinctrl-names: Contains only one value - "default".
 - pintctrl-0: Specifies the pin control groups used for this controller.
 - autosuspend-delay: Specify autosuspend delay in milliseconds.
+- vin-voltage-override: Specify voltage of VIN pin in microvolts.
+- irq-status-read-quirk: Specify that the trf7970a being used has the
+  "IRQ Status Read" erratum.
+- en2-rf-quirk: Specify that the trf7970a being used has the "EN2 RF"
+  erratum.
 
 Example (for ARM-based BeagleBone with TRF7970A on SPI1):
 
@@ -30,7 +35,10 @@
 		ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>,
 				  <&gpio2 5 GPIO_ACTIVE_LOW>;
 		vin-supply = <&ldo3_reg>;
+		vin-voltage-override = <5000000>;
 		autosuspend-delay = <30000>;
+		irq-status-read-quirk;
+		en2-rf-quirk;
 		status = "okay";
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt b/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c74989c0d8ac96d6561a7dde306840a8f4595547
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca-qca7000-spi.txt
@@ -0,0 +1,47 @@
+* Qualcomm QCA7000 (Ethernet over SPI protocol)
+
+Note: The QCA7000 is usable as a SPI device. In this case it must be defined
+as a child of a SPI master in the device tree.
+
+Required properties:
+- compatible : Should be "qca,qca7000"
+- reg : Should specify the SPI chip select
+- interrupts : The first cell should specify the index of the source interrupt
+  and the second cell should specify the trigger type as rising edge
+- spi-cpha : Must be set
+- spi-cpol : Must be set
+
+Optional properties:
+- interrupt-parent : Specify the phandle of the source interrupt
+- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
+  Numbers smaller than 1000000 or greater than 16000000 are invalid. If the
+  property is missing the SPI frequency defaults to 8000000 Hertz.
+- local-mac-address : 6 bytes, MAC address
+- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
+  In this mode the SPI master must toggle the chip select between each data
+  word. In burst mode these gaps aren't necessary, which is faster.
+  This setting depends on how the QCA7000 is set up via GPIO pin strapping.
+  If the property is missing the driver defaults to burst mode.
+
+Example:
+
+/* Freescale i.MX28 SPI master*/
+ssp2: spi@80014000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,imx28-spi";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi2_pins_a>;
+	status = "okay";
+
+	qca7000: ethernet@0 {
+		compatible = "qca,qca7000";
+		reg = <0x0>;
+		interrupt-parent = <&gpio3>;      /* GPIO Bank 3 */
+		interrupts = <25 0x1>;            /* Index: 25, rising edge */
+		spi-cpha;                         /* SPI mode: CPHA=1 */
+		spi-cpol;                         /* SPI mode: CPOL=1 */
+		spi-max-frequency = <8000000>;    /* freq: 8 MHz */
+		local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index 2a60cd3e8d5ddb7bdf3b2caad2bc414a3d8566e0..3a9d6795160654080ec4aaa114304b62d5f0754f 100644
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
@@ -12,6 +12,10 @@ Required properties:
 - altr,sysmgr-syscon : Should be the phandle to the system manager node that
    encompasses the glue register, the register offset, and the register
    shift.
+Optional properties:
+altr,emac-splitter: Should be the phandle to the emac splitter soft IP node if
+		    the DWMAC controller is connected to the emac splitter.
+ Example: gmac0: ethernet@ff700000 { diff --git a/Documentation/networking/dctcp.txt b/Documentation/networking/dctcp.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d5dfbc89ec9e37e27b53a610594dd112d3c6e37 --- /dev/null +++ b/Documentation/networking/dctcp.txt @@ -0,0 +1,43 @@ +DCTCP (DataCenter TCP) +---------------------- + +DCTCP is an enhancement to the TCP congestion control algorithm for data +center networks and leverages Explicit Congestion Notification (ECN) in +the data center network to provide multi-bit feedback to the end hosts. + +To enable it on end hosts: + + sysctl -w net.ipv4.tcp_congestion_control=dctcp + +All switches in the data center network running DCTCP must support ECN +marking and be configured for marking when reaching defined switch buffer +thresholds. The default ECN marking threshold heuristic for DCTCP on +switches is 20 packets (30KB) at 1Gbps, and 65 packets (~100KB) at 10Gbps, +but might need further careful tweaking. + +For more details, see below documents: + +Paper: + +The algorithm is further described in detail in the following two +SIGCOMM/SIGMETRICS papers: + + i) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye, + Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan: + "Data Center TCP (DCTCP)", Data Center Networks session + Proc. ACM SIGCOMM, New Delhi, 2010. + http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf + http://www.sigcomm.org/ccr/papers/2010/October/1851275.1851192 + +ii) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar: + "Analysis of DCTCP: Stability, Convergence, and Fairness" + Proc. ACM SIGMETRICS, San Jose, 2011. + http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf + +IETF informational draft: + + http://tools.ietf.org/html/draft-bensley-tcpm-dctcp-00 + +DCTCP site: + + http://simula.stanford.edu/~alizade/Site/DCTCP.html diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index f4db0972ea38c9ab537d7e93c3f28c7e70bacf30..b1935f9ce081237634e67cfd8ea39012e7ccd6a3 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -951,7 +951,7 @@ Size modifier is one of ... Mode modifier is one of: - BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */ + BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */ BPF_ABS 0x20 BPF_IND 0x40 BPF_MEM 0x60 @@ -995,6 +995,275 @@ BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and 2 byte atomic increments are not supported. +eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists +of two consecutive 'struct bpf_insn' 8-byte blocks and interpreted as single +instruction that loads 64-bit immediate value into a dst_reg. +Classic BPF has similar instruction: BPF_LD | BPF_W | BPF_IMM which loads +32-bit immediate value into a register. + +eBPF verifier +------------- +The safety of the eBPF program is determined in two steps. + +First step does DAG check to disallow loops and other CFG validation. +In particular it will detect programs that have unreachable instructions. +(though classic BPF checker allows them) + +Second step starts from the first insn and descends all possible paths. +It simulates execution of every insn and observes the state change of +registers and stack. + +At the start of the program the register R1 contains a pointer to context +and has type PTR_TO_CTX. 
+If the verifier sees an insn that does R2=R1, then R2 now has type
+PTR_TO_CTX as well and can be used on the right hand side of an expression.
+If R1=PTR_TO_CTX and the insn is R2=R1+R1, then R2=UNKNOWN_VALUE,
+since the addition of two valid pointers makes an invalid pointer.
+(In 'secure' mode the verifier will reject any type of pointer arithmetic to
+make sure that kernel addresses don't leak to unprivileged users.)
+
+If a register was never written to, it's not readable:
+  bpf_mov R0 = R2
+  bpf_exit
+will be rejected, since R2 is unreadable at the start of the program.
+
+After a kernel function call, R1-R5 are reset to unreadable and
+R0 has the return type of the function.
+
+Since R6-R9 are callee saved, their state is preserved across the call.
+  bpf_mov R6 = 1
+  bpf_call foo
+  bpf_mov R0 = R6
+  bpf_exit
+is a correct program. If there was R1 instead of R6, it would have
+been rejected.
+
+load/store instructions are allowed only with registers of valid types, which
+are PTR_TO_CTX, PTR_TO_MAP, FRAME_PTR. They are bounds and alignment checked.
+For example:
+  bpf_mov R1 = 1
+  bpf_mov R2 = 2
+  bpf_xadd *(u32 *)(R1 + 3) += R2
+  bpf_exit
+will be rejected, since R1 doesn't have a valid pointer type at the time of
+execution of the instruction bpf_xadd.
+
+At the start R1's type is PTR_TO_CTX (a pointer to the generic 'struct
+bpf_context'). A callback is used to customize the verifier to restrict eBPF
+program access to only certain fields within the ctx structure, with specified
+size and alignment.
+
+For example, the following insn:
+  bpf_ld R0 = *(u32 *)(R6 + 8)
+intends to load a word from address R6 + 8 and store it into R0.
+If R6=PTR_TO_CTX, via the is_valid_access() callback the verifier will know
+that offset 8 of size 4 bytes can be accessed for reading, otherwise
+the verifier will reject the program.
+If R6=FRAME_PTR, then the access should be aligned and be within
+stack bounds, which are [-MAX_BPF_STACK, 0). In this example the offset is 8,
+so it will fail verification, since it's out of bounds.
+
+The verifier will allow an eBPF program to read data from the stack only after
+it has written into it.
+The classic BPF verifier does a similar check with the M[0-15] memory slots.
+For example:
+  bpf_ld R0 = *(u32 *)(R10 - 4)
+  bpf_exit
+is an invalid program.
+Though R10 is a correct read-only register and has type FRAME_PTR
+and R10 - 4 is within stack bounds, there were no stores into that location.
+
+Pointer register spill/fill is tracked as well, since four (R6-R9)
+callee saved registers may not be enough for some programs.
+
+Allowed function calls are customized with bpf_verifier_ops->get_func_proto().
+The eBPF verifier will check that registers match argument constraints.
+After the call, register R0 will be set to the return type of the function.
+
+Function calls are the main mechanism to extend the functionality of eBPF
+programs. Socket filters may let programs call one set of functions, whereas
+tracing filters may allow a completely different set.
+
+If a function is made accessible to eBPF programs, it needs to be thought
+through from a safety point of view. The verifier will guarantee that the
+function is called with valid arguments.
+
+seccomp vs socket filters have different security restrictions for classic BPF.
+Seccomp solves this with a two stage verifier: the classic BPF verifier is
+followed by the seccomp verifier. In the case of eBPF, one configurable
+verifier is shared for all use cases.
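+
+For contrast, here is a minimal program that the verifier accepts, written
+with the same insn macros used by the examples in the section on verifier
+messages below. It initializes R0 (the return value register) before exiting,
+so every register that is read has been written first:
+
+static struct bpf_insn prog[] = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
+	BPF_EXIT_INSN(),		/* return r0 */
+};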
+ +See details of eBPF verifier in kernel/bpf/verifier.c + +eBPF maps +--------- +'maps' is a generic storage of different types for sharing data between kernel +and userspace. + +The maps are accessed from user space via BPF syscall, which has commands: +- create a map with given type and attributes + map_fd = bpf(BPF_MAP_CREATE, union bpf_attr *attr, u32 size) + using attr->map_type, attr->key_size, attr->value_size, attr->max_entries + returns process-local file descriptor or negative error + +- lookup key in a given map + err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size) + using attr->map_fd, attr->key, attr->value + returns zero and stores found elem into value or negative error + +- create or update key/value pair in a given map + err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) + using attr->map_fd, attr->key, attr->value + returns zero or negative error + +- find and delete element by key in a given map + err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size) + using attr->map_fd, attr->key + +- to delete map: close(fd) + Exiting process will delete maps automatically + +userspace programs use this syscall to create/access maps that eBPF programs +are concurrently updating. + +maps can have different types: hash, array, bloom filter, radix-tree, etc. + +The map is defined by: + . type + . max number of elements + . key size in bytes + . value size in bytes + +Understanding eBPF verifier messages +------------------------------------ + +The following are few examples of invalid eBPF programs and verifier error +messages as seen in the log: + +Program with unreachable instructions: +static struct bpf_insn prog[] = { + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), +}; +Error: + unreachable insn 1 + +Program that reads uninitialized register: + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), +Error: + 0: (bf) r0 = r2 + R2 !read_ok + +Program that doesn't initialize R0 before exiting: + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), +Error: + 0: (bf) r2 = r1 + 1: (95) exit + R0 !read_ok + +Program that accesses stack out of bounds: + BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), + BPF_EXIT_INSN(), +Error: + 0: (7a) *(u64 *)(r10 +8) = 0 + invalid stack off=8 size=8 + +Program that doesn't initialize stack before passing its address into function: + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), +Error: + 0: (bf) r2 = r10 + 1: (07) r2 += -8 + 2: (b7) r1 = 0x0 + 3: (85) call 1 + invalid indirect read from stack off -8+0 size 8 + +Program that uses invalid map_fd=0 while calling to map_lookup_elem() function: + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), +Error: + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 0x0 + 4: (85) call 1 + fd 0 is not pointing to valid bpf_map + +Program that doesn't check return value of map_lookup_elem() before accessing +map element: + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), +Error: + 0: (7a) *(u64 *)(r10 -8) = 0 + 
1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 0x0 + 4: (85) call 1 + 5: (7a) *(u64 *)(r0 +0) = 0 + R0 invalid mem access 'map_value_or_null' + +Program that correctly checks map_lookup_elem() returned value for NULL, but +accesses the memory with incorrect alignment: + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), + BPF_EXIT_INSN(), +Error: + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 1 + 4: (85) call 1 + 5: (15) if r0 == 0x0 goto pc+1 + R0=map_ptr R10=fp + 6: (7a) *(u64 *)(r0 +4) = 0 + misaligned access off 4 size 8 + +Program that correctly checks map_lookup_elem() returned value for NULL and +accesses memory with correct alignment in one side of 'if' branch, but fails +to do so in the other side of 'if' branch: + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), +Error: + 0: (7a) *(u64 *)(r10 -8) = 0 + 1: (bf) r2 = r10 + 2: (07) r2 += -8 + 3: (b7) r1 = 1 + 4: (85) call 1 + 5: (15) if r0 == 0x0 goto pc+2 + R0=map_ptr R10=fp + 6: (7a) *(u64 *)(r0 +0) = 0 + 7: (95) exit + + from 5 to 8: R0=imm0 R10=fp + 8: (7a) *(u64 *)(r0 +0) = 1 + R0 invalid mem access 'imm' + Testing ------- diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index caedb18d4564c9185861a9c14b6a6f0ea0568deb..0307e2875f2159cb669b741f9d6a949618c3a055 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -65,6 +65,12 @@ neigh/default/gc_thresh1 - INTEGER purge entries if there are fewer than this number. Default: 128 +neigh/default/gc_thresh2 - INTEGER + Threshold when garbage collector becomes more aggressive about + purging entries. Entries older than 5 seconds will be cleared + when over this number. + Default: 512 + neigh/default/gc_thresh3 - INTEGER Maximum number of neighbor entries allowed. Increase this when using large numbers of interfaces and when communicating @@ -757,8 +763,21 @@ icmp_ratelimit - INTEGER icmp_ratemask (see below) to specific targets. 0 to disable any limiting, otherwise the minimal space between responses in milliseconds. + Note that another sysctl, icmp_msgs_per_sec limits the number + of ICMP packets sent on all targets. + Default: 1000 + +icmp_msgs_per_sec - INTEGER + Limit maximal number of ICMP packets sent per second from this host. + Only messages whose type matches icmp_ratemask (see below) are + controlled by this limit. Default: 1000 +icmp_msgs_burst - INTEGER + icmp_msgs_per_sec controls number of ICMP packets sent per second, + while icmp_msgs_burst controls the burst size of these packets. + Default: 50 + icmp_ratemask - INTEGER Mask made of ICMP types for which rates are being limited. Significant bits: IHGFEDCBA9876543210 @@ -832,6 +851,11 @@ igmp_max_memberships - INTEGER conf/all/* is special, changes the settings for all interfaces +igmp_qrv - INTEGER + Controls the IGMP query robustness variable (see RFC2236 8.1). 
+ Default: 2 (as specified by RFC2236 8.1) + Minimum: 1 (as specified by RFC6636 4.5) + log_martians - BOOLEAN Log packets with impossible addresses to kernel log. log_martians for the interface will be enabled if at least one of @@ -935,14 +959,9 @@ accept_source_route - BOOLEAN FALSE (host) accept_local - BOOLEAN - Accept packets with local source addresses. In combination - with suitable routing, this can be used to direct packets - between two local interfaces over the wire and have them - accepted properly. - - rp_filter must be set to a non-zero value in order for - accept_local to have an effect. - + Accept packets with local source addresses. In combination with + suitable routing, this can be used to direct packets between two + local interfaces over the wire and have them accepted properly. default FALSE route_localnet - BOOLEAN @@ -1140,6 +1159,11 @@ anycast_src_echo_reply - BOOLEAN FALSE: disabled Default: FALSE +mld_qrv - INTEGER + Controls the MLD query robustness variable (see RFC3810 9.1). + Default: 2 (as specified by RFC3810 9.1) + Minimum: 1 (as specified by RFC6636 4.5) + IPv6 Fragmentation: ip6frag_high_thresh - INTEGER diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt index 0dffc6e3790215b90b3208e50cd8316222289bd4..6915c6b2786972b8afde6b91c1695df0cf993c08 100644 --- a/Documentation/networking/pktgen.txt +++ b/Documentation/networking/pktgen.txt @@ -99,6 +99,9 @@ Examples: pgset "clone_skb 1" sets the number of copies of the same packet pgset "clone_skb 0" use single SKB for all transmits + pgset "burst 8" uses xmit_more API to queue 8 copies of the same + packet and update HW tx queue tail pointer once. + "burst 1" is the default pgset "pkt_size 9014" sets packet size to 9014 pgset "frags 5" packet will consist of 5 fragments pgset "count 200000" sets number of packets to send, set to zero diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 897f942b976bd61597d5dd49ceb17cf24a0bc70d..412f45ca2d73e3cd31a487e5fe13b73fc552d9f5 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -1,102 +1,307 @@ -The existing interfaces for getting network packages time stamped are: + +1. Control Interfaces + +The interfaces for receiving network packages timestamps are: * SO_TIMESTAMP - Generate time stamp for each incoming packet using the (not necessarily - monotonous!) system time. Result is returned via recv_msg() in a - control message as timeval (usec resolution). + Generates a timestamp for each incoming packet in (not necessarily + monotonic) system time. Reports the timestamp via recvmsg() in a + control message as struct timeval (usec resolution). * SO_TIMESTAMPNS - Same time stamping mechanism as SO_TIMESTAMP, but returns result as - timespec (nsec resolution). + Same timestamping mechanism as SO_TIMESTAMP, but reports the + timestamp as struct timespec (nsec resolution). * IP_MULTICAST_LOOP + SO_TIMESTAMP[NS] - Only for multicasts: approximate send time stamp by receiving the looped - packet and using its receive time stamp. + Only for multicast:approximate transmit timestamp obtained by + reading the looped packet receive timestamp. -The following interface complements the existing ones: receive time -stamps can be generated and returned for arbitrary packets and much -closer to the point where the packet is really sent. Time stamps can -be generated in software (as before) or in hardware (if the hardware -has such a feature). 
+* SO_TIMESTAMPING + Generates timestamps on reception, transmission or both. Supports + multiple timestamp sources, including hardware. Supports generating + timestamps for stream sockets. -SO_TIMESTAMPING: -Instructs the socket layer which kind of information should be collected -and/or reported. The parameter is an integer with some of the following -bits set. Setting other bits is an error and doesn't change the current -state. +1.1 SO_TIMESTAMP: -Four of the bits are requests to the stack to try to generate -timestamps. Any combination of them is valid. +This socket option enables timestamping of datagrams on the reception +path. Because the destination socket, if any, is not known early in +the network stack, the feature has to be enabled for all packets. The +same is true for all early receive timestamp options. -SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware -SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software -SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware -SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software +For interface details, see `man 7 socket`. + + +1.2 SO_TIMESTAMPNS: + +This option is identical to SO_TIMESTAMP except for the returned data type. +Its struct timespec allows for higher resolution (ns) timestamps than the +timeval of SO_TIMESTAMP (ms). + + +1.3 SO_TIMESTAMPING: + +Supports multiple types of timestamp requests. As a result, this +socket option takes a bitmap of flags, not a boolean. In + + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, &val); + +val is an integer with any of the following bits set. Setting other +bit returns EINVAL and does not change the current state. -The other three bits control which timestamps will be reported in a -generated control message. If none of these bits are set or if none of -the set bits correspond to data that is available, then the control -message will not be generated: -SOF_TIMESTAMPING_SOFTWARE: report systime if available -SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available (deprecated) -SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available +1.3.1 Timestamp Generation -It is worth noting that timestamps may be collected for reasons other -than being requested by a particular socket with -SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that -can generate hardware receive timestamps ignore -SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag -in case future drivers pay attention. +Some bits are requests to the stack to try to generate timestamps. Any +combination of them is valid. Changes to these bits apply to newly +created packets, not to packets already in the stack. As a result, it +is possible to selectively request timestamps for a subset of packets +(e.g., for sampling) by embedding an send() call within two setsockopt +calls, one to enable timestamp generation and one to disable it. +Timestamps may also be generated for reasons other than being +requested by a particular socket, such as when receive timestamping is +enabled system wide, as explained earlier. -If timestamps are reported, they will appear in a control message with -cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like -this: +SOF_TIMESTAMPING_RX_HARDWARE: + Request rx timestamps generated by the network adapter. + +SOF_TIMESTAMPING_RX_SOFTWARE: + Request rx timestamps when data enters the kernel. 
These timestamps + are generated just after a device driver hands a packet to the + kernel receive stack. + +SOF_TIMESTAMPING_TX_HARDWARE: + Request tx timestamps generated by the network adapter. + +SOF_TIMESTAMPING_TX_SOFTWARE: + Request tx timestamps when data leaves the kernel. These timestamps + are generated in the device driver as close as possible, but always + prior to, passing the packet to the network interface. Hence, they + require driver support and may not be available for all devices. + +SOF_TIMESTAMPING_TX_SCHED: + Request tx timestamps prior to entering the packet scheduler. Kernel + transmit latency is, if long, often dominated by queuing delay. The + difference between this timestamp and one taken at + SOF_TIMESTAMPING_TX_SOFTWARE will expose this latency independent + of protocol processing. The latency incurred in protocol + processing, if any, can be computed by subtracting a userspace + timestamp taken immediately before send() from this timestamp. On + machines with virtual devices where a transmitted packet travels + through multiple devices and, hence, multiple packet schedulers, + a timestamp is generated at each layer. This allows for fine + grained measurement of queuing delay. + +SOF_TIMESTAMPING_TX_ACK: + Request tx timestamps when all data in the send buffer has been + acknowledged. This only makes sense for reliable protocols. It is + currently only implemented for TCP. For that protocol, it may + over-report measurement, because the timestamp is generated when all + data up to and including the buffer at send() was acknowledged: the + cumulative acknowledgment. The mechanism ignores SACK and FACK. + + +1.3.2 Timestamp Reporting + +The other three bits control which timestamps will be reported in a +generated control message. Changes to the bits take immediate +effect at the timestamp reporting locations in the stack. Timestamps +are only reported for packets that also have the relevant timestamp +generation request set. + +SOF_TIMESTAMPING_SOFTWARE: + Report any software timestamps when available. + +SOF_TIMESTAMPING_SYS_HARDWARE: + This option is deprecated and ignored. + +SOF_TIMESTAMPING_RAW_HARDWARE: + Report hardware timestamps as generated by + SOF_TIMESTAMPING_TX_HARDWARE when available. + + +1.3.3 Timestamp Options + +The interface supports one option + +SOF_TIMESTAMPING_OPT_ID: + + Generate a unique identifier along with each packet. A process can + have multiple concurrent timestamping requests outstanding. Packets + can be reordered in the transmit path, for instance in the packet + scheduler. In that case timestamps will be queued onto the error + queue out of order from the original send() calls. This option + embeds a counter that is incremented at send() time, to order + timestamps within a flow. + + This option is implemented only for transmit timestamps. There, the + timestamp is always looped along with a struct sock_extended_err. + The option modifies field ee_info to pass an id that is unique + among all possibly concurrently outstanding timestamp requests for + that socket. In practice, it is a monotonically increasing u32 + (that wraps). + + In datagram sockets, the counter increments on each send call. In + stream sockets, it increments with every byte. + + +1.4 Bytestream Timestamps + +The SO_TIMESTAMPING interface supports timestamping of bytes in a +bytestream. Each request is interpreted as a request for when the +entire contents of the buffer has passed a timestamping point. 
That +is, for streams option SOF_TIMESTAMPING_TX_SOFTWARE will record +when all bytes have reached the device driver, regardless of how +many packets the data has been converted into. + +In general, bytestreams have no natural delimiters and therefore +correlating a timestamp with data is non-trivial. A range of bytes +may be split across segments, any segments may be merged (possibly +coalescing sections of previously segmented buffers associated with +independent send() calls). Segments can be reordered and the same +byte range can coexist in multiple segments for protocols that +implement retransmissions. + +It is essential that all timestamps implement the same semantics, +regardless of these possible transformations, as otherwise they are +incomparable. Handling "rare" corner cases differently from the +simple case (a 1:1 mapping from buffer to skb) is insufficient +because performance debugging often needs to focus on such outliers. + +In practice, timestamps can be correlated with segments of a +bytestream consistently, if both semantics of the timestamp and the +timing of measurement are chosen correctly. This challenge is no +different from deciding on a strategy for IP fragmentation. There, the +definition is that only the first fragment is timestamped. For +bytestreams, we chose that a timestamp is generated only when all +bytes have passed a point. SOF_TIMESTAMPING_TX_ACK as defined is easy to +implement and reason about. An implementation that has to take into +account SACK would be more complex due to possible transmission holes +and out of order arrival. + +On the host, TCP can also break the simple 1:1 mapping from buffer to +skbuff as a result of Nagle, cork, autocork, segmentation and GSO. The +implementation ensures correctness in all cases by tracking the +individual last byte passed to send(), even if it is no longer the +last byte after an skbuff extend or merge operation. It stores the +relevant sequence number in skb_shinfo(skb)->tskey. Because an skbuff +has only one such field, only one timestamp can be generated. + +In rare cases, a timestamp request can be missed if two requests are +collapsed onto the same skb. A process can detect this situation by +enabling SOF_TIMESTAMPING_OPT_ID and comparing the byte offset at +send time with the value returned for each timestamp. It can prevent +the situation by always flushing the TCP stack in between requests, +for instance by enabling TCP_NODELAY and disabling TCP_CORK and +autocork. + +These precautions ensure that the timestamp is generated only when all +bytes have passed a timestamp point, assuming that the network stack +itself does not reorder the segments. The stack indeed tries to avoid +reordering. The one exception is under administrator control: it is +possible to construct a packet scheduler configuration that delays +segments from the same stream differently. Such a setup would be +unusual. + + +2 Data Interfaces + +Timestamps are read using the ancillary data feature of recvmsg(). +See `man 3 cmsg` for details of this interface. The socket manual +page (`man 7 socket`) describes how timestamps generated with +SO_TIMESTAMP and SO_TIMESTAMPNS records can be retrieved. 
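+
+As a rough sketch (error handling trimmed, the function name is purely
+illustrative), enabling software receive timestamps and reading the
+scm_timestamping record described in the next section could look like this:
+
+	#include <stdio.h>
+	#include <time.h>
+	#include <sys/socket.h>
+	#include <linux/errqueue.h>	/* struct scm_timestamping */
+	#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */
+
+	void rx_timestamp_sketch(int fd)
+	{
+		int val = SOF_TIMESTAMPING_RX_SOFTWARE |
+			  SOF_TIMESTAMPING_SOFTWARE;
+		char data[2048];
+		char ctrl[CMSG_SPACE(sizeof(struct scm_timestamping))];
+		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
+		struct msghdr msg = {
+			.msg_iov = &iov, .msg_iovlen = 1,
+			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
+		};
+		struct scm_timestamping *tss;
+		struct cmsghdr *cm;
+
+		/* request generation and reporting of software rx timestamps */
+		if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)))
+			return;
+
+		if (recvmsg(fd, &msg, 0) < 0)
+			return;
+
+		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+			if (cm->cmsg_level == SOL_SOCKET &&
+			    cm->cmsg_type == SCM_TIMESTAMPING) {
+				tss = (struct scm_timestamping *) CMSG_DATA(cm);
+
+				/* ts[0] holds the software timestamp (see 2.1) */
+				printf("rx sw timestamp: %ld.%09ld\n",
+				       (long) tss->ts[0].tv_sec,
+				       (long) tss->ts[0].tv_nsec);
+			}
+		}
+	}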
+ + +2.1 SCM_TIMESTAMPING records + +These timestamps are returned in a control message with cmsg_level +SOL_SOCKET, cmsg_type SCM_TIMESTAMPING, and payload of type struct scm_timestamping { - struct timespec systime; - struct timespec hwtimetrans; - struct timespec hwtimeraw; + struct timespec ts[3]; }; -recvmsg() can be used to get this control message for regular incoming -packets. For send time stamps the outgoing packet is looped back to -the socket's error queue with the send time stamp(s) attached. It can -be received with recvmsg(flags=MSG_ERRQUEUE). The call returns the -original outgoing packet data including all headers preprended down to -and including the link layer, the scm_timestamping control message and -a sock_extended_err control message with ee_errno==ENOMSG and -ee_origin==SO_EE_ORIGIN_TIMESTAMPING. A socket with such a pending -bounced packet is ready for reading as far as select() is concerned. -If the outgoing packet has to be fragmented, then only the first -fragment is time stamped and returned to the sending socket. - -All three values correspond to the same event in time, but were -generated in different ways. Each of these values may be empty (= all -zero), in which case no such value was available. If the application -is not interested in some of these values, they can be left blank to -avoid the potential overhead of calculating them. - -systime is the value of the system time at that moment. This -corresponds to the value also returned via SO_TIMESTAMP[NS]. If the -time stamp was generated by hardware, then this field is -empty. Otherwise it is filled in if SOF_TIMESTAMPING_SOFTWARE is -set. - -hwtimeraw is the original hardware time stamp. Filled in if -SOF_TIMESTAMPING_RAW_HARDWARE is set. No assumptions about its -relation to system time should be made. - -hwtimetrans is always zero. This field is deprecated. It used to hold -hw timestamps converted to system time. Instead, expose the hardware -clock device on the NIC directly as a HW PTP clock source, to allow -time conversion in userspace and optionally synchronize system time -with a userspace PTP stack such as linuxptp. For the PTP clock API, -see Documentation/ptp/ptp.txt. - - -SIOCSHWTSTAMP, SIOCGHWTSTAMP: +The structure can return up to three timestamps. This is a legacy +feature. Only one field is non-zero at any time. Most timestamps +are passed in ts[0]. Hardware timestamps are passed in ts[2]. + +ts[1] used to hold hardware timestamps converted to system time. +Instead, expose the hardware clock device on the NIC directly as +a HW PTP clock source, to allow time conversion in userspace and +optionally synchronize system time with a userspace PTP stack such +as linuxptp. For the PTP clock API, see Documentation/ptp/ptp.txt. + +2.1.1 Transmit timestamps with MSG_ERRQUEUE + +For transmit timestamps the outgoing packet is looped back to the +socket's error queue with the send timestamp(s) attached. A process +receives the timestamps by calling recvmsg() with flag MSG_ERRQUEUE +set and with a msg_control buffer sufficiently large to receive the +relevant metadata structures. The recvmsg call returns the original +outgoing data packet with two ancillary messages attached. + +A message of cm_level SOL_IP(V6) and cm_type IP(V6)_RECVERR +embeds a struct sock_extended_err. This defines the error type. For +timestamps, the ee_errno field is ENOMSG. The other ancillary message +will have cm_level SOL_SOCKET and cm_type SCM_TIMESTAMPING. This +embeds the struct scm_timestamping. 
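+
+A minimal error queue read along these lines (assuming <linux/errqueue.h>,
+<netinet/in.h>, <sys/socket.h>, <errno.h>, <time.h> and <stdio.h> are
+included; the function name is illustrative only; see the txtimestamp example
+program for a complete version) could be:
+
+	void tx_timestamp_sketch(int fd)
+	{
+		char data[2048], ctrl[1024];
+		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
+		struct msghdr msg = {
+			.msg_iov = &iov, .msg_iovlen = 1,
+			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
+		};
+		struct scm_timestamping *tss = NULL;
+		struct sock_extended_err *serr = NULL;
+		struct cmsghdr *cm;
+
+		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
+			return;
+
+		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+			if (cm->cmsg_level == SOL_SOCKET &&
+			    cm->cmsg_type == SCM_TIMESTAMPING)
+				tss = (struct scm_timestamping *) CMSG_DATA(cm);
+			else if ((cm->cmsg_level == SOL_IP &&
+				  cm->cmsg_type == IP_RECVERR) ||
+				 (cm->cmsg_level == SOL_IPV6 &&
+				  cm->cmsg_type == IPV6_RECVERR))
+				serr = (struct sock_extended_err *) CMSG_DATA(cm);
+		}
+
+		if (tss && serr && serr->ee_errno == ENOMSG &&
+		    serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
+			/* ts[0]: software, ts[2]: raw hardware (see 2.1) */
+			printf("tx timestamp: %ld.%09ld (type %u, id %u)\n",
+			       (long) tss->ts[0].tv_sec,
+			       (long) tss->ts[0].tv_nsec,
+			       serr->ee_info, serr->ee_data);
+	}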
+ + +2.1.1.2 Timestamp types + +The semantics of the three struct timespec are defined by field +ee_info in the extended error structure. It contains a value of +type SCM_TSTAMP_* to define the actual timestamp passed in +scm_timestamping. + +The SCM_TSTAMP_* types are 1:1 matches to the SOF_TIMESTAMPING_* +control fields discussed previously, with one exception. For legacy +reasons, SCM_TSTAMP_SND is equal to zero and can be set for both +SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE. It +is the first if ts[2] is non-zero, the second otherwise, in which +case the timestamp is stored in ts[0]. + + +2.1.1.3 Fragmentation + +Fragmentation of outgoing datagrams is rare, but is possible, e.g., by +explicitly disabling PMTU discovery. If an outgoing packet is fragmented, +then only the first fragment is timestamped and returned to the sending +socket. + + +2.1.1.4 Packet Payload + +The calling application is often not interested in receiving the whole +packet payload that it passed to the stack originally: the socket +error queue mechanism is just a method to piggyback the timestamp on. +In this case, the application can choose to read datagrams with a +smaller buffer, possibly even of length 0. The payload is truncated +accordingly. Until the process calls recvmsg() on the error queue, +however, the full packet is queued, taking up budget from SO_RCVBUF. + + +2.1.1.5 Blocking Read + +Reading from the error queue is always a non-blocking operation. To +block waiting on a timestamp, use poll or select. poll() will return +POLLERR in pollfd.revents if any data is ready on the error queue. +There is no need to pass this flag in pollfd.events. This flag is +ignored on request. See also `man 2 poll`. + + +2.1.2 Receive timestamps + +On reception, there is no reason to read from the socket error queue. +The SCM_TIMESTAMPING ancillary data is sent along with the packet data +on a normal recvmsg(). Since this is not a socket error, it is not +accompanied by a message SOL_IP(V6)/IP(V6)_RECVERROR. In this case, +the meaning of the three fields in struct scm_timestamping is +implicitly defined. ts[0] holds a software timestamp if set, ts[1] +is again deprecated and ts[2] holds a hardware timestamp if set. + + +3. Hardware Timestamping configuration: SIOCSHWTSTAMP and SIOCGHWTSTAMP Hardware time stamping must also be initialized for each device driver that is expected to do hardware time stamping. 
The parameter is defined in @@ -167,8 +372,7 @@ enum { */ }; - -DEVICE IMPLEMENTATION +3.1 Hardware Timestamping Implementation: Device Drivers A driver which supports hardware time stamping must support the SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile index 52ac67da931516873f3039613ff139c1d7b5d6bd..8c20dfaa4d6ee91341ac782179127274e8abde61 100644 --- a/Documentation/networking/timestamping/Makefile +++ b/Documentation/networking/timestamping/Makefile @@ -1,8 +1,14 @@ +# To compile, from the source root +# +# make headers_install +# make M=documentation + # List of programs to build -hostprogs-y := hwtstamp_config timestamping +hostprogs-y := hwtstamp_config timestamping txtimestamp # Tell kbuild to always build the programs always := $(hostprogs-y) HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include +HOSTCFLAGS_txtimestamp.o += -I$(objtree)/usr/include HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include diff --git a/Documentation/networking/timestamping/txtimestamp.c b/Documentation/networking/timestamping/txtimestamp.c new file mode 100644 index 0000000000000000000000000000000000000000..b32fc2a07734ca28ac60a30913dab738b8111f57 --- /dev/null +++ b/Documentation/networking/timestamping/txtimestamp.c @@ -0,0 +1,469 @@ +/* + * Copyright 2014 Google Inc. + * Author: willemb@google.com (Willem de Bruijn) + * + * Test software tx timestamping, including + * + * - SCHED, SND and ACK timestamps + * - RAW, UDP and TCP + * - IPv4 and IPv6 + * - various packet sizes (to test GSO and TSO) + * + * Consult the command line arguments for help on running + * the various testcases. + * + * This test requires a dummy TCP server. + * A simple `nc6 [-u] -l -p $DESTPORT` will do + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* command line parameters */ +static int cfg_proto = SOCK_STREAM; +static int cfg_ipproto = IPPROTO_TCP; +static int cfg_num_pkts = 4; +static int do_ipv4 = 1; +static int do_ipv6 = 1; +static int cfg_payload_len = 10; +static uint16_t dest_port = 9000; + +static struct sockaddr_in daddr; +static struct sockaddr_in6 daddr6; +static struct timespec ts_prev; + +static void __print_timestamp(const char *name, struct timespec *cur, + uint32_t key, int payload_len) +{ + if (!(cur->tv_sec | cur->tv_nsec)) + return; + + fprintf(stderr, " %s: %lu s %lu us (seq=%u, len=%u)", + name, cur->tv_sec, cur->tv_nsec / 1000, + key, payload_len); + + if ((ts_prev.tv_sec | ts_prev.tv_nsec)) { + int64_t cur_ms, prev_ms; + + cur_ms = (long) cur->tv_sec * 1000 * 1000; + cur_ms += cur->tv_nsec / 1000; + + prev_ms = (long) ts_prev.tv_sec * 1000 * 1000; + prev_ms += ts_prev.tv_nsec / 1000; + + fprintf(stderr, " (%+ld us)", cur_ms - prev_ms); + } + + ts_prev = *cur; + fprintf(stderr, "\n"); +} + +static void print_timestamp_usr(void) +{ + struct timespec ts; + struct timeval tv; /* avoid dependency on -lrt */ + + gettimeofday(&tv, NULL); + ts.tv_sec = tv.tv_sec; + ts.tv_nsec = tv.tv_usec * 1000; + + __print_timestamp(" USR", &ts, 0, 0); +} + +static void print_timestamp(struct scm_timestamping *tss, int tstype, + int tskey, int payload_len) +{ + const char *tsname; + + switch (tstype) { + case SCM_TSTAMP_SCHED: + tsname = " ENQ"; + break; + case SCM_TSTAMP_SND: + tsname = " SND"; + break; + case SCM_TSTAMP_ACK: + tsname = " ACK"; + break; + default: + error(1, 0, "unknown timestamp type: %u", + tstype); + } + __print_timestamp(tsname, &tss->ts[0], tskey, payload_len); +} + +static void __poll(int fd) +{ + struct pollfd pollfd; + int ret; + + memset(&pollfd, 0, sizeof(pollfd)); + pollfd.fd = fd; + ret = poll(&pollfd, 1, 100); + if (ret != 1) + error(1, errno, "poll"); +} + +static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) +{ + struct sock_extended_err *serr = NULL; + struct scm_timestamping *tss = NULL; + struct cmsghdr *cm; + + for (cm = CMSG_FIRSTHDR(msg); + cm && cm->cmsg_len; + cm = CMSG_NXTHDR(msg, cm)) { + if (cm->cmsg_level == SOL_SOCKET && + cm->cmsg_type == SCM_TIMESTAMPING) { + tss = (void *) CMSG_DATA(cm); + } else if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_RECVERR)) { + + serr = (void *) CMSG_DATA(cm); + if (serr->ee_errno != ENOMSG || + serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) { + fprintf(stderr, "unknown ip error %d %d\n", + serr->ee_errno, + serr->ee_origin); + serr = NULL; + } + } else + fprintf(stderr, "unknown cmsg %d,%d\n", + cm->cmsg_level, cm->cmsg_type); + } + + if (serr && tss) + print_timestamp(tss, serr->ee_info, serr->ee_data, payload_len); +} + +static int recv_errmsg(int fd) +{ + static char ctrl[1024 /* overprovision*/]; + static struct msghdr msg; + struct iovec entry; + static char *data; + int ret = 0; + + data = malloc(cfg_payload_len); + if (!data) + error(1, 0, "malloc"); + + memset(&msg, 0, sizeof(msg)); + memset(&entry, 0, sizeof(entry)); + memset(ctrl, 0, sizeof(ctrl)); + + entry.iov_base = data; + entry.iov_len = cfg_payload_len; + msg.msg_iov = &entry; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + 
msg.msg_namelen = 0; + msg.msg_control = ctrl; + msg.msg_controllen = sizeof(ctrl); + + ret = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (ret == -1 && errno != EAGAIN) + error(1, errno, "recvmsg"); + + __recv_errmsg_cmsg(&msg, ret); + + free(data); + return ret == -1; +} + +static void do_test(int family, unsigned int opt) +{ + char *buf; + int fd, i, val, total_len; + + if (family == IPPROTO_IPV6 && cfg_proto != SOCK_STREAM) { + /* due to lack of checksum generation code */ + fprintf(stderr, "test: skipping datagram over IPv6\n"); + return; + } + + total_len = cfg_payload_len; + if (cfg_proto == SOCK_RAW) { + total_len += sizeof(struct udphdr); + if (cfg_ipproto == IPPROTO_RAW) + total_len += sizeof(struct iphdr); + } + + buf = malloc(total_len); + if (!buf) + error(1, 0, "malloc"); + + fd = socket(family, cfg_proto, cfg_ipproto); + if (fd < 0) + error(1, errno, "socket"); + + if (cfg_proto == SOCK_STREAM) { + val = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, + (char*) &val, sizeof(val))) + error(1, 0, "setsockopt no nagle"); + + if (family == PF_INET) { + if (connect(fd, (void *) &daddr, sizeof(daddr))) + error(1, errno, "connect ipv4"); + } else { + if (connect(fd, (void *) &daddr6, sizeof(daddr6))) + error(1, errno, "connect ipv6"); + } + } + + opt |= SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID; + if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, + (char *) &opt, sizeof(opt))) + error(1, 0, "setsockopt timestamping"); + + for (i = 0; i < cfg_num_pkts; i++) { + memset(&ts_prev, 0, sizeof(ts_prev)); + memset(buf, 'a' + i, total_len); + buf[total_len - 2] = '\n'; + buf[total_len - 1] = '\0'; + + if (cfg_proto == SOCK_RAW) { + struct udphdr *udph; + int off = 0; + + if (cfg_ipproto == IPPROTO_RAW) { + struct iphdr *iph = (void *) buf; + + memset(iph, 0, sizeof(*iph)); + iph->ihl = 5; + iph->version = 4; + iph->ttl = 2; + iph->daddr = daddr.sin_addr.s_addr; + iph->protocol = IPPROTO_UDP; + /* kernel writes saddr, csum, len */ + + off = sizeof(*iph); + } + + udph = (void *) buf + off; + udph->source = ntohs(9000); /* random spoof */ + udph->dest = ntohs(dest_port); + udph->len = ntohs(sizeof(*udph) + cfg_payload_len); + udph->check = 0; /* not allowed for IPv6 */ + } + + print_timestamp_usr(); + if (cfg_proto != SOCK_STREAM) { + if (family == PF_INET) + val = sendto(fd, buf, total_len, 0, (void *) &daddr, sizeof(daddr)); + else + val = sendto(fd, buf, total_len, 0, (void *) &daddr6, sizeof(daddr6)); + } else { + val = send(fd, buf, cfg_payload_len, 0); + } + if (val != total_len) + error(1, errno, "send"); + + /* wait for all errors to be queued, else ACKs arrive OOO */ + usleep(50 * 1000); + + __poll(fd); + + while (!recv_errmsg(fd)) {} + } + + if (close(fd)) + error(1, errno, "close"); + + free(buf); + usleep(400 * 1000); +} + +static void __attribute__((noreturn)) usage(const char *filepath) +{ + fprintf(stderr, "\nUsage: %s [options] hostname\n" + "\nwhere options are:\n" + " -4: only IPv4\n" + " -6: only IPv6\n" + " -h: show this message\n" + " -l N: send N bytes at a time\n" + " -r: use raw\n" + " -R: use raw (IP_HDRINCL)\n" + " -p N: connect to port N\n" + " -u: use udp\n", + filepath); + exit(1); +} + +static void parse_opt(int argc, char **argv) +{ + int proto_count = 0; + char c; + + while ((c = getopt(argc, argv, "46hl:p:rRu")) != -1) { + switch (c) { + case '4': + do_ipv6 = 0; + break; + case '6': + do_ipv4 = 0; + break; + case 'r': + proto_count++; + cfg_proto = SOCK_RAW; + cfg_ipproto = IPPROTO_UDP; + break; + case 'R': + proto_count++; + cfg_proto = SOCK_RAW; + 
cfg_ipproto = IPPROTO_RAW; + break; + case 'u': + proto_count++; + cfg_proto = SOCK_DGRAM; + cfg_ipproto = IPPROTO_UDP; + break; + case 'l': + cfg_payload_len = strtoul(optarg, NULL, 10); + break; + case 'p': + dest_port = strtoul(optarg, NULL, 10); + break; + case 'h': + default: + usage(argv[0]); + } + } + + if (!cfg_payload_len) + error(1, 0, "payload may not be nonzero"); + if (cfg_proto != SOCK_STREAM && cfg_payload_len > 1472) + error(1, 0, "udp packet might exceed expected MTU"); + if (!do_ipv4 && !do_ipv6) + error(1, 0, "pass -4 or -6, not both"); + if (proto_count > 1) + error(1, 0, "pass -r, -R or -u, not multiple"); + + if (optind != argc - 1) + error(1, 0, "missing required hostname argument"); +} + +static void resolve_hostname(const char *hostname) +{ + struct addrinfo *addrs, *cur; + int have_ipv4 = 0, have_ipv6 = 0; + + if (getaddrinfo(hostname, NULL, NULL, &addrs)) + error(1, errno, "getaddrinfo"); + + cur = addrs; + while (cur && !have_ipv4 && !have_ipv6) { + if (!have_ipv4 && cur->ai_family == AF_INET) { + memcpy(&daddr, cur->ai_addr, sizeof(daddr)); + daddr.sin_port = htons(dest_port); + have_ipv4 = 1; + } + else if (!have_ipv6 && cur->ai_family == AF_INET6) { + memcpy(&daddr6, cur->ai_addr, sizeof(daddr6)); + daddr6.sin6_port = htons(dest_port); + have_ipv6 = 1; + } + cur = cur->ai_next; + } + if (addrs) + freeaddrinfo(addrs); + + do_ipv4 &= have_ipv4; + do_ipv6 &= have_ipv6; +} + +static void do_main(int family) +{ + fprintf(stderr, "family: %s\n", + family == PF_INET ? "INET" : "INET6"); + + fprintf(stderr, "test SND\n"); + do_test(family, SOF_TIMESTAMPING_TX_SOFTWARE); + + fprintf(stderr, "test ENQ\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED); + + fprintf(stderr, "test ENQ + SND\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE); + + if (cfg_proto == SOCK_STREAM) { + fprintf(stderr, "\ntest ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_ACK); + + fprintf(stderr, "\ntest SND + ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK); + + fprintf(stderr, "\ntest ENQ + SND + ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK); + } +} + +const char *sock_names[] = { NULL, "TCP", "UDP", "RAW" }; + +int main(int argc, char **argv) +{ + if (argc == 1) + usage(argv[0]); + + parse_opt(argc, argv); + resolve_hostname(argv[argc - 1]); + + fprintf(stderr, "protocol: %s\n", sock_names[cfg_proto]); + fprintf(stderr, "payload: %u\n", cfg_payload_len); + fprintf(stderr, "server port: %u\n", dest_port); + fprintf(stderr, "\n"); + + if (do_ipv4) + do_main(PF_INET); + if (do_ipv6) + do_main(PF_INET6); + + return 0; +} diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 9a0319a8247032c0fb6ae1a34129eacbba40c5eb..04892b821157774b81f4adecae870e06ef6f9bd4 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -241,6 +241,9 @@ address of the router (or Connected) for internal networks. 6. TIPC ------------------------------------------------------- +tipc_rmem +---------- + The TIPC protocol now has a tunable for the receive memory, similar to the tcp_rmem - i.e. a vector of 3 INTEGERs: (min, default, max) @@ -252,3 +255,16 @@ The max value is set to CONN_OVERLOAD_LIMIT, and the default and min values are scaled (shifted) versions of that same value. Note that the min value is not at this point in time used in any meaningful way, but the triplet is preserved in order to be consistent with things like tcp_rmem. 
+ +named_timeout +-------------- + +TIPC name table updates are distributed asynchronously in a cluster, without +any form of transaction handling. This means that different race scenarios are +possible. One such is that a name withdrawal sent out by one node and received +by another node may arrive after a second, overlapping name publication already +has been accepted from a third node, although the conflicting updates +originally may have been issued in the correct sequential order. +If named_timeout is nonzero, failed topology updates will be placed on a defer +queue until another event arrives that clears the error, or until the timeout +expires. Value is in milliseconds. diff --git a/MAINTAINERS b/MAINTAINERS index 0fa3ff342af6f958528dd0aa45aab1497ea2062d..9e315a44ae0c735f7fad455daacb6e8facf4945a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -152,8 +152,9 @@ F: drivers/scsi/53c700* 6LOWPAN GENERIC (BTLE/IEEE 802.15.4) M: Alexander Aring -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) +M: Jukka Rissanen L: linux-bluetooth@vger.kernel.org +L: linux-wpan@vger.kernel.org S: Maintained F: net/6lowpan/ F: include/net/6lowpan.h @@ -1649,6 +1650,7 @@ L: wil6210@qca.qualcomm.com S: Supported W: http://wireless.kernel.org/en/users/Drivers/wil6210 F: drivers/net/wireless/ath/wil6210/ +F: include/uapi/linux/wil6210_uapi.h CARL9170 LINUX COMMUNITY WIRELESS DRIVER M: Christian Lamparter @@ -3605,6 +3607,11 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: drivers/video/fbdev/s1d13xxxfb.c F: include/video/s1d13xxxfb.h +ET131X NETWORK DRIVER +M: Mark Einon +S: Odd Fixes +F: drivers/net/ethernet/agere/ + ETHERNET BRIDGE M: Stephen Hemminger L: bridge@lists.linux-foundation.org @@ -4650,13 +4657,14 @@ F: drivers/idle/i7300_idle.c IEEE 802.15.4 SUBSYSTEM M: Alexander Aring -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) -W: http://apps.sourceforge.net/trac/linux-zigbee -T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git +L: linux-wpan@vger.kernel.org +W: https://github.com/linux-wpan +T: git git://github.com/linux-wpan/linux-wpan-next.git S: Maintained F: net/ieee802154/ F: net/mac802154/ F: drivers/net/ieee802154/ +F: Documentation/networking/ieee802154.txt IGUANAWORKS USB IR TRANSCEIVER M: Sean Young @@ -4841,14 +4849,14 @@ M: Deepak Saxena S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) M: Jeff Kirsher M: Jesse Brandeburg M: Bruce Allan M: Carolyn Wyborny M: Don Skidmore M: Greg Rose -M: Alex Duyck +M: Matthew Vick M: John Ronciak M: Mitch Williams M: Linux NICS @@ -6429,7 +6437,7 @@ M: Lauro Ramos Venancio M: Aloisio Almeida Jr M: Samuel Ortiz L: linux-wireless@vger.kernel.org -L: linux-nfc@lists.01.org (moderated for non-subscribers) +L: linux-nfc@lists.01.org (subscribers-only) S: Supported F: net/nfc/ F: include/net/nfc/ @@ -7435,15 +7443,15 @@ F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER M: Shahed Shaikh -M: Dept-HSGLinuxNICDev@qlogic.com +M: Dept-GELinuxNICDev@qlogic.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER -M: Shahed Shaikh -M: Jitendra Kalsaria -M: Ron Mercer +M: Harish Patil +M: Sudarsana Kalluru +M: Dept-GELinuxNICDev@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported 
@@ -7546,13 +7554,12 @@ F: drivers/video/fbdev/aty/aty128fb.c RALINK RT2X00 WIRELESS LAN DRIVER P: rt2x00 project -M: Ivo van Doorn +M: Stanislaw Gruszka M: Helmut Schaa L: linux-wireless@vger.kernel.org L: users@rt2x00.serialmonkey.com (moderated for non-subscribers) W: http://rt2x00.serialmonkey.com/ S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ivd/rt2x00.git F: drivers/net/wireless/rt2x00/ RAMDISK RAM BLOCK DEVICE DRIVER @@ -8752,11 +8759,6 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ -STAGING - ET131X NETWORK DRIVER -M: Mark Einon -S: Odd Fixes -F: drivers/staging/et131x/ - STAGING - FLARION FT1000 DRIVERS M: Marek Belisko S: Odd Fixes diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index abe530f7029634e715a7392b40cab387b4d2abfa..831810583823b78f735a11b354c715461f376401 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -132,6 +132,11 @@ }; }; + cm: syscon@44e10000 { + compatible = "ti,am33xx-controlmodule", "syscon"; + reg = <0x44e10000 0x800>; + }; + intc: interrupt-controller@48200000 { compatible = "ti,am33xx-intc"; interrupt-controller; @@ -699,6 +704,7 @@ */ interrupts = <40 41 42 43>; ranges; + syscon = <&cm>; status = "disabled"; davinci_mdio: mdio@4a101000 { diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts index a357ce02a64e8cc78a1e0ba61457329f27605bd6..ea1f99b8eed688988fa9b7924c60389d2162dbaa 100644 --- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts +++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts @@ -45,3 +45,7 @@ &uart0 { status = "okay"; }; + +ð0 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi index 400c40fceccc7118722ff021e7711a6e9f9564dd..891d56b03922ad8237c306c80b56c5a936f8506d 100644 --- a/arch/arm/boot/dts/berlin2q.dtsi +++ b/arch/arm/boot/dts/berlin2q.dtsi @@ -114,6 +114,23 @@ #interrupt-cells = <3>; }; + eth0: ethernet@b90000 { + compatible = "marvell,pxa168-eth"; + reg = <0xb90000 0x10000>; + clocks = <&chip CLKID_GETH0>; + interrupts = ; + /* set by bootloader */ + local-mac-address = [00 00 00 00 00 00]; + #address-cells = <1>; + #size-cells = <0>; + phy-handle = <ðphy0>; + status = "disabled"; + + ethphy0: ethernet-phy@0 { + reg = <0>; + }; + }; + cpu-ctrl@dd0000 { compatible = "marvell,berlin-cpu-ctrl"; reg = <0xdd0000 0x10000>; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 888dd767a0b34dd3964d6cc530346398f1ed4dbc..f3e88c03b1e49d22d8cad7375e9f30daa617a4e0 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -779,6 +779,8 @@ <&clks IMX6SX_CLK_ENET_PTP>; clock-names = "ipg", "ahb", "ptp", "enet_clk_ref", "enet_out"; + fsl,num-tx-queues=<3>; + fsl,num-rx-queues=<3>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index 39f66e349445b400dab2fe6e02d4c1decacd8c1e..15910c9ddbc7c6cb33c30904a4107b387693e132 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -102,6 +102,22 @@ }; }; +&emac { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>; + + phy = <&phy0>; + phy-supply = <&vcc_rmii>; + + phy0: ethernet-phy@0 { + reg = <0>; + interrupt-parent = <&gpio3>; + interrupts = <26 IRQ_TYPE_LEVEL_LOW>; + }; +}; + &i2c1 { status = "okay"; clock-frequency = <400000>; @@ -240,6 +256,12 @@ }; }; + lan8720a { + phy_int: phy-int { + rockchip,pins = ; + 
}; + }; + ir-receiver { ir_recv_pin: ir-recv-pin { rockchip,pins = ; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 82732f5249b27161931809836719368863ea4047..ddaada788b4584429840953311cd806a7c2cff19 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -168,6 +168,24 @@ */ }; + emac { + emac_xfer: emac-xfer { + rockchip,pins = , /* tx_en */ + , /* txd1 */ + , /* txd0 */ + , /* rxd0 */ + , /* rxd1 */ + , /* mac_clk */ + , /* rx_err */ + ; /* crs_dvalid */ + }; + + emac_mdio: emac-mdio { + rockchip,pins = , + ; + }; + }; + i2c0 { i2c0_xfer: i2c0-xfer { rockchip,pins = , @@ -380,6 +398,10 @@ }; }; +&emac { + compatible = "rockchip,rk3188-emac"; +}; + &global_timer { interrupts = ; }; diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi index 7332d12eb565a43822b5cfcaa9e4023c3fca7770..499468d42adac51007f6c663c6b2e0e64cd67ef6 100644 --- a/arch/arm/boot/dts/rk3xxx.dtsi +++ b/arch/arm/boot/dts/rk3xxx.dtsi @@ -152,6 +152,23 @@ status = "disabled"; }; + emac: ethernet@10204000 { + compatible = "snps,arc-emac"; + reg = <0x10204000 0x3c>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + + rockchip,grf = <&grf>; + + clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; + clock-names = "hclk", "macref"; + max-speed = <100>; + phy-mode = "rmii"; + + status = "disabled"; + }; + mmc0: dwmmc@10214000 { compatible = "rockchip,rk2928-dw-mshc"; reg = <0x10214000 0x1000>; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a37b989a2f91e302be654a7633082321e8f0c5bb..e1268f90502682c75dfd7a98e6ed15272ea8d512 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -12,11 +12,11 @@ #include #include #include -#include #include #include #include #include + #include #include #include @@ -174,6 +174,14 @@ static inline bool is_load_to_a(u16 inst) } } +static void jit_fill_hole(void *area, unsigned int size) +{ + u32 *ptr; + /* We are guaranteed to have aligned memory. 
*/ + for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) + *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); +} + static void build_prologue(struct jit_ctx *ctx) { u16 reg_set = saved_regs(ctx); @@ -859,9 +867,11 @@ static int build_body(struct jit_ctx *ctx) void bpf_jit_compile(struct bpf_prog *fp) { + struct bpf_binary_header *header; struct jit_ctx ctx; unsigned tmp_idx; unsigned alloc_size; + u8 *target_ptr; if (!bpf_jit_enable) return; @@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp) /* there's nothing after the epilogue on ARMv7 */ build_epilogue(&ctx); #endif - alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(alloc_size); - if (unlikely(ctx.target == NULL)) + header = bpf_jit_binary_alloc(alloc_size, &target_ptr, + 4, jit_fill_hole); + if (header == NULL) goto out; + ctx.target = (u32 *) target_ptr; ctx.idx = 0; + build_prologue(&ctx); build_body(&ctx); build_epilogue(&ctx); @@ -919,8 +931,9 @@ void bpf_jit_compile(struct bpf_prog *fp) /* there are 2 passes here */ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); + set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *)ctx.target; - fp->jited = 1; + fp->jited = true; out: kfree(ctx.offsets); return; @@ -928,7 +941,15 @@ void bpf_jit_compile(struct bpf_prog *fp) void bpf_jit_free(struct bpf_prog *fp) { - if (fp->jited) - module_free(NULL, fp->bpf_func); - kfree(fp); + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; + struct bpf_binary_header *header = (void *)addr; + + if (!fp->jited) + goto free_filter; + + set_memory_rw(addr, header->pages); + bpf_jit_binary_free(header); + +free_filter: + bpf_prog_unlock_free(fp); } diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index afb84621ff6f210eb390c37fdd929c66cc32cbb3..b2d7d92859d37e11c31f54c751a1cb78c419713b 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -114,6 +114,20 @@ #define ARM_INST_UMULL 0x00800090 +/* + * Use a suitable undefined instruction to use for ARM/Thumb2 faulting. + * We need to be careful not to conflict with those used by other modules + * (BUG, kprobes, etc) and the register_undef_hook() system. 
+ * + * The ARM architecture reference manual guarantees that the following + * instruction space will produce an undefined instruction exception on + * all CPUs: + * + * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx ARMv7-AR, section A5.4 + * Thumb: 1101 1110 xxxx xxxx ARMv7-M, section A5.2.6 + */ +#define ARM_INST_UDF 0xe7fddef1 + /* register */ #define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) /* immediate */ diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 3ec6e8e8d3683f4f76bf8aff68ccfb814441a9e3..f5b00f41c4f6d848e023e08f436e0b3208e1d265 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -499,7 +499,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq) d->netdev = &orion_ge00.dev; for (i = 0; i < d->nr_chips; i++) - d->chip[i].mii_bus = &orion_ge00_shared.dev; + d->chip[i].host_dev = &orion_ge00_shared.dev; orion_switch_device.dev.platform_data = d; platform_device_register(&orion_switch_device); diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index ad439c27300350ce47ef49e579d0688bc2ca48f4..c00585d915bc37840eeffc0283716d81e3f98f56 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -210,6 +210,10 @@ static void __init bcm47xx_register_bcma(void) pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err); err = bcma_host_soc_register(&bcm47xx_bus.bcma); + if (err) + panic("Failed to register BCMA bus (err %d)", err); + + err = bcma_host_soc_init(&bcm47xx_bus.bcma); if (err) panic("Failed to initialize BCMA bus (err %d)", err); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9f7ecbda250c2569daae114e9fae539407d00766..7edc08398c4a9e04dabb5ff9401ca106b6f55312 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -765,27 +765,6 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) return (u64)err << 32 | ntohl(ret); } -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_TYPE_MAX (7 << 5) -#else -#define PKT_TYPE_MAX 7 -#endif -static int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - u8 *ct = (u8 *)&skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); - return -1; -} - static int build_body(struct jit_ctx *ctx) { void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; @@ -793,7 +772,6 @@ static int build_body(struct jit_ctx *ctx) const struct sock_filter *inst; unsigned int i, off, load_order, condt; u32 k, b_off __maybe_unused; - int tmp; for (i = 0; i < prog->len; i++) { u16 code; @@ -1333,11 +1311,7 @@ static int build_body(struct jit_ctx *ctx) case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; - tmp = off = pkt_type_offset(); - - if (tmp < 0) - return -1; - emit_load_byte(r_tmp, r_skb, off, ctx); + emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); /* Keep only the last 3 bits */ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); #ifdef __BIG_ENDIAN_BITFIELD @@ -1418,7 +1392,7 @@ void bpf_jit_compile(struct bpf_prog *fp) bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); fp->bpf_func = (void *)ctx.target; - fp->jited = 1; + fp->jited = true; out: kfree(ctx.offsets); @@ -1428,5 +1402,6 @@ void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git 
a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 3afa6f4c195705569726ef927ddc8f392af097ac..cbae2dfd053cafc22540572652528d4dbedc91fe 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -686,7 +686,7 @@ void bpf_jit_compile(struct bpf_prog *fp) ((u64 *)image)[0] = (u64)code_base; ((u64 *)image)[1] = local_paca->kernel_toc; fp->bpf_func = (void *)image; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 61e45b7c04d7bdf122a195903face00cfb6c2ce3..c52ac77408ca5cac7ad50b6a2a8a3113d8af30ca 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -5,11 +5,9 @@ * * Author(s): Martin Schwidefsky */ -#include #include #include #include -#include #include #include #include @@ -148,6 +146,12 @@ struct bpf_jit { ret; \ }) +static void bpf_jit_fill_hole(void *area, unsigned int size) +{ + /* Fill whole space with illegal instructions */ + memset(area, 0, size); +} + static void bpf_jit_prologue(struct bpf_jit *jit) { /* Save registers and create stack frame if necessary */ @@ -223,37 +227,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) EMIT2(0x07fe); } -/* Helper to find the offset of pkt_type in sk_buff - * Make sure its still a 3bit field starting at the MSBs within a byte. - */ -#define PKT_TYPE_MAX 0xe0 -static int pkt_type_offset; - -static int __init bpf_pkt_type_offset_init(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - char *ct = (char *)&skb_probe; - int off; - - pkt_type_offset = -1; - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (!ct[off]) - continue; - if (ct[off] == PKT_TYPE_MAX) - pkt_type_offset = off; - else { - /* Found non matching bit pattern, fix needed. */ - WARN_ON_ONCE(1); - pkt_type_offset = -1; - return -1; - } - } - return 0; -} -device_initcall(bpf_pkt_type_offset_init); - /* * make sure we dont leak kernel information to user */ @@ -753,12 +726,10 @@ load_abs: if ((int) K < 0) } break; case BPF_ANC | SKF_AD_PKTTYPE: - if (pkt_type_offset < 0) - goto out; /* lhi %r5,0 */ EMIT4(0xa7580000); /* ic %r5,(%r2) */ - EMIT4_DISP(0x43502000, pkt_type_offset); + EMIT4_DISP(0x43502000, PKT_TYPE_OFFSET()); /* srl %r5,5 */ EMIT4_DISP(0x88500000, 5); break; @@ -780,38 +751,6 @@ load_abs: if ((int) K < 0) return -1; } -/* - * Note: for security reasons, bpf code will follow a randomly - * sized amount of illegal instructions. - */ -struct bpf_binary_header { - unsigned int pages; - u8 image[]; -}; - -static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize, - u8 **image_ptr) -{ - struct bpf_binary_header *header; - unsigned int sz, hole; - - /* Most BPF filters are really small, but if some of them fill a page, - * allow at least 128 extra bytes for illegal instructions. - */ - sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE); - header = module_alloc(sz); - if (!header) - return NULL; - memset(header, 0, sz); - header->pages = sz / PAGE_SIZE; - hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header)); - /* Insert random number of illegal instructions before BPF code - * and make sure the first instruction starts at an even address. 
- */ - *image_ptr = &header->image[(prandom_u32() % hole) & -2]; - return header; -} - void bpf_jit_compile(struct bpf_prog *fp) { struct bpf_binary_header *header = NULL; @@ -850,7 +789,8 @@ void bpf_jit_compile(struct bpf_prog *fp) size = prg_len + lit_len; if (size >= BPF_SIZE_MAX) goto out; - header = bpf_alloc_binary(size, &jit.start); + header = bpf_jit_binary_alloc(size, &jit.start, + 2, bpf_jit_fill_hole); if (!header) goto out; jit.prg = jit.mid = jit.start + prg_len; @@ -869,7 +809,7 @@ void bpf_jit_compile(struct bpf_prog *fp) if (jit.start) { set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *) jit.start; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -884,8 +824,8 @@ void bpf_jit_free(struct bpf_prog *fp) goto free_filter; set_memory_rw(addr, header->pages); - module_free(NULL, header); + bpf_jit_binary_free(header); free_filter: - kfree(fp); + bpf_prog_unlock_free(fp); } diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index e0f6c399f1d0f1225f9a375796ab791eef5e843f..6b135a8ab07bfdca9d8c2aa066d104a92bbb68e6 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -65,6 +65,7 @@ struct vio_dring_register { u16 options; #define VIO_TX_DRING 0x0001 #define VIO_RX_DRING 0x0002 +#define VIO_RX_DRING_DATA 0x0004 u16 resv; u32 num_cookies; struct ldc_trans_cookie cookies[0]; @@ -80,6 +81,8 @@ struct vio_dring_unregister { #define VIO_PKT_MODE 0x01 /* Packet based transfer */ #define VIO_DESC_MODE 0x02 /* In-band descriptors */ #define VIO_DRING_MODE 0x03 /* Descriptor rings */ +/* in vers >= 1.2, VIO_DRING_MODE is 0x04 and transfer mode is a bitmask */ +#define VIO_NEW_DRING_MODE 0x04 struct vio_dring_data { struct vio_msg_tag tag; @@ -205,10 +208,20 @@ struct vio_net_attr_info { u8 addr_type; #define VNET_ADDR_ETHERMAC 0x01 u16 ack_freq; - u32 resv1; + u8 plnk_updt; +#define PHYSLINK_UPDATE_NONE 0x00 +#define PHYSLINK_UPDATE_STATE 0x01 +#define PHYSLINK_UPDATE_STATE_ACK 0x02 +#define PHYSLINK_UPDATE_STATE_NACK 0x03 + u8 options; + u16 resv1; u64 addr; u64 mtu; - u64 resv2[3]; + u16 cflags; +#define VNET_LSO_IPV4_CAPAB 0x0001 + u16 ipv4_lso_maxlen; + u32 resv2; + u64 resv3[2]; }; #define VNET_NUM_MCAST 7 @@ -366,6 +379,33 @@ struct vio_driver_state { struct vio_driver_ops *ops; }; +static inline bool vio_version_before(struct vio_driver_state *vio, + u16 major, u16 minor) +{ + u32 have = (u32)vio->ver.major << 16 | vio->ver.minor; + u32 want = (u32)major << 16 | minor; + + return have < want; +} + +static inline bool vio_version_after(struct vio_driver_state *vio, + u16 major, u16 minor) +{ + u32 have = (u32)vio->ver.major << 16 | vio->ver.minor; + u32 want = (u32)major << 16 | minor; + + return have > want; +} + +static inline bool vio_version_after_eq(struct vio_driver_state *vio, + u16 major, u16 minor) +{ + u32 have = (u32)vio->ver.major << 16 | vio->ver.minor; + u32 want = (u32)major << 16 | minor; + + return have >= want; +} + #define viodbg(TYPE, f, a...) 
\ do { if (vio->debug & VIO_DEBUG_##TYPE) \ printk(KERN_INFO "vio: ID[%lu] " f, \ diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 66dacd56bb1000193284f570074415eff947ba7f..0af28b9846954593612cf512cf85afb167eacbfc 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -2159,7 +2159,7 @@ int ldc_map_single(struct ldc_channel *lp, state.pte_idx = (base - iommu->page_table); state.nc = 0; fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len); - BUG_ON(state.nc != 1); + BUG_ON(state.nc > ncookies); return state.nc; } diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index f8e7dd53e1c7d7f62912f22100445e323b77afe5..7ef081a185b124cfd24757e9c7f206191efccfaa 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -426,6 +426,13 @@ static int process_dreg_info(struct vio_driver_state *vio, if (vio->dr_state & VIO_DR_STATE_RXREG) goto send_nack; + /* v1.6 and higher, ACK with desired, supported mode, or NACK */ + if (vio_version_after_eq(vio, 1, 6)) { + if (!(pkt->options & VIO_TX_DRING)) + goto send_nack; + pkt->options = VIO_TX_DRING; + } + BUG_ON(vio->desc_buf); vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC); @@ -453,8 +460,11 @@ static int process_dreg_info(struct vio_driver_state *vio, pkt->tag.stype = VIO_SUBTYPE_ACK; pkt->dring_ident = ++dr->ident; - viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n", - (unsigned long long) pkt->dring_ident); + viodbg(HS, "SEND DRING_REG ACK ident[%llx] " + "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n", + (unsigned long long) pkt->dring_ident, + pkt->num_descr, pkt->descr_size, pkt->options, + pkt->num_cookies); len = (sizeof(*pkt) + (dr->ncookies * sizeof(struct ldc_trans_cookie))); diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index ece4af0575e9ac98f23f496396b929479987786e..f33e7c7a3bf74d48232e0c9708877cb63a6a4aaa 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -585,16 +585,11 @@ void bpf_jit_compile(struct bpf_prog *fp) case BPF_ANC | SKF_AD_PROTOCOL: emit_skb_load16(protocol, r_A); break; -#if 0 - /* GCC won't let us take the address of - * a bit field even though we very much - * know what we are doing here. - */ case BPF_ANC | SKF_AD_PKTTYPE: - __emit_skb_load8(pkt_type, r_A); + __emit_skb_load8(__pkt_type_offset, r_A); + emit_andi(r_A, PKT_TYPE_MAX, r_A); emit_alu_K(SRL, 5); break; -#endif case BPF_ANC | SKF_AD_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); @@ -629,7 +624,12 @@ void bpf_jit_compile(struct bpf_prog *fp) emit_and(r_A, r_TMP, r_A); } break; - + case BPF_LD | BPF_W | BPF_LEN: + emit_skb_load32(len, r_A); + break; + case BPF_LDX | BPF_W | BPF_LEN: + emit_skb_load32(len, r_X); + break; case BPF_LD | BPF_IMM: emit_loadimm(K, r_A); break; @@ -812,7 +812,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; if (image) { bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -823,5 +823,6 @@ void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5c8cb8043c5a2b19df3e524d9d35ee3ec1949cd5..d56cd1f515bdb8037e67e0cbd45d1bd7b5c0db7f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -8,12 +8,10 @@ * as published by the Free Software Foundation; version 2 * of the License. 
*/ -#include -#include #include #include #include -#include +#include int bpf_jit_enable __read_mostly; @@ -109,39 +107,6 @@ static inline void bpf_flush_icache(void *start, void *end) #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) -struct bpf_binary_header { - unsigned int pages; - /* Note : for security reasons, bpf code will follow a randomly - * sized amount of int3 instructions - */ - u8 image[]; -}; - -static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, - u8 **image_ptr) -{ - unsigned int sz, hole; - struct bpf_binary_header *header; - - /* Most of BPF filters are really small, - * but if some of them fill a page, allow at least - * 128 extra bytes to insert a random section of int3 - */ - sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE); - header = module_alloc(sz); - if (!header) - return NULL; - - memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ - - header->pages = sz / PAGE_SIZE; - hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); - - /* insert a random number of int3 instructions before BPF code */ - *image_ptr = &header->image[prandom_u32() % hole]; - return header; -} - /* pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_REG + 1) @@ -206,6 +171,12 @@ static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } +static void jit_fill_hole(void *area, unsigned int size) +{ + /* fill whole space with int3 instructions */ + memset(area, 0xcc, size); +} + struct jit_context { unsigned int cleanup_addr; /* epilogue code offset */ bool seen_ld_abs; @@ -393,6 +364,23 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); break; + case BPF_LD | BPF_IMM | BPF_DW: + if (insn[1].code != 0 || insn[1].src_reg != 0 || + insn[1].dst_reg != 0 || insn[1].off != 0) { + /* verifier must catch invalid insns */ + pr_err("invalid BPF_LD_IMM64 insn\n"); + return -EINVAL; + } + + /* movabsq %rax, imm64 */ + EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); + EMIT(insn[0].imm, 4); + EMIT(insn[1].imm, 4); + + insn++; + i++; + break; + /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_DIV | BPF_X: @@ -515,6 +503,48 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + + /* check for bad case when dst_reg == rcx */ + if (dst_reg == BPF_REG_4) { + /* mov r11, dst_reg */ + EMIT_mov(AUX_REG, dst_reg); + dst_reg = AUX_REG; + } + + if (src_reg != BPF_REG_4) { /* common case */ + EMIT1(0x51); /* push rcx */ + + /* mov rcx, src_reg */ + EMIT_mov(BPF_REG_4, src_reg); + } + + /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_LSH: b3 = 0xE0; break; + case BPF_RSH: b3 = 0xE8; break; + case BPF_ARSH: b3 = 0xF8; break; + } + EMIT2(0xD3, add_1reg(b3, dst_reg)); + + if (src_reg != BPF_REG_4) + EMIT1(0x59); /* pop rcx */ + + if (insn->dst_reg == BPF_REG_4) + /* mov dst_reg, r11 */ + 
EMIT_mov(insn->dst_reg, AUX_REG); + break; + case BPF_ALU | BPF_END | BPF_FROM_BE: switch (imm32) { case 16: @@ -900,7 +930,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) if (proglen <= 0) { image = NULL; if (header) - module_free(NULL, header); + bpf_jit_binary_free(header); goto out; } if (image) { @@ -910,7 +940,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog) break; } if (proglen == oldproglen) { - header = bpf_alloc_binary(proglen, &image); + header = bpf_jit_binary_alloc(proglen, &image, + 1, jit_fill_hole); if (!header) goto out; } @@ -924,29 +955,23 @@ void bpf_int_jit_compile(struct bpf_prog *prog) bpf_flush_icache(header, image + proglen); set_memory_ro((unsigned long)header, header->pages); prog->bpf_func = (void *)image; - prog->jited = 1; + prog->jited = true; } out: kfree(addrs); } -static void bpf_jit_free_deferred(struct work_struct *work) +void bpf_jit_free(struct bpf_prog *fp) { - struct bpf_prog *fp = container_of(work, struct bpf_prog, work); unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; struct bpf_binary_header *header = (void *)addr; + if (!fp->jited) + goto free_filter; + set_memory_rw(addr, header->pages); - module_free(NULL, header); - kfree(fp); -} + bpf_jit_binary_free(header); -void bpf_jit_free(struct bpf_prog *fp) -{ - if (fp->jited) { - INIT_WORK(&fp->work, bpf_jit_free_deferred); - schedule_work(&fp->work); - } else { - kfree(fp); - } +free_filter: + bpf_prog_unlock_free(fp); } diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index 028b78168d85ac51d70c6b750d59fc23ae7cc028..9fe1b5d002f0ad5a30039ba672c85dbbf1c3ca38 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -363,3 +363,4 @@ 354 i386 seccomp sys_seccomp 355 i386 getrandom sys_getrandom 356 i386 memfd_create sys_memfd_create +357 i386 bpf sys_bpf diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 35dd922727b9a577a5de0108dd5f9d79cce4b9c1..281150b539a2fb8eb802816612e768cf25dc9990 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -327,6 +327,7 @@ 318 common getrandom sys_getrandom 319 common memfd_create sys_memfd_create 320 common kexec_file_load sys_kexec_file_load +321 common bpf sys_bpf # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile index 91290f7f61b8c2b903d874345b6c32896da6114a..838b4b9d352ffc1d6c7790426bde8502712cc8dc 100644 --- a/drivers/bcma/Makefile +++ b/drivers/bcma/Makefile @@ -1,5 +1,6 @@ bcma-y += main.o scan.o core.o sprom.o bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o +bcma-y += driver_chipcommon_b.o bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o bcma-y += driver_pci.o diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 09b632ad0fe20ec97f3452df80e0cc525e7bfdd3..b6412b2d748dd11d6236963432c3984dd537f704 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -50,6 +50,10 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc); extern struct platform_device bcma_pflash_dev; #endif /* CONFIG_BCMA_DRIVER_MIPS */ +/* driver_chipcommon_b.c */ +int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); +void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb); + /* driver_chipcommon_pmu.c */ u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc); u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc); @@ -84,6 +88,20 @@ extern int 
__init bcma_host_pci_init(void); extern void __exit bcma_host_pci_exit(void); #endif /* CONFIG_BCMA_HOST_PCI */ +/* host_soc.c */ +#if defined(CONFIG_BCMA_HOST_SOC) && defined(CONFIG_OF) +extern int __init bcma_host_soc_register_driver(void); +extern void __exit bcma_host_soc_unregister_driver(void); +#else +static inline int __init bcma_host_soc_register_driver(void) +{ + return 0; +} +static inline void __exit bcma_host_soc_unregister_driver(void) +{ +} +#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */ + /* driver_pci.c */ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c new file mode 100644 index 0000000000000000000000000000000000000000..c20b5f4ff2901ebaac31b02c93cd2b78d21f99ea --- /dev/null +++ b/drivers/bcma/driver_chipcommon_b.c @@ -0,0 +1,61 @@ +/* + * Broadcom specific AMBA + * ChipCommon B Unit driver + * + * Copyright 2014, Hauke Mehrtens + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include "bcma_private.h" +#include +#include + +static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask, + u32 value, int timeout) +{ + unsigned long deadline = jiffies + timeout; + u32 val; + + do { + val = readl(addr); + if ((val & mask) == value) + return true; + cpu_relax(); + udelay(10); + } while (!time_after_eq(jiffies, deadline)); + + bcma_err(bus, "Timeout waiting for register %p\n", addr); + + return false; +} + +void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value) +{ + struct bcma_bus *bus = ccb->core->bus; + + writel(offset, ccb->mii + 0x00); + bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100); + writel(value, ccb->mii + 0x04); + bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100); +} +EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write); + +int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb) +{ + if (ccb->setup_done) + return 0; + + ccb->setup_done = 1; + ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE); + if (!ccb->mii) + return -ENOMEM; + + return 0; +} + +void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb) +{ + if (ccb->mii) + iounmap(ccb->mii); +} diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index aec9f850b4a80eba3ea7c0b06bd5df647829980e..57ce5fe65364feec68eecd3927bb0b08a69cdcb1 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -76,7 +76,7 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio) bcma_chipco_gpio_pullup(cc, 1 << gpio, 0); } -#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC) +#if IS_BUILTIN(CONFIG_BCM47XX) static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) { struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); @@ -215,8 +215,12 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->set = bcma_gpio_set_value; chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; -#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC) +#if IS_BUILTIN(CONFIG_BCM47XX) chip->to_irq = bcma_gpio_to_irq; +#endif +#if IS_BUILTIN(CONFIG_OF) + if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) + chip->of_node = cc->core->dev.of_node; #endif switch (cc->core->bus->chipinfo.id) { case BCMA_CHIP_ID_BCM5357: diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 11115bbe115cb120c778b4818d1e14a05f2e012e..004d6aa671cecbf82f0df9da6078f868683de361 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -21,6 +21,14 @@ #include #include +enum bcma_boot_dev { + BCMA_BOOT_DEV_UNK = 0, + 
BCMA_BOOT_DEV_ROM, + BCMA_BOOT_DEV_PARALLEL, + BCMA_BOOT_DEV_SERIAL, + BCMA_BOOT_DEV_NAND, +}; + static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data bcma_pflash_data = { @@ -229,11 +237,51 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore) } EXPORT_SYMBOL(bcma_cpu_clock); +static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus) +{ + struct bcma_drv_cc *cc = &bus->drv_cc; + u8 cc_rev = cc->core->id.rev; + + if (cc_rev == 42) { + struct bcma_device *core; + + core = bcma_find_core(bus, BCMA_CORE_NS_ROM); + if (core) { + switch (bcma_aread32(core, BCMA_IOST) & + BCMA_NS_ROM_IOST_BOOT_DEV_MASK) { + case BCMA_NS_ROM_IOST_BOOT_DEV_NOR: + return BCMA_BOOT_DEV_SERIAL; + case BCMA_NS_ROM_IOST_BOOT_DEV_NAND: + return BCMA_BOOT_DEV_NAND; + case BCMA_NS_ROM_IOST_BOOT_DEV_ROM: + default: + return BCMA_BOOT_DEV_ROM; + } + } + } else { + if (cc_rev == 38) { + if (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT) + return BCMA_BOOT_DEV_NAND; + else if (cc->status & BIT(5)) + return BCMA_BOOT_DEV_ROM; + } + + if ((cc->capabilities & BCMA_CC_CAP_FLASHT) == + BCMA_CC_FLASHT_PARA) + return BCMA_BOOT_DEV_PARALLEL; + else + return BCMA_BOOT_DEV_SERIAL; + } + + return BCMA_BOOT_DEV_SERIAL; +} + static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) { struct bcma_bus *bus = mcore->core->bus; struct bcma_drv_cc *cc = &bus->drv_cc; struct bcma_pflash *pflash = &cc->pflash; + enum bcma_boot_dev boot_dev; switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { case BCMA_CC_FLASHT_STSER: @@ -269,6 +317,20 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) bcma_nflash_init(cc); } } + + /* Determine flash type this SoC boots from */ + boot_dev = bcma_boot_dev(bus); + switch (boot_dev) { + case BCMA_BOOT_DEV_PARALLEL: + case BCMA_BOOT_DEV_SERIAL: + /* TODO: Init NVRAM using BCMA_SOC_FLASH2 window */ + break; + case BCMA_BOOT_DEV_NAND: + /* TODO: Init NVRAM using BCMA_SOC_FLASH1 window */ + break; + default: + break; + } } void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index f032ed6dd459564ece3a6cf119070c72ca2991cc..1e5ac0a796968b61e3de79a4ae2a82f9c2339916 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -208,6 +208,9 @@ static int bcma_host_pci_probe(struct pci_dev *dev, bus->boardinfo.vendor = bus->host_pci->subsystem_vendor; bus->boardinfo.type = bus->host_pci->subsystem_device; + /* Initialize struct, detect chip */ + bcma_init_bus(bus); + /* Register */ err = bcma_bus_register(bus); if (err) diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 3475e600011a5c5ce0ec6ffe2b41f4f68b04c0e7..335cbcfd945b83893a981428b573df7924f81c04 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -7,6 +7,9 @@ #include "bcma_private.h" #include "scan.h" +#include +#include +#include #include #include @@ -134,12 +137,16 @@ static void bcma_host_soc_block_write(struct bcma_device *core, static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset) { + if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n")) + return ~0; return readl(core->io_wrap + offset); } static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset, u32 value) { + if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n")) + return; writel(value, core->io_wrap + offset); } @@ -161,7 +168,6 @@ static const struct bcma_host_ops bcma_host_soc_ops = { int __init bcma_host_soc_register(struct bcma_soc *soc) { struct bcma_bus 
*bus = &soc->bus; - int err; /* iomap only first core. We have to read some register on this core * to scan the bus. @@ -173,11 +179,100 @@ int __init bcma_host_soc_register(struct bcma_soc *soc) /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = &bcma_host_soc_ops; + bus->host_pdev = NULL; - /* Register */ + /* Initialize struct, detect chip */ + bcma_init_bus(bus); + + return 0; +} + +int __init bcma_host_soc_init(struct bcma_soc *soc) +{ + struct bcma_bus *bus = &soc->bus; + int err; + + /* Scan bus and initialize it */ err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips); if (err) iounmap(bus->mmio); return err; } + +#ifdef CONFIG_OF +static int bcma_host_soc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct bcma_bus *bus; + int err; + + /* Alloc */ + bus = devm_kzalloc(dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + /* Map MMIO */ + bus->mmio = of_iomap(np, 0); + if (!bus->mmio) + return -ENOMEM; + + /* Host specific */ + bus->hosttype = BCMA_HOSTTYPE_SOC; + bus->ops = &bcma_host_soc_ops; + bus->host_pdev = pdev; + + /* Initialize struct, detect chip */ + bcma_init_bus(bus); + + /* Register */ + err = bcma_bus_register(bus); + if (err) + goto err_unmap_mmio; + + platform_set_drvdata(pdev, bus); + + return err; + +err_unmap_mmio: + iounmap(bus->mmio); + return err; +} + +static int bcma_host_soc_remove(struct platform_device *pdev) +{ + struct bcma_bus *bus = platform_get_drvdata(pdev); + + bcma_bus_unregister(bus); + iounmap(bus->mmio); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id bcma_host_soc_of_match[] = { + { .compatible = "brcm,bus-axi", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcma_host_soc_of_match); + +static struct platform_driver bcma_host_soc_driver = { + .driver = { + .name = "bcma-host-soc", + .of_match_table = bcma_host_soc_of_match, + }, + .probe = bcma_host_soc_probe, + .remove = bcma_host_soc_remove, +}; + +int __init bcma_host_soc_register_driver(void) +{ + return platform_driver_register(&bcma_host_soc_driver); +} + +void __exit bcma_host_soc_unregister_driver(void) +{ + platform_driver_unregister(&bcma_host_soc_driver); +} +#endif /* CONFIG_OF */ diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 0ff8d58831ef30fcbe7a5452baa8b00e3fdababa..d1656c2f70afa43198066681f6a02ded26eab80b 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -10,6 +10,7 @@ #include #include #include +#include MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); @@ -120,16 +121,103 @@ static void bcma_release_core_dev(struct device *dev) kfree(core); } -static int bcma_register_cores(struct bcma_bus *bus) +static bool bcma_is_core_needed_early(u16 core_id) +{ + switch (core_id) { + case BCMA_CORE_NS_NAND: + case BCMA_CORE_NS_QSPI: + return true; + } + + return false; +} + +#ifdef CONFIG_OF +static struct device_node *bcma_of_find_child_device(struct platform_device *parent, + struct bcma_device *core) +{ + struct device_node *node; + u64 size; + const __be32 *reg; + + if (!parent || !parent->dev.of_node) + return NULL; + + for_each_child_of_node(parent->dev.of_node, node) { + reg = of_get_address(node, 0, &size, NULL); + if (!reg) + continue; + if (of_translate_address(node, reg) == core->addr) + return node; + } + return NULL; +} + +static void bcma_of_fill_device(struct platform_device *parent, + struct bcma_device *core) +{ + struct device_node *node; + + node = 
bcma_of_find_child_device(parent, core); + if (node) + core->dev.of_node = node; +} +#else +static void bcma_of_fill_device(struct platform_device *parent, + struct bcma_device *core) +{ +} +#endif /* CONFIG_OF */ + +static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) +{ + int err; + + core->dev.release = bcma_release_core_dev; + core->dev.bus = &bcma_bus_type; + dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); + + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: + core->dev.parent = &bus->host_pci->dev; + core->dma_dev = &bus->host_pci->dev; + core->irq = bus->host_pci->irq; + break; + case BCMA_HOSTTYPE_SOC: + core->dev.dma_mask = &core->dev.coherent_dma_mask; + if (bus->host_pdev) { + core->dma_dev = &bus->host_pdev->dev; + core->dev.parent = &bus->host_pdev->dev; + bcma_of_fill_device(bus->host_pdev, core); + } else { + core->dma_dev = &core->dev; + } + break; + case BCMA_HOSTTYPE_SDIO: + break; + } + + err = device_register(&core->dev); + if (err) { + bcma_err(bus, "Could not register dev for core 0x%03X\n", + core->id.id); + put_device(&core->dev); + return; + } + core->dev_registered = true; +} + +static int bcma_register_devices(struct bcma_bus *bus) { struct bcma_device *core; - int err, dev_id = 0; + int err; list_for_each_entry(core, &bus->cores, list) { /* We support that cores ourself */ switch (core->id.id) { case BCMA_CORE_4706_CHIPCOMMON: case BCMA_CORE_CHIPCOMMON: + case BCMA_CORE_NS_CHIPCOMMON_B: case BCMA_CORE_PCI: case BCMA_CORE_PCIE: case BCMA_CORE_PCIE2: @@ -138,39 +226,16 @@ static int bcma_register_cores(struct bcma_bus *bus) continue; } + /* Early cores were already registered */ + if (bcma_is_core_needed_early(core->id.id)) + continue; + /* Only first GMAC core on BCM4706 is connected and working */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && core->core_unit > 0) continue; - core->dev.release = bcma_release_core_dev; - core->dev.bus = &bcma_bus_type; - dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id); - - switch (bus->hosttype) { - case BCMA_HOSTTYPE_PCI: - core->dev.parent = &bus->host_pci->dev; - core->dma_dev = &bus->host_pci->dev; - core->irq = bus->host_pci->irq; - break; - case BCMA_HOSTTYPE_SOC: - core->dev.dma_mask = &core->dev.coherent_dma_mask; - core->dma_dev = &core->dev; - break; - case BCMA_HOSTTYPE_SDIO: - break; - } - - err = device_register(&core->dev); - if (err) { - bcma_err(bus, - "Could not register dev for core 0x%03X\n", - core->id.id); - put_device(&core->dev); - continue; - } - core->dev_registered = true; - dev_id++; + bcma_register_core(bus, core); } #ifdef CONFIG_BCMA_DRIVER_MIPS @@ -247,6 +312,12 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_chipcommon_early_init(&bus->drv_cc); } + /* Cores providing flash access go before SPROM init */ + list_for_each_entry(core, &bus->cores, list) { + if (bcma_is_core_needed_early(core->id.id)) + bcma_register_core(bus, core); + } + /* Try to get SPROM */ err = bcma_sprom_get(bus); if (err == -ENOENT) { @@ -261,6 +332,13 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_chipcommon_init(&bus->drv_cc); } + /* Init CC core */ + core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B); + if (core) { + bus->drv_cc_b.core = core; + bcma_core_chipcommon_b_init(&bus->drv_cc_b); + } + /* Init MIPS core */ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K); if (core) { @@ -297,7 +375,7 @@ int bcma_bus_register(struct bcma_bus *bus) } /* Register found cores */ - bcma_register_cores(bus); + bcma_register_devices(bus); bcma_info(bus, "Bus 
registered\n"); @@ -315,6 +393,8 @@ void bcma_bus_unregister(struct bcma_bus *bus) else if (err) bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); + bcma_core_chipcommon_b_free(&bus->drv_cc_b); + cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON); @@ -334,8 +414,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core; struct bcma_device_id match; - bcma_init_bus(bus); - match.manuf = BCMA_MANUF_BCM; match.id = bcma_cc_core_id(bus); match.class = BCMA_CL_SIM; @@ -494,6 +572,11 @@ static int __init bcma_modinit(void) if (err) return err; + err = bcma_host_soc_register_driver(); + if (err) { + pr_err("SoC host initialization failed\n"); + err = 0; + } #ifdef CONFIG_BCMA_HOST_PCI err = bcma_host_pci_init(); if (err) { @@ -511,6 +594,7 @@ static void __exit bcma_modexit(void) #ifdef CONFIG_BCMA_HOST_PCI bcma_host_pci_exit(); #endif + bcma_host_soc_unregister_driver(); bus_unregister(&bcma_bus_type); } module_exit(bcma_modexit) diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index b4764c6bcf171ddb5273238fe9993d717ad18ec4..14b56561a36f10713cc5c470b838b25da4687999 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -276,7 +276,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, struct bcma_device *core) { u32 tmp; - u8 i, j; + u8 i, j, k; s32 cia, cib; u8 ports[2], wrappers[2]; @@ -314,6 +314,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, /* Some specific cores don't need wrappers */ switch (core->id.id) { case BCMA_CORE_4706_MAC_GBIT_COMMON: + case BCMA_CORE_NS_CHIPCOMMON_B: /* Not used yet: case BCMA_CORE_OOB_ROUTER: */ break; default: @@ -367,6 +368,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, core->addr = tmp; /* get & parse slave ports */ + k = 0; for (i = 0; i < ports[1]; i++) { for (j = 0; ; j++) { tmp = bcma_erom_get_addr_desc(bus, eromptr, @@ -376,9 +378,9 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, /* pr_debug("erom: slave port %d " * "has %d descriptors\n", i, j); */ break; - } else { - if (i == 0 && j == 0) - core->addr1 = tmp; + } else if (k < ARRAY_SIZE(core->addr_s)) { + core->addr_s[k] = tmp; + k++; } } } @@ -421,10 +423,13 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE); if (!core->io_addr) return -ENOMEM; - core->io_wrap = ioremap_nocache(core->wrap, BCMA_CORE_SIZE); - if (!core->io_wrap) { - iounmap(core->io_addr); - return -ENOMEM; + if (core->wrap) { + core->io_wrap = ioremap_nocache(core->wrap, + BCMA_CORE_SIZE); + if (!core->io_wrap) { + iounmap(core->io_addr); + return -ENOMEM; + } } } return 0; @@ -434,9 +439,7 @@ void bcma_init_bus(struct bcma_bus *bus) { s32 tmp; struct bcma_chipinfo *chipinfo = &(bus->chipinfo); - - if (bus->init_done) - return; + char chip_id[8]; INIT_LIST_HEAD(&bus->cores); bus->nr_cores = 0; @@ -447,10 +450,11 @@ void bcma_init_bus(struct bcma_bus *bus) chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT; chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT; chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT; - bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n", - chipinfo->id, chipinfo->rev, chipinfo->pkg); - bus->init_done = true; + snprintf(chip_id, ARRAY_SIZE(chip_id), + (chipinfo->id > 0x9999) ? 
"%d" : "0x%04X", chipinfo->id); + bcma_info(bus, "Found chip with id %s, rev 0x%02X and package 0x%02X\n", + chip_id, chipinfo->rev, chipinfo->pkg); } int bcma_bus_scan(struct bcma_bus *bus) @@ -460,8 +464,6 @@ int bcma_bus_scan(struct bcma_bus *bus) int err, core_num = 0; - bcma_init_bus(bus); - erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index fa7fd62ddffa429234323a4b0df7d5b1c5c5c6ed..4547dc238fc7022967f9ebc380b2e8e2d22d19ae 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -201,7 +201,7 @@ config BT_MRVL The core driver to support Marvell Bluetooth devices. This driver is required if you want to support - Marvell Bluetooth devices, such as 8688/8787/8797/8897. + Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897. Say Y here to compile Marvell Bluetooth driver into the kernel or say M to compile it as module. @@ -214,7 +214,7 @@ config BT_MRVL_SDIO The driver for Marvell Bluetooth chipsets with SDIO interface. This driver is required if you want to use Marvell Bluetooth - devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897 + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897 chipsets are supported. Say Y here to compile support for Marvell BT-over-SDIO driver diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a0d7355ef1275e865ad09d6e3503c953111cf4fd..d85ced27ebd58f5a40b1881032daced2aedf82b0 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300b) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x0220) }, + { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, @@ -138,6 +139,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index dfa5043e68bacc9de66f6ce12c565fb8f410f4b1..35e63aaa6f803e423a39e532adeff717b7279cb3 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -61,7 +61,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct bluecard_info_t { +struct bluecard_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -78,7 +78,7 @@ typedef struct bluecard_info_t { unsigned char ctrl_reg; unsigned long hw_state; /* Status of the hardware and LED control */ -} bluecard_info_t; +}; static int bluecard_config(struct pcmcia_device *link); @@ -157,7 +157,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev); static void bluecard_activity_led_timeout(u_long arg) { - bluecard_info_t *info = (bluecard_info_t *)arg; + struct bluecard_info *info = (struct bluecard_info *)arg; unsigned int iobase = info->p_dev->resource[0]->start; if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) @@ -173,7 +173,7 @@ static void 
bluecard_activity_led_timeout(u_long arg) } -static void bluecard_enable_activity_led(bluecard_info_t *info) +static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; @@ -215,7 +215,7 @@ static int bluecard_write(unsigned int iobase, unsigned int offset, __u8 *buf, i } -static void bluecard_write_wakeup(bluecard_info_t *info) +static void bluecard_write_wakeup(struct bluecard_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -368,7 +368,8 @@ static int bluecard_read(unsigned int iobase, unsigned int offset, __u8 *buf, in } -static void bluecard_receive(bluecard_info_t *info, unsigned int offset) +static void bluecard_receive(struct bluecard_info *info, + unsigned int offset) { unsigned int iobase; unsigned char buf[31]; @@ -497,7 +498,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset) static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) { - bluecard_info_t *info = dev_inst; + struct bluecard_info *info = dev_inst; unsigned int iobase; unsigned char reg; @@ -562,7 +563,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); struct sk_buff *skb; /* Ericsson baud rate command */ @@ -611,7 +612,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) static int bluecard_hci_flush(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -622,7 +623,7 @@ static int bluecard_hci_flush(struct hci_dev *hdev) static int bluecard_hci_open(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); @@ -643,7 +644,7 @@ static int bluecard_hci_open(struct hci_dev *hdev) static int bluecard_hci_close(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) return 0; @@ -663,7 +664,7 @@ static int bluecard_hci_close(struct hci_dev *hdev) static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -691,7 +692,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int bluecard_open(bluecard_info_t *info) +static int bluecard_open(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev; @@ -806,7 +807,7 @@ static int bluecard_open(bluecard_info_t *info) } -static int bluecard_close(bluecard_info_t *info) +static int bluecard_close(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev = info->hdev; @@ -833,7 +834,7 @@ static int bluecard_close(bluecard_info_t *info) static int bluecard_probe(struct pcmcia_device *link) { - bluecard_info_t *info; + struct bluecard_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -857,7 +858,7 @@ static 
void bluecard_detach(struct pcmcia_device *link) static int bluecard_config(struct pcmcia_device *link) { - bluecard_info_t *info = link->priv; + struct bluecard_info *info = link->priv; int i, n; link->config_index = 0x20; @@ -897,7 +898,7 @@ static int bluecard_config(struct pcmcia_device *link) static void bluecard_release(struct pcmcia_device *link) { - bluecard_info_t *info = link->priv; + struct bluecard_info *info = link->priv; bluecard_close(info); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 1d82721cf9c67e05653f00f6ae93e20d482c9ff4..4f7e8d400bc01a178bcbfcf499340b40545fb54f 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -67,7 +67,7 @@ MODULE_FIRMWARE("BT3CPCC.bin"); /* ======================== Local structures ======================== */ -typedef struct bt3c_info_t { +struct bt3c_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -80,7 +80,7 @@ typedef struct bt3c_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} bt3c_info_t; +}; static int bt3c_config(struct pcmcia_device *link); @@ -175,7 +175,7 @@ static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void bt3c_write_wakeup(bt3c_info_t *info) +static void bt3c_write_wakeup(struct bt3c_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -214,7 +214,7 @@ static void bt3c_write_wakeup(bt3c_info_t *info) } -static void bt3c_receive(bt3c_info_t *info) +static void bt3c_receive(struct bt3c_info *info) { unsigned int iobase; int size = 0, avail; @@ -336,7 +336,7 @@ static void bt3c_receive(bt3c_info_t *info) static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) { - bt3c_info_t *info = dev_inst; + struct bt3c_info *info = dev_inst; unsigned int iobase; int iir; irqreturn_t r = IRQ_NONE; @@ -388,7 +388,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) static int bt3c_hci_flush(struct hci_dev *hdev) { - bt3c_info_t *info = hci_get_drvdata(hdev); + struct bt3c_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -418,7 +418,7 @@ static int bt3c_hci_close(struct hci_dev *hdev) static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - bt3c_info_t *info = hci_get_drvdata(hdev); + struct bt3c_info *info = hci_get_drvdata(hdev); unsigned long flags; switch (bt_cb(skb)->pkt_type) { @@ -451,7 +451,8 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware, +static int bt3c_load_firmware(struct bt3c_info *info, + const unsigned char *firmware, int count) { char *ptr = (char *) firmware; @@ -536,7 +537,7 @@ static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware, } -static int bt3c_open(bt3c_info_t *info) +static int bt3c_open(struct bt3c_info *info) { const struct firmware *firmware; struct hci_dev *hdev; @@ -603,7 +604,7 @@ static int bt3c_open(bt3c_info_t *info) } -static int bt3c_close(bt3c_info_t *info) +static int bt3c_close(struct bt3c_info *info) { struct hci_dev *hdev = info->hdev; @@ -620,7 +621,7 @@ static int bt3c_close(bt3c_info_t *info) static int bt3c_probe(struct pcmcia_device *link) { - bt3c_info_t *info; + struct bt3c_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -683,7 +684,7 @@ static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev, static 
int bt3c_config(struct pcmcia_device *link) { - bt3c_info_t *info = link->priv; + struct bt3c_info *info = link->priv; int i; unsigned long try; @@ -724,7 +725,7 @@ static int bt3c_config(struct pcmcia_device *link) static void bt3c_release(struct pcmcia_device *link) { - bt3c_info_t *info = link->priv; + struct bt3c_info *info = link->priv; bt3c_close(info); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 3e683b1532598231dc156bbc69887dd2e836d4da..550bce089fa6c428467882d23c977ed40f37a532 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -84,7 +84,27 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { .int_read_to_clear = false, }; -static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = { +static const struct btmrvl_sdio_card_reg btmrvl_reg_8887 = { + .cfg = 0x00, + .host_int_mask = 0x08, + .host_intstatus = 0x0C, + .card_status = 0x5C, + .sq_read_base_addr_a0 = 0x6C, + .sq_read_base_addr_a1 = 0x6D, + .card_revision = 0xC8, + .card_fw_status0 = 0x88, + .card_fw_status1 = 0x89, + .card_rx_len = 0x8A, + .card_rx_unit = 0x8B, + .io_port_0 = 0xE4, + .io_port_1 = 0xE5, + .io_port_2 = 0xE6, + .int_read_to_clear = true, + .host_int_rsr = 0x04, + .card_misc_cfg = 0xD8, +}; + +static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = { .cfg = 0x00, .host_int_mask = 0x02, .host_intstatus = 0x03, @@ -128,10 +148,18 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .sd_blksz_fw_dl = 256, }; +static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = { + .helper = NULL, + .firmware = "mrvl/sd8887_uapsta.bin", + .reg = &btmrvl_reg_8887, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, +}; + static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { .helper = NULL, .firmware = "mrvl/sd8897_uapsta.bin", - .reg = &btmrvl_reg_88xx, + .reg = &btmrvl_reg_8897, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, }; @@ -149,6 +177,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, + /* Marvell SD8887 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136), + .driver_data = (unsigned long)&btmrvl_sdio_sd8887 }, /* Marvell SD8897 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), .driver_data = (unsigned long) &btmrvl_sdio_sd8897 }, @@ -1280,4 +1311,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); MODULE_FIRMWARE("mrvl/sd8688.bin"); MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index fb948f02eda5b1747a7ca7604133daf88a719ef1..abb4d2106db464198c328af1069342bd36d64d80 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -62,7 +62,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct btuart_info_t { +struct btuart_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -75,7 +75,7 @@ typedef struct btuart_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} btuart_info_t; +}; static int btuart_config(struct pcmcia_device *link); @@ -127,7 +127,7 @@ static int btuart_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void btuart_write_wakeup(btuart_info_t *info) +static void 
btuart_write_wakeup(struct btuart_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -172,7 +172,7 @@ static void btuart_write_wakeup(btuart_info_t *info) } -static void btuart_receive(btuart_info_t *info) +static void btuart_receive(struct btuart_info *info) { unsigned int iobase; int boguscount = 0; @@ -286,7 +286,7 @@ static void btuart_receive(btuart_info_t *info) static irqreturn_t btuart_interrupt(int irq, void *dev_inst) { - btuart_info_t *info = dev_inst; + struct btuart_info *info = dev_inst; unsigned int iobase; int boguscount = 0; int iir, lsr; @@ -340,7 +340,8 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst) } -static void btuart_change_speed(btuart_info_t *info, unsigned int speed) +static void btuart_change_speed(struct btuart_info *info, + unsigned int speed) { unsigned long flags; unsigned int iobase; @@ -397,7 +398,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed) static int btuart_hci_flush(struct hci_dev *hdev) { - btuart_info_t *info = hci_get_drvdata(hdev); + struct btuart_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -427,7 +428,7 @@ static int btuart_hci_close(struct hci_dev *hdev) static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - btuart_info_t *info = hci_get_drvdata(hdev); + struct btuart_info *info = hci_get_drvdata(hdev); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -455,7 +456,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int btuart_open(btuart_info_t *info) +static int btuart_open(struct btuart_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -521,7 +522,7 @@ static int btuart_open(btuart_info_t *info) } -static int btuart_close(btuart_info_t *info) +static int btuart_close(struct btuart_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -550,7 +551,7 @@ static int btuart_close(btuart_info_t *info) static int btuart_probe(struct pcmcia_device *link) { - btuart_info_t *info; + struct btuart_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -613,7 +614,7 @@ static int btuart_check_config_notpicky(struct pcmcia_device *p_dev, static int btuart_config(struct pcmcia_device *link) { - btuart_info_t *info = link->priv; + struct btuart_info *info = link->priv; int i; int try; @@ -654,7 +655,7 @@ static int btuart_config(struct pcmcia_device *link) static void btuart_release(struct pcmcia_device *link) { - btuart_info_t *info = link->priv; + struct btuart_info *info = link->priv; btuart_close(info); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 292c38e8aa1760c000cdcce9ee6d223e9f819559..edfc17bfcd44e02a8c8fb8748687cf265770863d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -165,6 +165,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, @@ -267,20 +268,24 @@ struct btusb_data { struct 
usb_interface *intf; struct usb_interface *isoc; - spinlock_t lock; - unsigned long flags; struct work_struct work; struct work_struct waker; + struct usb_anchor deferred; struct usb_anchor tx_anchor; + int tx_in_flight; + spinlock_t txlock; + struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; struct usb_anchor isoc_anchor; - struct usb_anchor deferred; - int tx_in_flight; - spinlock_t txlock; + spinlock_t rxlock; + + struct sk_buff *evt_skb; + struct sk_buff *acl_skb; + struct sk_buff *sco_skb; struct usb_endpoint_descriptor *intr_ep; struct usb_endpoint_descriptor *bulk_tx_ep; @@ -295,18 +300,189 @@ struct btusb_data { int suspend_count; }; -static int inc_tx(struct btusb_data *data) +static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; - int rv; - spin_lock_irqsave(&data->txlock, flags); - rv = test_bit(BTUSB_SUSPENDING, &data->flags); - if (!rv) - data->tx_in_flight++; - spin_unlock_irqrestore(&data->txlock, flags); + spin_lock_irqsave(&data->rxlock, flags); + + kfree_skb(data->evt_skb); + data->evt_skb = NULL; + + kfree_skb(data->acl_skb); + data->acl_skb = NULL; + + kfree_skb(data->sco_skb); + data->sco_skb = NULL; + + spin_unlock_irqrestore(&data->rxlock, flags); +} + +static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + int err = 0; + + spin_lock(&data->rxlock); + skb = data->evt_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + bt_cb(skb)->expect = HCI_EVENT_HDR_SIZE; + } + + len = min_t(uint, bt_cb(skb)->expect, count); + memcpy(skb_put(skb, len), buffer, len); + + count -= len; + buffer += len; + bt_cb(skb)->expect -= len; + + if (skb->len == HCI_EVENT_HDR_SIZE) { + /* Complete event header */ + bt_cb(skb)->expect = hci_event_hdr(skb)->plen; + + if (skb_tailroom(skb) < bt_cb(skb)->expect) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (bt_cb(skb)->expect == 0) { + /* Complete frame */ + hci_recv_frame(data->hdev, skb); + skb = NULL; + } + } + + data->evt_skb = skb; + spin_unlock(&data->rxlock); - return rv; + return err; +} + +static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + int err = 0; + + spin_lock(&data->rxlock); + skb = data->acl_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + bt_cb(skb)->expect = HCI_ACL_HDR_SIZE; + } + + len = min_t(uint, bt_cb(skb)->expect, count); + memcpy(skb_put(skb, len), buffer, len); + + count -= len; + buffer += len; + bt_cb(skb)->expect -= len; + + if (skb->len == HCI_ACL_HDR_SIZE) { + __le16 dlen = hci_acl_hdr(skb)->dlen; + + /* Complete ACL header */ + bt_cb(skb)->expect = __le16_to_cpu(dlen); + + if (skb_tailroom(skb) < bt_cb(skb)->expect) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (bt_cb(skb)->expect == 0) { + /* Complete frame */ + hci_recv_frame(data->hdev, skb); + skb = NULL; + } + } + + data->acl_skb = skb; + spin_unlock(&data->rxlock); + + return err; +} + +static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) +{ + struct sk_buff *skb; + int err = 0; + + spin_lock(&data->rxlock); + skb = data->sco_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_SCO_SIZE, GFP_ATOMIC); + if (!skb) { + err 
= -ENOMEM; + break; + } + + bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; + bt_cb(skb)->expect = HCI_SCO_HDR_SIZE; + } + + len = min_t(uint, bt_cb(skb)->expect, count); + memcpy(skb_put(skb, len), buffer, len); + + count -= len; + buffer += len; + bt_cb(skb)->expect -= len; + + if (skb->len == HCI_SCO_HDR_SIZE) { + /* Complete SCO header */ + bt_cb(skb)->expect = hci_sco_hdr(skb)->dlen; + + if (skb_tailroom(skb) < bt_cb(skb)->expect) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (bt_cb(skb)->expect == 0) { + /* Complete frame */ + hci_recv_frame(data->hdev, skb); + skb = NULL; + } + } + + data->sco_skb = skb; + spin_unlock(&data->rxlock); + + return err; } static void btusb_intr_complete(struct urb *urb) @@ -315,8 +491,8 @@ static void btusb_intr_complete(struct urb *urb) struct btusb_data *data = hci_get_drvdata(hdev); int err; - BT_DBG("%s urb %p status %d count %d", hdev->name, - urb, urb->status, urb->actual_length); + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; @@ -324,12 +500,14 @@ static void btusb_intr_complete(struct urb *urb) if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; - if (hci_recv_fragment(hdev, HCI_EVENT_PKT, - urb->transfer_buffer, - urb->actual_length) < 0) { + if (btusb_recv_intr(data, urb->transfer_buffer, + urb->actual_length) < 0) { BT_ERR("%s corrupted event packet", hdev->name); hdev->stat.err_rx++; } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) @@ -344,7 +522,7 @@ static void btusb_intr_complete(struct urb *urb) * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } } @@ -377,8 +555,7 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, - btusb_intr_complete, hdev, - data->intr_ep->bInterval); + btusb_intr_complete, hdev, data->intr_ep->bInterval); urb->transfer_flags |= URB_FREE_BUFFER; @@ -388,7 +565,7 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -403,8 +580,8 @@ static void btusb_bulk_complete(struct urb *urb) struct btusb_data *data = hci_get_drvdata(hdev); int err; - BT_DBG("%s urb %p status %d count %d", hdev->name, - urb, urb->status, urb->actual_length); + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; @@ -412,12 +589,14 @@ static void btusb_bulk_complete(struct urb *urb) if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; - if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, - urb->transfer_buffer, - urb->actual_length) < 0) { + if (btusb_recv_bulk(data, urb->transfer_buffer, + urb->actual_length) < 0) { BT_ERR("%s corrupted ACL packet", hdev->name); hdev->stat.err_rx++; } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) @@ -432,7 +611,7 @@ static void btusb_bulk_complete(struct urb *urb) * -ENODEV: device got disconnected */ if (err != 
-EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } } @@ -462,8 +641,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); - usb_fill_bulk_urb(urb, data->udev, pipe, - buf, size, btusb_bulk_complete, hdev); + usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, + btusb_bulk_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; @@ -474,7 +653,7 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -489,8 +668,8 @@ static void btusb_isoc_complete(struct urb *urb) struct btusb_data *data = hci_get_drvdata(hdev); int i, err; - BT_DBG("%s urb %p status %d count %d", hdev->name, - urb, urb->status, urb->actual_length); + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; @@ -505,13 +684,15 @@ static void btusb_isoc_complete(struct urb *urb) hdev->stat.byte_rx += length; - if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, - urb->transfer_buffer + offset, - length) < 0) { + if (btusb_recv_isoc(data, urb->transfer_buffer + offset, + length) < 0) { BT_ERR("%s corrupted SCO packet", hdev->name); hdev->stat.err_rx++; } } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) @@ -525,7 +706,7 @@ static void btusb_isoc_complete(struct urb *urb) * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } } @@ -580,12 +761,12 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, - hdev, data->isoc_rx_ep->bInterval); + hdev, data->isoc_rx_ep->bInterval); - urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; + urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; __fill_isoc_descriptor(urb, size, - le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); + le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); usb_anchor_urb(urb, &data->isoc_anchor); @@ -593,7 +774,7 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -605,11 +786,11 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) static void btusb_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; - struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct hci_dev *hdev = (struct hci_dev *)skb->dev; struct btusb_data *data = hci_get_drvdata(hdev); - BT_DBG("%s urb %p status %d count %d", hdev->name, - urb, urb->status, urb->actual_length); + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; @@ -632,10 +813,10 @@ static void btusb_tx_complete(struct urb *urb) static void btusb_isoc_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; - struct hci_dev *hdev = (struct hci_dev 
*) skb->dev; + struct hci_dev *hdev = (struct hci_dev *)skb->dev; - BT_DBG("%s urb %p status %d count %d", hdev->name, - urb, urb->status, urb->actual_length); + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; @@ -719,6 +900,8 @@ static int btusb_close(struct hci_dev *hdev) clear_bit(BTUSB_INTR_RUNNING, &data->flags); btusb_stop_traffic(data); + btusb_free_frags(data); + err = usb_autopm_get_interface(data->intf); if (err < 0) goto failed; @@ -738,122 +921,181 @@ static int btusb_flush(struct hci_dev *hdev) BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); + btusb_free_frags(data); return 0; } -static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; - int err; - BT_DBG("%s", hdev->name); + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); - if (!test_bit(HCI_RUNNING, &hdev->flags)) - return -EBUSY; + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) { + usb_free_urb(urb); + return ERR_PTR(-ENOMEM); + } - skb->dev = (void *) hdev; + dr->bRequestType = data->cmdreq_type; + dr->bRequest = 0; + dr->wIndex = 0; + dr->wValue = 0; + dr->wLength = __cpu_to_le16(skb->len); - switch (bt_cb(skb)->pkt_type) { - case HCI_COMMAND_PKT: - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); - if (!dr) { - usb_free_urb(urb); - return -ENOMEM; - } + pipe = usb_sndctrlpipe(data->udev, 0x00); - dr->bRequestType = data->cmdreq_type; - dr->bRequest = 0; - dr->wIndex = 0; - dr->wValue = 0; - dr->wLength = __cpu_to_le16(skb->len); + usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, + skb->data, skb->len, btusb_tx_complete, skb); - pipe = usb_sndctrlpipe(data->udev, 0x00); + skb->dev = (void *)hdev; - usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, - skb->data, skb->len, btusb_tx_complete, skb); + return urb; +} - hdev->stat.cmd_tx++; - break; +static struct urb *alloc_bulk_urb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned int pipe; - case HCI_ACLDATA_PKT: - if (!data->bulk_tx_ep) - return -ENODEV; + if (!data->bulk_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; + pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); - pipe = usb_sndbulkpipe(data->udev, - data->bulk_tx_ep->bEndpointAddress); + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_tx_complete, skb); - usb_fill_bulk_urb(urb, data->udev, pipe, - skb->data, skb->len, btusb_tx_complete, skb); + skb->dev = (void *)hdev; - hdev->stat.acl_tx++; - break; + return urb; +} - case HCI_SCODATA_PKT: - if (!data->isoc_tx_ep || hci_conn_num(hdev, SCO_LINK) < 1) - return -ENODEV; +static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + unsigned int pipe; - urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); - if (!urb) - return -ENOMEM; + if (!data->isoc_tx_ep) + return ERR_PTR(-ENODEV); - pipe = usb_sndisocpipe(data->udev, - data->isoc_tx_ep->bEndpointAddress); + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, 
GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); - usb_fill_int_urb(urb, data->udev, pipe, - skb->data, skb->len, btusb_isoc_tx_complete, - skb, data->isoc_tx_ep->bInterval); + pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); - urb->transfer_flags = URB_ISO_ASAP; + usb_fill_int_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_isoc_tx_complete, + skb, data->isoc_tx_ep->bInterval); - __fill_isoc_descriptor(urb, skb->len, - le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); + urb->transfer_flags = URB_ISO_ASAP; - hdev->stat.sco_tx++; - goto skip_waking; + __fill_isoc_descriptor(urb, skb->len, + le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); - default: - return -EILSEQ; - } + skb->dev = (void *)hdev; - err = inc_tx(data); - if (err) { - usb_anchor_urb(urb, &data->deferred); - schedule_work(&data->waker); - err = 0; - goto done; - } + return urb; +} + +static int submit_tx_urb(struct hci_dev *hdev, struct urb *urb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; -skip_waking: usb_anchor_urb(urb, &data->tx_anchor); - err = usb_submit_urb(urb, GFP_ATOMIC); + err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", - hdev->name, urb, -err); + hdev->name, urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(data->udev); } -done: usb_free_urb(urb); return err; } +static int submit_or_queue_tx_urb(struct hci_dev *hdev, struct urb *urb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + unsigned long flags; + bool suspending; + + spin_lock_irqsave(&data->txlock, flags); + suspending = test_bit(BTUSB_SUSPENDING, &data->flags); + if (!suspending) + data->tx_in_flight++; + spin_unlock_irqrestore(&data->txlock, flags); + + if (!suspending) + return submit_tx_urb(hdev, urb); + + usb_anchor_urb(urb, &data->deferred); + schedule_work(&data->waker); + + usb_free_urb(urb); + return 0; +} + +static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct urb *urb; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + urb = alloc_ctrl_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.cmd_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_ACLDATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.acl_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_SCODATA_PKT: + if (hci_conn_num(hdev, SCO_LINK) < 1) + return -ENODEV; + + urb = alloc_isoc_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.sco_tx++; + return submit_tx_urb(hdev, urb); + } + + return -EILSEQ; +} + static void btusb_notify(struct hci_dev *hdev, unsigned int evt) { struct btusb_data *data = hci_get_drvdata(hdev); @@ -930,6 +1172,7 @@ static void btusb_work(struct work_struct *work) if (hdev->voice_setting & 0x0020) { static const int alts[3] = { 2, 4, 5 }; + new_alts = alts[data->sco_num - 1]; } else { new_alts = data->sco_num; @@ -1002,7 +1245,7 @@ static int btusb_setup_csr(struct hci_dev *hdev) return -PTR_ERR(skb); } - rp = (struct hci_rp_read_local_version *) skb->data; + rp = (struct hci_rp_read_local_version *)skb->data; if (!rp->status) { if (le16_to_cpu(rp->manufacturer) != 10) { @@ -1040,7 +1283,7 @@ struct intel_version { } __packed; static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, - struct intel_version *ver) + 
struct intel_version *ver) { const struct firmware *fw; char fwname[64]; @@ -1216,7 +1459,7 @@ static int btusb_check_bdaddr_intel(struct hci_dev *hdev) return -EIO; } - rp = (struct hci_rp_read_bd_addr *) skb->data; + rp = (struct hci_rp_read_bd_addr *)skb->data; if (rp->status) { BT_ERR("%s Intel device address result failed (%02x)", hdev->name, rp->status); @@ -1346,6 +1589,7 @@ static int btusb_setup_intel(struct hci_dev *hdev) if (skb->data[0]) { u8 evt_status = skb->data[0]; + BT_ERR("%s enable Intel manufacturer mode event failed (%02x)", hdev->name, evt_status); kfree_skb(skb); @@ -1455,7 +1699,7 @@ static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: changing Intel device address failed (%ld)", - hdev->name, ret); + hdev->name, ret); return ret; } kfree_skb(skb); @@ -1530,19 +1774,19 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", - hdev->name, ret); + hdev->name, ret); goto done; } if (skb->len != sizeof(*ver)) { BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", - hdev->name); + hdev->name); kfree_skb(skb); ret = -EIO; goto done; } - ver = (struct hci_rp_read_local_version *) skb->data; + ver = (struct hci_rp_read_local_version *)skb->data; BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x " "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, ver->lmp_ver, ver->lmp_subver); @@ -1553,7 +1797,7 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: BCM: Download Minidrv command failed (%ld)", - hdev->name, ret); + hdev->name, ret); goto reset_fw; } kfree_skb(skb); @@ -1565,13 +1809,13 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) fw_size = fw->size; while (fw_size >= sizeof(*cmd)) { - cmd = (struct hci_command_hdr *) fw_ptr; + cmd = (struct hci_command_hdr *)fw_ptr; fw_ptr += sizeof(*cmd); fw_size -= sizeof(*cmd); if (fw_size < cmd->plen) { BT_ERR("%s: BCM: patch %s is corrupted", - hdev->name, fw_name); + hdev->name, fw_name); ret = -EINVAL; goto reset_fw; } @@ -1587,7 +1831,7 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: BCM: patch command %04x failed (%ld)", - hdev->name, opcode, ret); + hdev->name, opcode, ret); goto reset_fw; } kfree_skb(skb); @@ -1612,19 +1856,19 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", - hdev->name, ret); + hdev->name, ret); goto done; } if (skb->len != sizeof(*ver)) { BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", - hdev->name); + hdev->name); kfree_skb(skb); ret = -EIO; goto done; } - ver = (struct hci_rp_read_local_version *) skb->data; + ver = (struct hci_rp_read_local_version *)skb->data; BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x " "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, ver->lmp_ver, ver->lmp_subver); @@ -1636,19 +1880,19 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: HCI_OP_READ_BD_ADDR failed (%ld)", - hdev->name, ret); + hdev->name, ret); goto done; } if (skb->len != sizeof(*bda)) { BT_ERR("%s: HCI_OP_READ_BD_ADDR event length mismatch", - hdev->name); + hdev->name); kfree_skb(skb); ret = -EIO; goto done; } - bda = (struct hci_rp_read_bd_addr *) skb->data; + bda = 
(struct hci_rp_read_bd_addr *)skb->data; if (bda->status) { BT_ERR("%s: HCI_OP_READ_BD_ADDR error status (%02x)", hdev->name, bda->status); @@ -1683,7 +1927,7 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr) if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: BCM: Change address command failed (%ld)", - hdev->name, ret); + hdev->name, ret); return ret; } kfree_skb(skb); @@ -1692,7 +1936,7 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr) } static int btusb_probe(struct usb_interface *intf, - const struct usb_device_id *id) + const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; struct btusb_data *data; @@ -1707,6 +1951,7 @@ static int btusb_probe(struct usb_interface *intf, if (!id->driver_info) { const struct usb_device_id *match; + match = usb_match_id(intf, blacklist_table); if (match) id = match; @@ -1755,17 +2000,16 @@ static int btusb_probe(struct usb_interface *intf, data->udev = interface_to_usbdev(intf); data->intf = intf; - spin_lock_init(&data->lock); - INIT_WORK(&data->work, btusb_work); INIT_WORK(&data->waker, btusb_waker); + init_usb_anchor(&data->deferred); + init_usb_anchor(&data->tx_anchor); spin_lock_init(&data->txlock); - init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); init_usb_anchor(&data->isoc_anchor); - init_usb_anchor(&data->deferred); + spin_lock_init(&data->rxlock); hdev = hci_alloc_dev(); if (!hdev) @@ -1857,7 +2101,7 @@ static int btusb_probe(struct usb_interface *intf, if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, - data->isoc, data); + data->isoc, data); if (err < 0) { hci_free_dev(hdev); return err; @@ -1898,6 +2142,7 @@ static void btusb_disconnect(struct usb_interface *intf) else if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); + btusb_free_frags(data); hci_free_dev(hdev); } diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 2bd8fad172064af8f2211536fe8903e363ef33f1..78e10f0c65b28dc38d30336514b92614bc7e449c 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -62,7 +62,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct dtl1_info_t { +struct dtl1_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -78,7 +78,7 @@ typedef struct dtl1_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} dtl1_info_t; +}; static int dtl1_config(struct pcmcia_device *link); @@ -94,11 +94,11 @@ static int dtl1_config(struct pcmcia_device *link); #define RECV_WAIT_DATA 1 -typedef struct { +struct nsh { u8 type; u8 zero; u16 len; -} __packed nsh_t; /* Nokia Specific Header */ +} __packed; /* Nokia Specific Header */ #define NSHL 4 /* Nokia Specific Header Length */ @@ -126,7 +126,7 @@ static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void dtl1_write_wakeup(dtl1_info_t *info) +static void dtl1_write_wakeup(struct dtl1_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -176,7 +176,7 @@ static void dtl1_write_wakeup(dtl1_info_t *info) } -static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) +static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb) { u8 flowmask = *(u8 *)skb->data; int i; @@ -199,10 +199,10 @@ static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) } -static void dtl1_receive(dtl1_info_t *info) +static void dtl1_receive(struct dtl1_info *info) { unsigned int 
iobase; - nsh_t *nsh; + struct nsh *nsh; int boguscount = 0; if (!info) { @@ -227,7 +227,7 @@ static void dtl1_receive(dtl1_info_t *info) } *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX); - nsh = (nsh_t *)info->rx_skb->data; + nsh = (struct nsh *)info->rx_skb->data; info->rx_count--; @@ -287,7 +287,7 @@ static void dtl1_receive(dtl1_info_t *info) static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) { - dtl1_info_t *info = dev_inst; + struct dtl1_info *info = dev_inst; unsigned int iobase; unsigned char msr; int boguscount = 0; @@ -365,7 +365,7 @@ static int dtl1_hci_open(struct hci_dev *hdev) static int dtl1_hci_flush(struct hci_dev *hdev) { - dtl1_info_t *info = hci_get_drvdata(hdev); + struct dtl1_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -387,9 +387,9 @@ static int dtl1_hci_close(struct hci_dev *hdev) static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - dtl1_info_t *info = hci_get_drvdata(hdev); + struct dtl1_info *info = hci_get_drvdata(hdev); struct sk_buff *s; - nsh_t nsh; + struct nsh nsh; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -436,7 +436,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int dtl1_open(dtl1_info_t *info) +static int dtl1_open(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -505,7 +505,7 @@ static int dtl1_open(dtl1_info_t *info) } -static int dtl1_close(dtl1_info_t *info) +static int dtl1_close(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -534,7 +534,7 @@ static int dtl1_close(dtl1_info_t *info) static int dtl1_probe(struct pcmcia_device *link) { - dtl1_info_t *info; + struct dtl1_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -552,7 +552,7 @@ static int dtl1_probe(struct pcmcia_device *link) static void dtl1_detach(struct pcmcia_device *link) { - dtl1_info_t *info = link->priv; + struct dtl1_info *info = link->priv; dtl1_close(info); pcmcia_disable_device(link); @@ -571,7 +571,7 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) static int dtl1_config(struct pcmcia_device *link) { - dtl1_info_t *info = link->priv; + struct dtl1_info *info = link->priv; int ret; /* Look for a generic full-sized window */ diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index caacb422995dd4a69e18585275f32904a17bf708..a22838669b4ed227d447925ea45337f5efd9f72d 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5) break; to_remove--; - seq = (seq - 1) % 8; + seq = (seq - 1) & 0x07; } if (seq != h5->rx_ack) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index e4056279166d3b4775aabfc10e4f786128a68d95..10cfce5119a9d2f4ef89c68967236285e7a8730e 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, return ERR_PTR(-EINVAL); entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.max_cqes) + if (entries > dev->mdev->caps.gen.max_cqes) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) int err; u32 fsel; - if 
(!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) + if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) return -ENOSYS; in = kzalloc(sizeof(*in), GFP_KERNEL); @@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) int uninitialized_var(cqe_size); unsigned long flags; - if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { + if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { pr_info("Firmware does not support resize CQ\n"); return -ENOSYS; } @@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) return -EINVAL; entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.max_cqes + 1) + if (entries > dev->mdev->caps.gen.max_cqes + 1) return -EINVAL; if (entries == ibcq->cqe + 1) diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index b514bbb5610ffea4643fedf708d61ddf695b5f0e..657af9a1167cc9ee8c38889ba8cef071c8892c5b 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) packet_error = be16_to_cpu(out_mad->status); - dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ? + dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ? MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; out: diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d8907b20522abcb8eb46b0266a8495523eb241f2..f3114d1132fb1b0d90f959e4771172c420b3762c 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -157,11 +157,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, struct mlx5_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; + struct mlx5_general_caps *gen; int err = -ENOMEM; int max_rq_sg; int max_sq_sg; u64 flags; + gen = &dev->mdev->caps.gen; in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); if (!in_mad || !out_mad) @@ -183,7 +185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; - flags = dev->mdev->caps.flags; + flags = gen->flags; if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) @@ -213,30 +215,31 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; - props->page_size_cap = dev->mdev->caps.min_page_sz; - props->max_qp = 1 << dev->mdev->caps.log_max_qp; - props->max_qp_wr = dev->mdev->caps.max_wqes; - max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); - max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / + props->page_size_cap = gen->min_page_sz; + props->max_qp = 1 << gen->log_max_qp; + props->max_qp_wr = gen->max_wqes; + max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / sizeof(struct mlx5_wqe_data_seg); props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_cq = 1 << dev->mdev->caps.log_max_cq; - props->max_cqe = dev->mdev->caps.max_cqes - 1; - props->max_mr = 1 << dev->mdev->caps.log_max_mkey; - props->max_pd = 1 << dev->mdev->caps.log_max_pd; - props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp; - 
props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp; + props->max_cq = 1 << gen->log_max_cq; + props->max_cqe = gen->max_cqes - 1; + props->max_mr = 1 << gen->log_max_mkey; + props->max_pd = 1 << gen->log_max_pd; + props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp; + props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp; + props->max_srq = 1 << gen->log_max_srq; + props->max_srq_wr = gen->max_srq_wqes - 1; + props->local_ca_ack_delay = gen->local_ca_ack_delay; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; - props->max_srq = 1 << dev->mdev->caps.log_max_srq; - props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1; props->max_srq_sge = max_rq_sg - 1; props->max_fast_reg_page_list_len = (unsigned int)-1; - props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay; + props->local_ca_ack_delay = gen->local_ca_ack_delay; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); - props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg; - props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg; + props->max_mcast_grp = 1 << gen->log_max_mcg; + props->max_mcast_qp_attach = gen->max_qp_mcg; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ @@ -254,10 +257,12 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct mlx5_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; + struct mlx5_general_caps *gen; int ext_active_speed; int err = -ENOMEM; - if (port < 1 || port > dev->mdev->caps.num_ports) { + gen = &dev->mdev->caps.gen; + if (port < 1 || port > gen->num_ports) { mlx5_ib_warn(dev, "invalid port number %d\n", port); return -EINVAL; } @@ -288,8 +293,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); props->gid_tbl_len = out_mad->data[50]; - props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg; - props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len; + props->max_msg_sz = 1 << gen->log_max_msg; + props->pkey_tbl_len = gen->port[port - 1].pkey_table_len; props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; @@ -316,7 +321,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == 4) { - if (dev->mdev->caps.ext_port_cap[port - 1] & + if (gen->ext_port_cap[port - 1] & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; @@ -470,6 +475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_req_v2 req; struct mlx5_ib_alloc_ucontext_resp resp; struct mlx5_ib_ucontext *context; + struct mlx5_general_caps *gen; struct mlx5_uuar_info *uuari; struct mlx5_uar *uars; int gross_uuars; @@ -480,6 +486,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, int i; size_t reqlen; + gen = &dev->mdev->caps.gen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); @@ -512,14 +519,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; gross_uuars = num_uars * 
MLX5_BF_REGS_PER_PAGE; - resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp; - resp.bf_reg_size = dev->mdev->caps.bf_reg_size; + resp.qp_tab_size = 1 << gen->log_max_qp; + resp.bf_reg_size = gen->bf_reg_size; resp.cache_line_size = L1_CACHE_BYTES; - resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz; - resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz; - resp.max_send_wqebb = dev->mdev->caps.max_wqes; - resp.max_recv_wr = dev->mdev->caps.max_wqes; - resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes; + resp.max_sq_desc_sz = gen->max_sq_desc_sz; + resp.max_rq_desc_sz = gen->max_rq_desc_sz; + resp.max_send_wqebb = gen->max_wqes; + resp.max_recv_wr = gen->max_wqes; + resp.max_srq_recv_wr = gen->max_srq_wqes; context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) @@ -565,7 +572,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, mutex_init(&context->db_page_mutex); resp.tot_uuars = req.total_num_uuars; - resp.num_ports = dev->mdev->caps.num_ports; + resp.num_ports = gen->num_ports; err = ib_copy_to_udata(udata, &resp, sizeof(resp) - sizeof(resp.reserved)); if (err) @@ -967,9 +974,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, static void get_ext_port_caps(struct mlx5_ib_dev *dev) { + struct mlx5_general_caps *gen; int port; - for (port = 1; port <= dev->mdev->caps.num_ports; port++) + gen = &dev->mdev->caps.gen; + for (port = 1; port <= gen->num_ports; port++) mlx5_query_ext_port_caps(dev, port); } @@ -977,9 +986,11 @@ static int get_port_caps(struct mlx5_ib_dev *dev) { struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; + struct mlx5_general_caps *gen; int err = 0; int port; + gen = &dev->mdev->caps.gen; pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); if (!pprops) goto out; @@ -994,14 +1005,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev) goto out; } - for (port = 1; port <= dev->mdev->caps.num_ports; port++) { + for (port = 1; port <= gen->num_ports; port++) { err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); if (err) { mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); break; } - dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; - dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + gen->port[port - 1].pkey_table_len = dprops->max_pkeys; + gen->port[port - 1].gid_table_len = pprops->gid_tbl_len; mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", dprops->max_pkeys, pprops->gid_tbl_len); } @@ -1279,8 +1290,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; - dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey; - dev->num_ports = mdev->caps.num_ports; + dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey; + dev->num_ports = mdev->caps.gen.num_ports; dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; dev->ib_dev.dma_device = &mdev->pdev->dev; @@ -1355,7 +1366,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { + if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) { dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; dev->ib_dev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 
8c574b63d77b900768a71ffe334d5cf046b7d83d..dbfe498870c1e7d006e35979eab2f8b277b6a9df 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { + struct mlx5_general_caps *gen; int wqe_size; int wq_size; + gen = &dev->mdev->caps.gen; /* Sanity check RQ size before proceeding */ - if (cap->max_recv_wr > dev->mdev->caps.max_wqes) + if (cap->max_recv_wr > gen->max_wqes) return -EINVAL; if (!has_rq) { @@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); qp->rq.wqe_cnt = wq_size / wqe_size; - if (wqe_size > dev->mdev->caps.max_rq_desc_sz) { + if (wqe_size > gen->max_rq_desc_sz) { mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", wqe_size, - dev->mdev->caps.max_rq_desc_sz); + gen->max_rq_desc_sz); return -EINVAL; } qp->rq.wqe_shift = ilog2(wqe_size); @@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) { + struct mlx5_general_caps *gen; int wqe_size; int wq_size; + gen = &dev->mdev->caps.gen; if (!attr->cap.max_send_wr) return 0; @@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, if (wqe_size < 0) return wqe_size; - if (wqe_size > dev->mdev->caps.max_sq_desc_sz) { + if (wqe_size > gen->max_sq_desc_sz) { mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", - wqe_size, dev->mdev->caps.max_sq_desc_sz); + wqe_size, gen->max_sq_desc_sz); return -EINVAL; } @@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; - if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) { + if (qp->sq.wqe_cnt > gen->max_wqes) { mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", - qp->sq.wqe_cnt, dev->mdev->caps.max_wqes); + qp->sq.wqe_cnt, gen->max_wqes); return -ENOMEM; } qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); @@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { + struct mlx5_general_caps *gen; int desc_sz = 1 << qp->sq.wqe_shift; - if (desc_sz > dev->mdev->caps.max_sq_desc_sz) { + gen = &dev->mdev->caps.gen; + if (desc_sz > gen->max_sq_desc_sz) { mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", - desc_sz, dev->mdev->caps.max_sq_desc_sz); + desc_sz, gen->max_sq_desc_sz); return -EINVAL; } @@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, qp->sq.wqe_cnt = ucmd->sq_wqe_count; - if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) { + if (qp->sq.wqe_cnt > gen->max_wqes) { mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", - qp->sq.wqe_cnt, dev->mdev->caps.max_wqes); + qp->sq.wqe_cnt, gen->max_wqes); return -EINVAL; } @@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_resources *devr = &dev->devr; struct mlx5_ib_create_qp_resp resp; struct mlx5_create_qp_mbox_in *in; + struct mlx5_general_caps *gen; struct mlx5_ib_create_qp ucmd; int inlen = sizeof(*in); int err; + gen = &dev->mdev->caps.gen; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); 
spin_lock_init(&qp->rq.lock); if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { + if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); return -EINVAL; } else { @@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, mlx5_ib_dbg(dev, "invalid rq params\n"); return -EINVAL; } - if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) { + if (ucmd.sq_wqe_count > gen->max_wqes) { mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", - ucmd.sq_wqe_count, dev->mdev->caps.max_wqes); + ucmd.sq_wqe_count, gen->max_wqes); return -EINVAL; } err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); @@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { + struct mlx5_general_caps *gen; struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; @@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } + gen = &dev->mdev->caps.gen; switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: - if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { + if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return ERR_PTR(-ENOSYS); } @@ -1272,6 +1282,9 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { + struct mlx5_general_caps *gen; + + gen = &dev->mdev->caps.gen; if (rate == IB_RATE_PORT_CURRENT) { return 0; } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { @@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) } else { while (rate != IB_RATE_2_5_GBPS && !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - dev->mdev->caps.stat_rate_support)) + gen->stat_rate_support)) --rate; } @@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, u32 path_flags, const struct ib_qp_attr *attr) { + struct mlx5_general_caps *gen; int err; + gen = &dev->mdev->caps.gen; path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0; @@ -1318,9 +1333,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, path->port = port; if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) { + if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { pr_err(KERN_ERR "sgid_index (%u) too large. 
max is %d\n", - ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len); + ah->grh.sgid_index, gen->port[port - 1].gid_table_len); return -EINVAL; } @@ -1492,6 +1507,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; + struct mlx5_general_caps *gen; struct mlx5_modify_qp_mbox_in *in; struct mlx5_ib_pd *pd; enum mlx5_qp_state mlx5_cur, mlx5_new; @@ -1500,6 +1516,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, int mlx5_st; int err; + gen = &dev->mdev->caps.gen; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) return -ENOMEM; @@ -1539,7 +1556,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, err = -EINVAL; goto out; } - context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg; + context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg; } if (attr_mask & IB_QP_DEST_QPN) @@ -1685,9 +1702,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; + struct mlx5_general_caps *gen; int err = -EINVAL; int port; + gen = &dev->mdev->caps.gen; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; @@ -1699,21 +1718,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; if ((attr_mask & IB_QP_PORT) && - (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports)) + (attr->port_num == 0 || attr->port_num > gen->num_ports)) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; - if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len) + if (attr->pkey_index >= gen->port[port - 1].pkey_table_len) goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp) + attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp)) goto out; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp) + attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp)) goto out; if (cur_state == new_state && cur_state == IB_QPS_RESET) { @@ -2893,7 +2912,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); ib_ah_attr->port_num = path->port; - if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) + if (ib_ah_attr->port_num == 0 || + ib_ah_attr->port_num > dev->caps.gen.num_ports) return; ib_ah_attr->sl = path->sl & 0xf; @@ -3011,10 +3031,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_general_caps *gen; struct mlx5_ib_xrcd *xrcd; int err; - if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) + gen = &dev->mdev->caps.gen; + if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 70bd131ba64625fa2809a9357a184c2c06803f3e..97cc1baaa8e3e8d1a3ea2c8b66dd344b14dc470b 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -238,6 +238,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_general_caps *gen; struct mlx5_ib_srq *srq; int desc_size; int buf_size; @@ 
-247,11 +248,12 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, int is_xrc; u32 flgs, xrcdn; + gen = &dev->mdev->caps.gen; /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) { + if (init_attr->attr.max_wr >= gen->max_srq_wqes) { mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", init_attr->attr.max_wr, - dev->mdev->caps.max_srq_wqes); + gen->max_srq_wqes); return ERR_PTR(-EINVAL); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 13e6e04315920d7f361129945332cdbf41f8a930..58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1364,7 +1364,7 @@ void ipoib_setup(struct net_device *dev) dev->tx_queue_len = ipoib_sendq_size * 2; dev->features = (NETIF_F_VLAN_CHALLENGED | NETIF_F_HIGHDMA); - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c index 6e797e502cfaf0fc2c2459de6e3d80885c145117..4073d1684d07ef22a78dfd2ef966ad2d62055757 100644 --- a/drivers/isdn/capi/capiutil.c +++ b/drivers/isdn/capi/capiutil.c @@ -205,11 +205,8 @@ static unsigned command_2_index(unsigned c, unsigned sc) { if (c & 0x80) c = 0x9 + (c & 0x0f); - else if (c <= 0x0f); else if (c == 0x41) c = 0x9 + 0x1; - else if (c == 0xff) - c = 0x00; return (sc & 3) * (0x9 + 0x9) + c; } diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index b7ae0a0dd5b6814c61332d3f56b4b7626754ddea..aecec6d3246370004a0e9434a1f58303e96b7c46 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2365,7 +2365,7 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[0].desc; usb_fill_int_urb(ucs->urb_int_in, udev, usb_rcvintpipe(udev, - (endpoint->bEndpointAddress) & 0x0f), + usb_endpoint_num(endpoint)), ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, endpoint->bInterval); rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL); diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index 7459b127ddd59fe00fd2872337c2c8eb5845f7d5..dcae14aef3763cd416c2f6e3d807d44e7c5b4624 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -1243,7 +1243,8 @@ static void do_action(int action, struct cardstate *cs, break; case ACT_FAILDLE0: cs->cur_at_seq = SEQ_NONE; - dev_warn(cs->dev, "Could not leave DLE mode.\n"); + dev_warn(cs->dev, "Error leaving DLE mode.\n"); + cs->dle = 0; at_state2 = &cs->bcs[cs->curchannel].at_state; disconnect(&at_state2); schedule_init(cs, MS_RECOVER); diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index d0a41cb0cf627fe8c6687324e93f9dd3cb0384b7..82e91ba1acd343ed296d1b8969a83f86ade2173f 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -135,14 +135,13 @@ struct usb_cardstate { /* Output buffer */ unsigned char *bulk_out_buffer; int bulk_out_size; - __u8 bulk_out_endpointAddr; + int bulk_out_epnum; struct urb *bulk_out_urb; /* Input buffer */ unsigned char *rcvbuf; int rcvbuf_size; struct urb *read_urb; - __u8 int_in_endpointAddr; char bchars[6]; /* for request 0x19 */ }; @@ -466,7 +465,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, - ucs->bulk_out_endpointAddr & 0x0f), + 
ucs->bulk_out_epnum), cb->buf + cb->offset, count, gigaset_write_bulk_callback, cs); @@ -628,8 +627,7 @@ static int write_modem(struct cardstate *cs) if (cs->connected) { usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, - ucs->bulk_out_endpointAddr & - 0x0f), + ucs->bulk_out_epnum), ucs->bulk_out_buffer, count, gigaset_write_bulk_callback, cs); ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); @@ -714,7 +712,7 @@ static int gigaset_probe(struct usb_interface *interface, buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; - ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress; + ucs->bulk_out_epnum = usb_endpoint_num(endpoint); ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->bulk_out_buffer) { dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n"); @@ -741,7 +739,6 @@ static int gigaset_probe(struct usb_interface *interface, } buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->rcvbuf_size = buffer_size; - ucs->int_in_endpointAddr = endpoint->bEndpointAddress; ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->rcvbuf) { dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); @@ -750,8 +747,7 @@ static int gigaset_probe(struct usb_interface *interface, } /* Fill the interrupt urb and send it to the core */ usb_fill_int_urb(ucs->read_urb, udev, - usb_rcvintpipe(udev, - endpoint->bEndpointAddress & 0x0f), + usb_rcvintpipe(udev, usb_endpoint_num(endpoint)), ucs->rcvbuf, buffer_size, gigaset_read_int_callback, cs, endpoint->bInterval); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index a4f05c54c32b021cc04fb9ea65ece506a1e94e2a..87f7dff20ff66b269537b166642690722fd4accc 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1454,66 +1454,63 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members) #ifdef CMX_CONF_DEBUG if (0) { #else - if (members == 2) { + if (members == 2) { #endif - /* "other" becomes other party */ - other = (list_entry(conf->mlist.next, - struct dsp_conf_member, list))->dsp; - if (other == member) - other = (list_entry(conf->mlist.prev, - struct dsp_conf_member, list))->dsp; - o_q = other->rx_buff; /* received data */ - o_rr = (other->rx_R + len) & CMX_BUFF_MASK; - /* end of rx-pointer */ - o_r = (o_rr - rr + r) & CMX_BUFF_MASK; - /* start rx-pointer at current read position*/ - /* -> if echo is NOT enabled */ - if (!dsp->echo.software) { - /* - * -> copy other member's rx-data, - * if tx-data is available, mix - */ - while (o_r != o_rr && t != tt) { - *d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]]; - t = (t + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } - while (o_r != o_rr) { - *d++ = o_q[o_r]; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } - /* -> if echo is enabled */ - } else { - /* - * -> mix other member's rx-data with echo, - * if tx-data is available, mix - */ - while (r != rr && t != tt) { - sample = dsp_audio_law_to_s32[p[t]] + - dsp_audio_law_to_s32[q[r]] + - dsp_audio_law_to_s32[o_q[o_r]]; - if (sample < -32768) - sample = -32768; - else if (sample > 32767) - sample = 32767; - *d++ = dsp_audio_s16_to_law[sample & 0xffff]; - /* tx-data + rx_data + echo */ - t = (t + 1) & CMX_BUFF_MASK; - r = (r + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } - while (r != rr) { - *d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]]; - r = (r + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } + /* "other" becomes other party */ + other = (list_entry(conf->mlist.next, + struct dsp_conf_member, 
list))->dsp; + if (other == member) + other = (list_entry(conf->mlist.prev, + struct dsp_conf_member, list))->dsp; + o_q = other->rx_buff; /* received data */ + o_rr = (other->rx_R + len) & CMX_BUFF_MASK; + /* end of rx-pointer */ + o_r = (o_rr - rr + r) & CMX_BUFF_MASK; + /* start rx-pointer at current read position*/ + /* -> if echo is NOT enabled */ + if (!dsp->echo.software) { + /* + * -> copy other member's rx-data, + * if tx-data is available, mix + */ + while (o_r != o_rr && t != tt) { + *d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]]; + t = (t + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + while (o_r != o_rr) { + *d++ = o_q[o_r]; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + /* -> if echo is enabled */ + } else { + /* + * -> mix other member's rx-data with echo, + * if tx-data is available, mix + */ + while (r != rr && t != tt) { + sample = dsp_audio_law_to_s32[p[t]] + + dsp_audio_law_to_s32[q[r]] + + dsp_audio_law_to_s32[o_q[o_r]]; + if (sample < -32768) + sample = -32768; + else if (sample > 32767) + sample = 32767; + *d++ = dsp_audio_s16_to_law[sample & 0xffff]; + /* tx-data + rx_data + echo */ + t = (t + 1) & CMX_BUFF_MASK; + r = (r + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + while (r != rr) { + *d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]]; + r = (r + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; } - dsp->tx_R = t; - goto send_packet; } -#ifdef DSP_NEVER_DEFINED + dsp->tx_R = t; + goto send_packet; } -#endif /* PROCESS DATA (three or more members) */ /* -> if echo is NOT enabled */ if (!dsp->echo.software) { diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c6f6f69f8961a58f40771d3b80d200d8fd730967..4706386b7d34c2d719e30eb602defd49ee92a72d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -147,7 +147,6 @@ config MACVTAP config VXLAN tristate "Virtual eXtensible Local Area Network (VXLAN)" depends on INET - select NET_IP_TUNNEL select NET_UDP_TUNNEL ---help--- This allows one to create vxlan virtual interfaces that provide diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 10d0dba572c2e319f630638e204ed8c12ae38b5e..e90c6a7333d76b4f11e3fccfdcad094be141108e 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -74,7 +74,7 @@ static struct net_device * __init ipddp_init(void) if (!dev) return ERR_PTR(-ENOMEM); - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); strcpy(dev->name, "ipddp%d"); if (version_printed++ == 0) diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 3b790de6c97652791dbfadf0300d4f141c840d6d..09de683c167ecf692ac5178644e46b96ac3d7853 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -777,7 +777,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) ACOMMAND(CFLAGScmd | RESETclear); AINTMASK(0); spin_unlock(&lp->lock); - return IRQ_HANDLED; + return retval; } BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n", diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 7bb292e59559233096fd515cb12b63e7c401cc95..6c99ff0b0bdd3443fcd13bac63aea44e09f33a62 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -38,6 +38,7 @@ #include #include #include +#include #include @@ -61,115 +62,317 @@ module_param(clockp, int, 0); module_param(clockm, int, 0); MODULE_LICENSE("GPL"); +static void com20020pci_remove(struct pci_dev *pdev); + static int com20020pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *id) { + struct com20020_pci_card_info *ci; struct net_device *dev; struct arcnet_local *lp; - int ioaddr, err; + struct com20020_priv *priv; + int i, ioaddr, ret; + struct resource *r; if (pci_enable_device(pdev)) return -EIO; - dev = alloc_arcdev(device); - if (!dev) - return -ENOMEM; - dev->netdev_ops = &com20020_netdev_ops; + priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv), + GFP_KERNEL); + ci = (struct com20020_pci_card_info *)id->driver_data; + priv->ci = ci; - lp = netdev_priv(dev); + INIT_LIST_HEAD(&priv->list_dev); - pci_set_drvdata(pdev, dev); - // SOHARD needs PCI base addr 4 - if (pdev->vendor==0x10B5) { - BUGMSG(D_NORMAL, "SOHARD\n"); - ioaddr = pci_resource_start(pdev, 4); - } - else { - BUGMSG(D_NORMAL, "Contemporary Controls\n"); - ioaddr = pci_resource_start(pdev, 2); - } + for (i = 0; i < ci->devcount; i++) { + struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; + struct com20020_dev *card; - if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com20020-pci")) { - BUGMSG(D_INIT, "IO region %xh-%xh already allocated.\n", - ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1); - err = -EBUSY; - goto out_dev; - } + dev = alloc_arcdev(device); + if (!dev) { + ret = -ENOMEM; + goto out_port; + } - // Dummy access after Reset - // ARCNET controller needs this access to detect bustype - outb(0x00,ioaddr+1); - inb(ioaddr+1); - - dev->base_addr = ioaddr; - dev->irq = pdev->irq; - dev->dev_addr[0] = node; - lp->card_name = "PCI COM20020"; - lp->card_flags = id->driver_data; - lp->backplane = backplane; - lp->clockp = clockp & 7; - lp->clockm = clockm & 3; - lp->timeout = timeout; - lp->hw.owner = THIS_MODULE; - - if (ASTATUS() == 0xFF) { - BUGMSG(D_NORMAL, "IO address %Xh was reported by PCI BIOS, " - "but seems empty!\n", ioaddr); - err = -EIO; - goto out_port; - } - if (com20020_check(dev)) { - err = -EIO; - goto out_port; + dev->netdev_ops = &com20020_netdev_ops; + + lp = netdev_priv(dev); + + BUGMSG(D_NORMAL, "%s Controls\n", ci->name); + ioaddr = pci_resource_start(pdev, cm->bar) + cm->offset; + + r = devm_request_region(&pdev->dev, ioaddr, cm->size, + "com20020-pci"); + if (!r) { + pr_err("IO region %xh-%xh already allocated.\n", + ioaddr, ioaddr + cm->size - 1); + ret = -EBUSY; + goto out_port; + } + + /* Dummy access after Reset + * ARCNET controller needs + * this access to detect bustype + */ + outb(0x00, ioaddr + 1); + inb(ioaddr + 1); + + dev->base_addr = ioaddr; + dev->dev_addr[0] = node; + dev->irq = pdev->irq; + lp->card_name = "PCI COM20020"; + lp->card_flags = ci->flags; + lp->backplane = backplane; + lp->clockp = clockp & 7; + lp->clockm = clockm & 3; + lp->timeout = timeout; + lp->hw.owner = THIS_MODULE; + + if (ASTATUS() == 0xFF) { + pr_err("IO address %Xh is empty!\n", ioaddr); + ret = -EIO; + goto out_port; + } + if (com20020_check(dev)) { + ret = -EIO; + goto out_port; + } + + card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), + GFP_KERNEL); + if (!card) { + pr_err("%s out of memory!\n", __func__); + return -ENOMEM; + } + + card->index = i; + card->pci_priv = priv; + card->dev = dev; + + dev_set_drvdata(&dev->dev, card); + + ret = com20020_found(dev, IRQF_SHARED); + if (ret) + goto out_port; + + list_add(&card->list, &priv->list_dev); } - if ((err = com20020_found(dev, IRQF_SHARED)) != 0) - goto out_port; + pci_set_drvdata(pdev, priv); return 0; out_port: - release_region(ioaddr, ARCNET_TOTAL_SIZE); -out_dev: - free_netdev(dev); - return err; + com20020pci_remove(pdev); + return ret; } static void com20020pci_remove(struct pci_dev 
*pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - unregister_netdev(dev); - free_irq(dev->irq, dev); - release_region(dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(dev); + struct com20020_dev *card, *tmpcard; + struct com20020_priv *priv; + + priv = pci_get_drvdata(pdev); + + list_for_each_entry_safe(card, tmpcard, &priv->list_dev, list) { + struct net_device *dev = card->dev; + + unregister_netdev(dev); + free_irq(dev->irq, dev); + free_netdev(dev); + } } +static struct com20020_pci_card_info card_info_10mbit = { + .name = "ARC-PCI", + .devcount = 1, + .chan_map_tbl = { + { 2, 0x00, 0x08 }, + }, + .flags = ARC_CAN_10MBIT, +}; + +static struct com20020_pci_card_info card_info_5mbit = { + .name = "ARC-PCI", + .devcount = 1, + .chan_map_tbl = { + { 2, 0x00, 0x08 }, + }, + .flags = ARC_IS_5MBIT, +}; + +static struct com20020_pci_card_info card_info_sohard = { + .name = "PLX-PCI", + .devcount = 1, + /* SOHARD needs PCI base addr 4 */ + .chan_map_tbl = { + {4, 0x00, 0x08}, + }, + .flags = ARC_CAN_10MBIT, +}; + +static struct com20020_pci_card_info card_info_eae = { + .name = "EAE PLX-PCI", + .devcount = 2, + .chan_map_tbl = { + { 2, 0x00, 0x08 }, + { 2, 0x08, 0x08 } + }, + .flags = ARC_CAN_10MBIT, +}; + static const struct pci_device_id com20020pci_id_table[] = { - { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { 0x1571, 0xa009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa00a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa00b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa00c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa00d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa00e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_IS_5MBIT }, - { 0x1571, 0xa201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x1571, 0xa202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x1571, 0xa203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT }, - { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT }, - { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - {0,} + { + 0x1571, 0xa001, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0, + }, + { + 0x1571, 0xa002, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0, + }, + { + 0x1571, 0xa003, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0 + }, + { + 0x1571, 0xa004, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0, + }, + { + 0x1571, 0xa005, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0 + }, + { + 0x1571, 0xa006, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0 + }, + { + 0x1571, 0xa007, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0 + }, + { + 0x1571, 0xa008, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + 0 + }, + { + 0x1571, 0xa009, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa00a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa00b, + PCI_ANY_ID, 
PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa00c, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa00d, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa00e, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_5mbit + }, + { + 0x1571, 0xa201, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x1571, 0xa202, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x1571, 0xa203, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x1571, 0xa204, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x1571, 0xa205, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x1571, 0xa206, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x10B5, 0x9030, + 0x10B5, 0x2978, + 0, 0, + (kernel_ulong_t)&card_info_sohard + }, + { + 0x10B5, 0x9050, + 0x10B5, 0x2273, + 0, 0, + (kernel_ulong_t)&card_info_sohard + }, + { + 0x10B5, 0x9050, + 0x10B5, 0x3292, + 0, 0, + (kernel_ulong_t)&card_info_eae + }, + { + 0x14BA, 0x6000, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { + 0x10B5, 0x2200, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&card_info_10mbit + }, + { 0, } }; MODULE_DEVICE_TABLE(pci, com20020pci_id_table); diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 7b96c5f47e8ddfab20d17319a9b959d018404f2f..1a8437842fbca819efeca443433c9ab82bc3236f 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -149,11 +149,25 @@ int com20020_check(struct net_device *dev) return 0; } +static int com20020_set_hwaddr(struct net_device *dev, void *addr) +{ + int ioaddr = dev->base_addr; + struct arcnet_local *lp = netdev_priv(dev); + struct sockaddr *hwaddr = addr; + + memcpy(dev->dev_addr, hwaddr->sa_data, 1); + SET_SUBADR(SUB_NODE); + outb(dev->dev_addr[0], _XREG); + + return 0; +} + const struct net_device_ops com20020_netdev_ops = { .ndo_open = arcnet_open, .ndo_stop = arcnet_close, .ndo_start_xmit = arcnet_send_packet, .ndo_tx_timeout = arcnet_timeout, + .ndo_set_mac_address = com20020_set_hwaddr, .ndo_set_rx_mode = com20020_set_mc_list, }; diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index 1a790a20210d9bc6103c2c7401ed89695e5a336a..057d9582132a6f659c7d5c08fbd99f883e50fb07 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -112,10 +112,6 @@ static void com20020_detach(struct pcmcia_device *p_dev); /*====================================================================*/ -struct com20020_dev { - struct net_device *dev; -}; - static int com20020_probe(struct pcmcia_device *p_dev) { struct com20020_dev *info; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index ee2c73a9de397da448bd3ac617b04bf1a6ae7749..2110215f3528fb0cf41c7c188de3821d171e170b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -102,17 +102,20 @@ static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); static int ad_marker_send(struct port *port, struct bond_marker *marker); -static void ad_mux_machine(struct port *port); +static void ad_mux_machine(struct port *port, bool *update_slave_arr); static void 
ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); static void ad_periodic_machine(struct port *port); -static void ad_port_selection_logic(struct port *port); -static void ad_agg_selection_logic(struct aggregator *aggregator); +static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); +static void ad_agg_selection_logic(struct aggregator *aggregator, + bool *update_slave_arr); static void ad_clear_agg(struct aggregator *aggregator); static void ad_initialize_agg(struct aggregator *aggregator); static void ad_initialize_port(struct port *port, int lacp_fast); -static void ad_enable_collecting_distributing(struct port *port); -static void ad_disable_collecting_distributing(struct port *port); +static void ad_enable_collecting_distributing(struct port *port, + bool *update_slave_arr); +static void ad_disable_collecting_distributing(struct port *port, + bool *update_slave_arr); static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port); static void ad_marker_response_received(struct bond_marker *marker, @@ -233,24 +236,6 @@ static inline int __check_agg_selection_timer(struct port *port) return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; } -/** - * __get_state_machine_lock - lock the port's state machines - * @port: the port we're looking at - */ -static inline void __get_state_machine_lock(struct port *port) -{ - spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock)); -} - -/** - * __release_state_machine_lock - unlock the port's state machines - * @port: the port we're looking at - */ -static inline void __release_state_machine_lock(struct port *port) -{ - spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock)); -} - /** * __get_link_speed - get a port's speed * @port: the port we're looking at @@ -315,15 +300,14 @@ static u16 __get_link_speed(struct port *port) static u8 __get_duplex(struct port *port) { struct slave *slave = port->slave; - u8 retval; /* handling a special case: when the configuration starts with * link down, it sets the duplex to 0. */ - if (slave->link != BOND_LINK_UP) + if (slave->link != BOND_LINK_UP) { retval = 0x0; - else { + } else { switch (slave->duplex) { case DUPLEX_FULL: retval = 0x1; @@ -341,16 +325,6 @@ static u8 __get_duplex(struct port *port) return retval; } -/** - * __initialize_port_locks - initialize a port's STATE machine spinlock - * @port: the slave of the port we're looking at - */ -static inline void __initialize_port_locks(struct slave *slave) -{ - /* make sure it isn't called twice */ - spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock)); -} - /* Conversions */ /** @@ -825,8 +799,9 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) /** * ad_mux_machine - handle a port's mux state machine * @port: the port we're looking at + * @update_slave_arr: Does slave array need update? 
*/ -static void ad_mux_machine(struct port *port) +static void ad_mux_machine(struct port *port, bool *update_slave_arr) { mux_states_t last_state; @@ -930,7 +905,8 @@ static void ad_mux_machine(struct port *port) switch (port->sm_mux_state) { case AD_MUX_DETACHED: port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; - ad_disable_collecting_distributing(port); + ad_disable_collecting_distributing(port, + update_slave_arr); port->actor_oper_port_state &= ~AD_STATE_COLLECTING; port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; port->ntt = true; @@ -942,13 +918,15 @@ static void ad_mux_machine(struct port *port) port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION; port->actor_oper_port_state &= ~AD_STATE_COLLECTING; port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; - ad_disable_collecting_distributing(port); + ad_disable_collecting_distributing(port, + update_slave_arr); port->ntt = true; break; case AD_MUX_COLLECTING_DISTRIBUTING: port->actor_oper_port_state |= AD_STATE_COLLECTING; port->actor_oper_port_state |= AD_STATE_DISTRIBUTING; - ad_enable_collecting_distributing(port); + ad_enable_collecting_distributing(port, + update_slave_arr); port->ntt = true; break; default: @@ -1216,12 +1194,13 @@ static void ad_periodic_machine(struct port *port) /** * ad_port_selection_logic - select aggregation groups * @port: the port we're looking at + * @update_slave_arr: Does slave array need update? * * Select aggregation groups, and assign each port for its aggregator. The * selection logic is called in the initialization (after all the handshakes), * and after every lacpdu receive (if selected is off). */ -static void ad_port_selection_logic(struct port *port) +static void ad_port_selection_logic(struct port *port, bool *update_slave_arr) { struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator; struct port *last_port = NULL, *curr_port; @@ -1376,7 +1355,7 @@ static void ad_port_selection_logic(struct port *port) __agg_ports_are_ready(port->aggregator)); aggregator = __get_first_agg(port); - ad_agg_selection_logic(aggregator); + ad_agg_selection_logic(aggregator, update_slave_arr); } /* Decide if "agg" is a better choice for the new active aggregator that @@ -1464,6 +1443,7 @@ static int agg_device_up(const struct aggregator *agg) /** * ad_agg_selection_logic - select an aggregation group for a team * @aggregator: the aggregator we're looking at + * @update_slave_arr: Does slave array need update? * * It is assumed that only one aggregator may be selected for a team. * @@ -1486,7 +1466,8 @@ static int agg_device_up(const struct aggregator *agg) * __get_active_agg() won't work correctly. This function should be better * called with the bond itself, and retrieve the first agg from it. */ -static void ad_agg_selection_logic(struct aggregator *agg) +static void ad_agg_selection_logic(struct aggregator *agg, + bool *update_slave_arr) { struct aggregator *best, *active, *origin; struct bonding *bond = agg->slave->bond; @@ -1579,6 +1560,8 @@ static void ad_agg_selection_logic(struct aggregator *agg) __disable_port(port); } } + /* Slave array needs update. */ + *update_slave_arr = true; } /* if the selected aggregator is of join individuals @@ -1707,24 +1690,30 @@ static void ad_initialize_port(struct port *port, int lacp_fast) /** * ad_enable_collecting_distributing - enable a port's transmit/receive * @port: the port we're looking at + * @update_slave_arr: Does slave array need update?
* * Enable @port if it's in an active aggregator */ -static void ad_enable_collecting_distributing(struct port *port) +static void ad_enable_collecting_distributing(struct port *port, + bool *update_slave_arr) { if (port->aggregator->is_active) { pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); __enable_port(port); + /* Slave array needs update */ + *update_slave_arr = true; } } /** * ad_disable_collecting_distributing - disable a port's transmit/receive * @port: the port we're looking at + * @update_slave_arr: Does slave array need update? */ -static void ad_disable_collecting_distributing(struct port *port) +static void ad_disable_collecting_distributing(struct port *port, + bool *update_slave_arr) { if (port->aggregator && !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system), @@ -1733,6 +1722,8 @@ static void ad_disable_collecting_distributing(struct port *port) port->actor_port_number, port->aggregator->aggregator_identifier); __disable_port(port); + /* Slave array needs an update */ + *update_slave_arr = true; } } @@ -1843,7 +1834,6 @@ void bond_3ad_bind_slave(struct slave *slave) ad_initialize_port(port, bond->params.lacp_fast); - __initialize_port_locks(slave); port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave)->id; /* key is determined according to the link speed, duplex and user key(which @@ -1898,7 +1888,10 @@ void bond_3ad_unbind_slave(struct slave *slave) struct bonding *bond = slave->bond; struct slave *slave_iter; struct list_head *iter; + bool dummy_slave_update; /* Ignore this value as caller updates array */ + /* Sync against bond_3ad_state_machine_handler() */ + spin_lock_bh(&bond->mode_lock); aggregator = &(SLAVE_AD_INFO(slave)->aggregator); port = &(SLAVE_AD_INFO(slave)->port); @@ -1906,7 +1899,7 @@ void bond_3ad_unbind_slave(struct slave *slave) if (!port->slave) { netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n", slave->dev->name); - return; + goto out; } netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n", @@ -1979,7 +1972,8 @@ void bond_3ad_unbind_slave(struct slave *slave) ad_clear_agg(aggregator); if (select_new_active_agg) - ad_agg_selection_logic(__get_first_agg(port)); + ad_agg_selection_logic(__get_first_agg(port), + &dummy_slave_update); } else { netdev_warn(bond->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n"); } @@ -1994,7 +1988,8 @@ void bond_3ad_unbind_slave(struct slave *slave) /* select new active aggregator */ temp_aggregator = __get_first_agg(port); if (temp_aggregator) - ad_agg_selection_logic(temp_aggregator); + ad_agg_selection_logic(temp_aggregator, + &dummy_slave_update); } } } @@ -2024,7 +2019,8 @@ void bond_3ad_unbind_slave(struct slave *slave) if (select_new_active_agg) { netdev_info(bond->dev, "Removing an active aggregator\n"); /* select new active aggregator */ - ad_agg_selection_logic(__get_first_agg(port)); + ad_agg_selection_logic(__get_first_agg(port), + &dummy_slave_update); } } break; @@ -2032,6 +2028,9 @@ void bond_3ad_unbind_slave(struct slave *slave) } } port->slave = NULL; + +out: + spin_unlock_bh(&bond->mode_lock); } /** @@ -2056,8 +2055,13 @@ void bond_3ad_state_machine_handler(struct work_struct *work) struct slave *slave; struct port *port; bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; + bool update_slave_arr = false; - read_lock(&bond->lock); + /* Lock to protect data accessed by all (e.g., port->sm_vars) and + * against running with bond_3ad_unbind_slave. 
ad_rx_machine may run + * concurrently due to incoming LACPDU as well. + */ + spin_lock_bh(&bond->mode_lock); rcu_read_lock(); /* check if there are any slaves */ @@ -2079,7 +2083,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } aggregator = __get_first_agg(port); - ad_agg_selection_logic(aggregator); + ad_agg_selection_logic(aggregator, &update_slave_arr); } bond_3ad_set_carrier(bond); } @@ -2093,23 +2097,15 @@ void bond_3ad_state_machine_handler(struct work_struct *work) goto re_arm; } - /* Lock around state machines to protect data accessed - * by all (e.g., port->sm_vars). ad_rx_machine may run - * concurrently due to incoming LACPDU. - */ - __get_state_machine_lock(port); - ad_rx_machine(NULL, port); ad_periodic_machine(port); - ad_port_selection_logic(port); - ad_mux_machine(port); + ad_port_selection_logic(port, &update_slave_arr); + ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); /* turn off the BEGIN bit, since we already handled it */ if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; - - __release_state_machine_lock(port); } re_arm: @@ -2120,7 +2116,10 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } } rcu_read_unlock(); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->mode_lock); + + if (update_slave_arr) + bond_slave_arr_work_rearm(bond, 0); if (should_notify_rtnl && rtnl_trylock()) { bond_slave_state_notify(bond); @@ -2161,9 +2160,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n", port->actor_port_number); /* Protect against concurrent state machines */ - __get_state_machine_lock(port); + spin_lock(&slave->bond->mode_lock); ad_rx_machine(lacpdu, port); - __release_state_machine_lock(port); + spin_unlock(&slave->bond->mode_lock); break; case AD_TYPE_MARKER: @@ -2213,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= @@ -2224,7 +2223,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2246,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= @@ -2257,7 +2256,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2280,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); /* on link down we are zeroing duplex and speed since * some of the adaptors(ce1000.lan) report full duplex/speed * instead of N/A(duplex) / 0(speed). @@ -2311,7 +2310,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); + + /* RTNL is held and mode_lock is released so it's safe + * to update slave_array here. 
+ */ + bond_update_slave_arr(slave->bond, NULL); } /** @@ -2395,7 +2399,6 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, return 0; } -/* Wrapper used to hold bond->lock so no slave manipulation can occur */ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { int ret; @@ -2407,90 +2410,19 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) return ret; } -int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) -{ - struct bonding *bond = netdev_priv(dev); - struct slave *slave, *first_ok_slave; - struct aggregator *agg; - struct ad_info ad_info; - struct list_head *iter; - int slaves_in_agg; - int slave_agg_no; - int agg_id; - - if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { - netdev_dbg(dev, "__bond_3ad_get_active_agg_info failed\n"); - goto err_free; - } - - slaves_in_agg = ad_info.ports; - agg_id = ad_info.aggregator_id; - - if (slaves_in_agg == 0) { - netdev_dbg(dev, "active aggregator is empty\n"); - goto err_free; - } - - slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg; - first_ok_slave = NULL; - - bond_for_each_slave_rcu(bond, slave, iter) { - agg = SLAVE_AD_INFO(slave)->port.aggregator; - if (!agg || agg->aggregator_identifier != agg_id) - continue; - - if (slave_agg_no >= 0) { - if (!first_ok_slave && bond_slave_can_tx(slave)) - first_ok_slave = slave; - slave_agg_no--; - continue; - } - - if (bond_slave_can_tx(slave)) { - bond_dev_queue_xmit(bond, skb, slave->dev); - goto out; - } - } - - if (slave_agg_no >= 0) { - netdev_err(dev, "Couldn't find a slave to tx on for aggregator ID %d\n", - agg_id); - goto err_free; - } - - /* we couldn't find any suitable slave after the agg_no, so use the - * first suitable found, if found. - */ - if (first_ok_slave) - bond_dev_queue_xmit(bond, skb, first_ok_slave->dev); - else - goto err_free; - -out: - return NETDEV_TX_OK; -err_free: - /* no suitable interface, frame not sent */ - dev_kfree_skb_any(skb); - goto out; -} - int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave) { - int ret = RX_HANDLER_ANOTHER; struct lacpdu *lacpdu, _lacpdu; if (skb->protocol != PKT_TYPE_LACPDU) - return ret; + return RX_HANDLER_ANOTHER; lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); if (!lacpdu) - return ret; + return RX_HANDLER_ANOTHER; - read_lock(&bond->lock); - ret = bond_3ad_rx_indication(lacpdu, slave, skb->len); - read_unlock(&bond->lock); - return ret; + return bond_3ad_rx_indication(lacpdu, slave, skb->len); } /** @@ -2500,7 +2432,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, * When modify lacp_rate parameter via sysfs, * update actor_oper_port_state of each port. * - * Hold slave->state_machine_lock, + * Hold bond->mode_lock, * so we can modify port->actor_oper_port_state, * no matter bond is up or down. 
*/ @@ -2512,13 +2444,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) int lacp_fast; lacp_fast = bond->params.lacp_fast; + spin_lock_bh(&bond->mode_lock); bond_for_each_slave(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); - __get_state_machine_lock(port); if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; else port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT; - __release_state_machine_lock(port); } + spin_unlock_bh(&bond->mode_lock); } diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index bb03b1df2f3e799da32d82ca2a5226c8bd031647..c5f14ac63f3ee7b2b8b41b60939c6002ad9e7c1f 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -259,7 +259,6 @@ struct ad_bond_info { struct ad_slave_info { struct aggregator aggregator; /* 802.3ad aggregator structure */ struct port port; /* 802.3ad port structure */ - spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */ u16 id; }; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 95dd1f58c260d941c51ac4a369b2b97ba3f4ba34..d2eadab787c55d8fc0f361755409a880d64abfb3 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -100,27 +100,6 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size) /*********************** tlb specific functions ***************************/ -static inline void _lock_tx_hashtbl_bh(struct bonding *bond) -{ - spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _unlock_tx_hashtbl_bh(struct bonding *bond) -{ - spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _lock_tx_hashtbl(struct bonding *bond) -{ - spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _unlock_tx_hashtbl(struct bonding *bond) -{ - spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -/* Caller must hold tx_hashtbl lock */ static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load) { if (save_load) { @@ -140,7 +119,6 @@ static inline void tlb_init_slave(struct slave *slave) SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX; } -/* Caller must hold bond lock for read, BH disabled */ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { @@ -163,13 +141,12 @@ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave, tlb_init_slave(slave); } -/* Caller must hold bond lock for read */ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); __tlb_clear_slave(bond, slave, save_load); - _unlock_tx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* Must be called before starting the monitor timer */ @@ -184,14 +161,14 @@ static int tlb_initialize(struct bonding *bond) if (!new_hashtbl) return -1; - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); bond_info->tx_hashtbl = new_hashtbl; for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); - _unlock_tx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); return 0; } @@ -200,18 +177,13 @@ static int tlb_initialize(struct bonding *bond) static void tlb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct tlb_up_slave *arr; - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); kfree(bond_info->tx_hashtbl); bond_info->tx_hashtbl = NULL; - _unlock_tx_hashtbl_bh(bond); - - arr = 
rtnl_dereference(bond_info->slave_arr); - if (arr) - kfree_rcu(arr, rcu); + spin_unlock_bh(&bond->mode_lock); } static long long compute_gap(struct slave *slave) @@ -220,7 +192,6 @@ static long long compute_gap(struct slave *slave) (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ } -/* Caller must hold bond lock for read */ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) { struct slave *slave, *least_loaded; @@ -281,42 +252,23 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index, return assigned_slave; } -/* Caller must hold bond lock for read */ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len) { struct slave *tx_slave; - /* - * We don't need to disable softirq here, becase + + /* We don't need to disable softirq here, becase * tlb_choose_channel() is only called by bond_alb_xmit() * which already has softirq disabled. */ - _lock_tx_hashtbl(bond); + spin_lock(&bond->mode_lock); tx_slave = __tlb_choose_channel(bond, hash_index, skb_len); - _unlock_tx_hashtbl(bond); + spin_unlock(&bond->mode_lock); + return tx_slave; } /*********************** rlb specific functions ***************************/ -static inline void _lock_rx_hashtbl_bh(struct bonding *bond) -{ - spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _unlock_rx_hashtbl_bh(struct bonding *bond) -{ - spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _lock_rx_hashtbl(struct bonding *bond) -{ - spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _unlock_rx_hashtbl(struct bonding *bond) -{ - spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} /* when an ARP REPLY is received from a client update its info * in the rx_hashtbl @@ -327,7 +279,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); client_info = &(bond_info->rx_hashtbl[hash_index]); @@ -342,7 +294,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) bond_info->rx_ntt = 1; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond, @@ -378,15 +330,15 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond, return RX_HANDLER_ANOTHER; } -/* Caller must hold bond lock for read */ -static struct slave *rlb_next_rx_slave(struct bonding *bond) +/* Caller must hold rcu_read_lock() */ +static struct slave *__rlb_next_rx_slave(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *before = NULL, *rx_slave = NULL, *slave; struct list_head *iter; bool found = false; - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (!bond_slave_can_tx(slave)) continue; if (!found) { @@ -411,35 +363,16 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond) return rx_slave; } -/* Caller must hold rcu_read_lock() for read */ -static struct slave *__rlb_next_rx_slave(struct bonding *bond) +/* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */ +static struct slave *rlb_next_rx_slave(struct bonding *bond) { - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct slave *before = NULL, *rx_slave = NULL, *slave; - struct list_head *iter; - bool 
found = false; + struct slave *rx_slave; - bond_for_each_slave_rcu(bond, slave, iter) { - if (!bond_slave_can_tx(slave)) - continue; - if (!found) { - if (!before || before->speed < slave->speed) - before = slave; - } else { - if (!rx_slave || rx_slave->speed < slave->speed) - rx_slave = slave; - } - if (slave == bond_info->rx_slave) - found = true; - } - /* we didn't find anything after the current or we have something - * better before and up to the current slave - */ - if (!rx_slave || (before && rx_slave->speed < before->speed)) - rx_slave = before; + ASSERT_RTNL(); - if (rx_slave) - bond_info->rx_slave = rx_slave; + rcu_read_lock(); + rx_slave = __rlb_next_rx_slave(bond); + rcu_read_unlock(); return rx_slave; } @@ -447,11 +380,11 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond) /* teach the switch the mac of a disabled slave * on the primary for fault tolerance * - * Caller must hold bond->curr_slave_lock for write or bond lock for write + * Caller must hold RTNL */ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) { - struct slave *curr_active = bond_deref_active_protected(bond); + struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); if (!curr_active) return; @@ -479,7 +412,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) u32 index, next_index; /* clear slave from rx_hashtbl */ - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); rx_hash_table = bond_info->rx_hashtbl; index = bond_info->rx_hashtbl_used_head; @@ -510,14 +443,10 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) } } - _unlock_rx_hashtbl_bh(bond); - - write_lock_bh(&bond->curr_slave_lock); + spin_unlock_bh(&bond->mode_lock); - if (slave != bond_deref_active_protected(bond)) + if (slave != rtnl_dereference(bond->curr_active_slave)) rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); - - write_unlock_bh(&bond->curr_slave_lock); } static void rlb_update_client(struct rlb_client_info *client_info) @@ -565,7 +494,7 @@ static void rlb_update_rx_clients(struct bonding *bond) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -583,7 +512,7 @@ static void rlb_update_rx_clients(struct bonding *bond) */ bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* The slave was assigned a new mac address - update the clients */ @@ -594,7 +523,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla int ntt = 0; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -615,7 +544,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* mark all clients using src_ip to be updated */ @@ -625,7 +554,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl(bond); + spin_lock(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -636,7 +565,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) netdev_err(bond->dev, "found a 
client with no channel in the client's hash table\n"); continue; } - /*update all clients using this src_ip, that are not assigned + /* update all clients using this src_ip, that are not assigned * to the team's address (curr_active_slave) and have a known * unicast mac address. */ @@ -649,10 +578,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) } } - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); } -/* Caller must hold both bond and ptr locks for read */ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -661,7 +589,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon struct rlb_client_info *client_info; u32 hash_index = 0; - _lock_rx_hashtbl(bond); + spin_lock(&bond->mode_lock); curr_active_slave = rcu_dereference(bond->curr_active_slave); @@ -680,7 +608,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon assigned_slave = client_info->slave; if (assigned_slave) { - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); return assigned_slave; } } else { @@ -742,7 +670,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon } } - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); return assigned_slave; } @@ -763,9 +691,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; if (arp->op_code == htons(ARPOP_REPLY)) { - /* the arp must be sent on the selected - * rx channel - */ + /* the arp must be sent on the selected rx channel */ tx_slave = rlb_choose_channel(skb, bond); if (tx_slave) ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr); @@ -795,7 +721,6 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return tx_slave; } -/* Caller must hold bond lock for read */ static void rlb_rebalance(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -804,7 +729,7 @@ static void rlb_rebalance(struct bonding *bond) int ntt; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); ntt = 0; hash_index = bond_info->rx_hashtbl_used_head; @@ -822,10 +747,10 @@ static void rlb_rebalance(struct bonding *bond) /* update the team's flag only after the whole iteration */ if (ntt) bond_info->rx_ntt = 1; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } -/* Caller must hold rx_hashtbl lock */ +/* Caller must hold mode_lock */ static void rlb_init_table_entry_dst(struct rlb_client_info *entry) { entry->used_next = RLB_NULL_INDEX; @@ -913,15 +838,16 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash) bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash; } -/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does - * not match arp->mac_src */ +/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does + * not match arp->mac_src + */ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); u32 index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); index = bond_info->rx_hashtbl[ip_src_hash].src_first; while (index != RLB_NULL_INDEX) { @@ -932,7 +858,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) rlb_delete_table_entry(bond, index); index = next_index; } - 
_unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static int rlb_initialize(struct bonding *bond) @@ -946,7 +872,7 @@ static int rlb_initialize(struct bonding *bond) if (!new_hashtbl) return -1; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); bond_info->rx_hashtbl = new_hashtbl; @@ -955,7 +881,7 @@ static int rlb_initialize(struct bonding *bond) for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) rlb_init_table_entry(bond_info->rx_hashtbl + i); - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); /* register to receive ARPs */ bond->recv_probe = rlb_arp_recv; @@ -967,13 +893,13 @@ static void rlb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); kfree(bond_info->rx_hashtbl); bond_info->rx_hashtbl = NULL; bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) @@ -981,7 +907,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 curr_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); curr_index = bond_info->rx_hashtbl_used_head; while (curr_index != RLB_NULL_INDEX) { @@ -994,7 +920,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) curr_index = next_index; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /*********************** tlb/rlb shared functions *********************/ @@ -1091,8 +1017,9 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) return 0; } - /* for rlb each slave must have a unique hw mac addresses so that */ - /* each slave will receive packets destined to a different mac */ + /* for rlb each slave must have a unique hw mac addresses so that + * each slave will receive packets destined to a different mac + */ memcpy(s_addr.sa_data, addr, dev->addr_len); s_addr.sa_family = dev->type; if (dev_set_mac_address(dev, &s_addr)) { @@ -1103,13 +1030,10 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) return 0; } -/* - * Swap MAC addresses between two slaves. +/* Swap MAC addresses between two slaves. * * Called with RTNL held, and no other locks. - * */ - static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) { u8 tmp_mac_addr[ETH_ALEN]; @@ -1120,8 +1044,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) } -/* - * Send learning packets after MAC address swap. +/* Send learning packets after MAC address swap. * * Called with RTNL and no other locks */ @@ -1194,7 +1117,6 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr); if (found_slave) { - /* locking: needs RTNL and nothing else */ alb_swap_mac_addr(slave, found_slave); alb_fasten_mac_swap(bond, slave, found_slave); } @@ -1243,7 +1165,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav return 0; /* Try setting slave mac to bond address and fall-through - to code handling that situation below... */ + * to code handling that situation below... 
+ */ alb_set_slave_mac_addr(slave, bond->dev->dev_addr); } @@ -1351,7 +1274,6 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) if (rlb_enabled) { bond->alb_info.rlb_enabled = 1; - /* initialize rlb */ res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); @@ -1375,7 +1297,7 @@ void bond_alb_deinitialize(struct bonding *bond) } static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, - struct slave *tx_slave) + struct slave *tx_slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data = eth_hdr(skb); @@ -1388,7 +1310,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } if (tx_slave && bond_slave_can_tx(tx_slave)) { - if (tx_slave != rcu_dereference(bond->curr_active_slave)) { + if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) { ether_addr_copy(eth_data->h_source, tx_slave->dev->dev_addr); } @@ -1398,9 +1320,9 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } if (tx_slave && bond->params.tlb_dynamic_lb) { - _lock_tx_hashtbl(bond); + spin_lock(&bond->mode_lock); __tlb_clear_slave(bond, tx_slave, 0); - _unlock_tx_hashtbl(bond); + spin_unlock(&bond->mode_lock); } /* no suitable interface, frame not sent */ @@ -1409,39 +1331,9 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, return NETDEV_TX_OK; } -static int bond_tlb_update_slave_arr(struct bonding *bond, - struct slave *skipslave) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct slave *tx_slave; - struct list_head *iter; - struct tlb_up_slave *new_arr, *old_arr; - - new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]), - GFP_ATOMIC); - if (!new_arr) - return -ENOMEM; - - bond_for_each_slave(bond, tx_slave, iter) { - if (!bond_slave_can_tx(tx_slave)) - continue; - if (skipslave == tx_slave) - continue; - new_arr->arr[new_arr->count++] = tx_slave; - } - - old_arr = rtnl_dereference(bond_info->slave_arr); - rcu_assign_pointer(bond_info->slave_arr, new_arr); - if (old_arr) - kfree_rcu(old_arr, rcu); - - return 0; -} - int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data; struct slave *tx_slave = NULL; u32 hash_index; @@ -1462,12 +1354,14 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) hash_index & 0xFF, skb->len); } else { - struct tlb_up_slave *slaves; + struct bond_up_slave *slaves; + unsigned int count; - slaves = rcu_dereference(bond_info->slave_arr); - if (slaves && slaves->count) + slaves = rcu_dereference(bond->slave_arr); + count = slaves ? ACCESS_ONCE(slaves->count) : 0; + if (likely(count)) tx_slave = slaves->arr[hash_index % - slaves->count]; + count]; } break; } @@ -1595,13 +1489,6 @@ void bond_alb_monitor(struct work_struct *work) if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { bool strict_match; - /* change of curr_active_slave involves swapping of mac addresses. - * in order to avoid this swapping from happening while - * sending the learning packets, the curr_slave_lock must be held for - * read. - */ - read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) { /* If updating current_active, use all currently * user mac addreses (!strict_match). 
Otherwise, only @@ -1613,17 +1500,11 @@ void bond_alb_monitor(struct work_struct *work) alb_send_learning_packets(slave, slave->dev->dev_addr, strict_match); } - - read_unlock(&bond->curr_slave_lock); - bond_info->lp_counter = 0; } /* rebalance tx traffic */ if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { - - read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { @@ -1633,19 +1514,14 @@ void bond_alb_monitor(struct work_struct *work) bond_info->unbalanced_load = 0; } } - - read_unlock(&bond->curr_slave_lock); - bond_info->tx_rebalance_counter = 0; } - /* handle rlb stuff */ if (bond_info->rlb_enabled) { if (bond_info->primary_is_promisc && (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { - /* - * dev_set_promiscuity requires rtnl and + /* dev_set_promiscuity requires rtnl and * nothing else. Avoid race with bond_close. */ rcu_read_unlock(); @@ -1715,8 +1591,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) return 0; } -/* - * Remove slave from tlb and rlb hash tables, and fix up MAC addresses +/* Remove slave from tlb and rlb hash tables, and fix up MAC addresses * if necessary. * * Caller must hold RTNL and no other locks @@ -1733,13 +1608,8 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) rlb_clear_slave(bond, slave); } - if (bond_is_nondyn_tlb(bond)) - if (bond_tlb_update_slave_arr(bond, slave)) - pr_err("Failed to build slave-array for TLB mode.\n"); - } -/* Caller must hold bond lock for read */ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -1762,7 +1632,7 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char } if (bond_is_nondyn_tlb(bond)) { - if (bond_tlb_update_slave_arr(bond, NULL)) + if (bond_update_slave_arr(bond, NULL)) pr_err("Failed to build slave-array for TLB mode.\n"); } } @@ -1775,22 +1645,14 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char * Set the bond->curr_active_slave to @new_slave and handle * mac address swapping and promiscuity changes as needed. * - * If new_slave is NULL, caller must hold curr_slave_lock or - * bond->lock for write. - * - * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock - * for write. Processing here may sleep, so no other locks may be held. + * Caller must hold RTNL */ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave) - __releases(&bond->curr_slave_lock) - __acquires(&bond->curr_slave_lock) { struct slave *swap_slave; struct slave *curr_active; - curr_active = rcu_dereference_protected(bond->curr_active_slave, - !new_slave || - lockdep_is_held(&bond->curr_slave_lock)); + curr_active = rtnl_dereference(bond->curr_active_slave); if (curr_active == new_slave) return; @@ -1812,8 +1674,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave if (!swap_slave) swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr); - /* - * Arrange for swap_slave and new_slave to temporarily be + /* Arrange for swap_slave and new_slave to temporarily be * ignored so we can mess with their MAC addresses without * fear of interference from transmit activity. 
*/ @@ -1821,10 +1682,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave tlb_clear_slave(bond, swap_slave, 1); tlb_clear_slave(bond, new_slave, 1); - write_unlock_bh(&bond->curr_slave_lock); - - ASSERT_RTNL(); - /* in TLB mode, the slave might flip down/up with the old dev_addr, * and thus filter bond->dev_addr's packets, so force bond's mac */ @@ -1853,16 +1710,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave alb_send_learning_packets(new_slave, bond->dev->dev_addr, false); } - - write_lock_bh(&bond->curr_slave_lock); } -/* - * Called with RTNL - */ +/* Called with RTNL */ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) - __acquires(&bond->lock) - __releases(&bond->lock) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr *sa = addr; @@ -1895,14 +1746,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) } else { alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr); - read_lock(&bond->lock); alb_send_learning_packets(curr_active, bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, curr_active); } - read_unlock(&bond->lock); } return 0; diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index aaeac61d03cf804524ac602a8f5870a30aee0b73..1ad473b4ade5b50f7a0dc734f55c60627f65a5cd 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -139,24 +139,14 @@ struct tlb_slave_info { */ }; -struct tlb_up_slave { - unsigned int count; - struct rcu_head rcu; - struct slave *arr[0]; -}; - struct alb_bond_info { struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ - spinlock_t tx_hashtbl_lock; u32 unbalanced_load; int tx_rebalance_counter; int lp_counter; - /* -------- non-dynamic tlb mode only ---------*/ - struct tlb_up_slave __rcu *slave_arr; /* Up slaves */ /* -------- rlb parameters -------- */ int rlb_enabled; struct rlb_client_info *rx_hashtbl; /* Receive hash table */ - spinlock_t rx_hashtbl_lock; u32 rx_hashtbl_used_head; u8 rx_ntt; /* flag - need to transmit * to all rx clients diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 280971b227ea461788797d35b98145699eee52e4..8f99082f90ebed3bf7d91a8914b66704a1c74512 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -13,9 +13,7 @@ static struct dentry *bonding_debug_root; -/* - * Show RLB hash table - */ +/* Show RLB hash table */ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) { struct bonding *bond = m->private; @@ -29,7 +27,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) seq_printf(m, "SourceIP DestinationIP " "Destination MAC DEV\n"); - spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -42,7 +40,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) client_info->slave->dev->name); } - spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); + spin_unlock_bh(&bond->mode_lock); return 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 798ae69fb63c2ee22973e05bb276cd539e47f24a..c9ac06cfe6b7b3a8f62568b70a6ad6d7ca9b44d0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -208,6 +208,9 @@ static int lacp_fast; static int bond_init(struct net_device 
*bond_dev); static void bond_uninit(struct net_device *bond_dev); +static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats); +static void bond_slave_arr_handler(struct work_struct *work); /*---------------------------- General routines -----------------------------*/ @@ -253,8 +256,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, dev_queue_xmit(skb); } -/* - * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, +/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, * b. The operation is protected by the RTNL semaphore in the 8021q code, @@ -326,8 +328,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, /*------------------------------- Link status -------------------------------*/ -/* - * Set the carrier state for the master according to the state of its +/* Set the carrier state for the master according to the state of its * slaves. If any slaves are up, the master is up. In 802.3ad mode, * do special 802.3ad magic. * @@ -362,8 +363,7 @@ static int bond_set_carrier(struct bonding *bond) return 0; } -/* - * Get link speed and duplex from the slave's base driver +/* Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, set speed and duplex to -1, * and return. @@ -416,8 +416,7 @@ const char *bond_slave_link_status(s8 link) } } -/* - * if supports MII link status reporting, check its link status. +/* if supports MII link status reporting, check its link status. * * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(), * depending upon the setting of the use_carrier parameter. @@ -454,14 +453,14 @@ static int bond_check_dev_link(struct bonding *bond, /* Ethtool can't be used, fallback to MII ioctls. */ ioctl = slave_ops->ndo_do_ioctl; if (ioctl) { - /* TODO: set pointer to correct ioctl on a per team member */ - /* bases to make this more efficient. that is, once */ - /* we determine the correct ioctl, we will always */ - /* call it and not the others for that team */ - /* member. */ - - /* - * We cannot assume that SIOCGMIIPHY will also read a + /* TODO: set pointer to correct ioctl on a per team member + * bases to make this more efficient. that is, once + * we determine the correct ioctl, we will always + * call it and not the others for that team + * member. + */ + + /* We cannot assume that SIOCGMIIPHY will also read a * register; not all network drivers (e.g., e100) * support that. */ @@ -476,8 +475,7 @@ static int bond_check_dev_link(struct bonding *bond, } } - /* - * If reporting, report that either there's no dev->do_ioctl, + /* If reporting, report that either there's no dev->do_ioctl, * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. 
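
The bond_alb.c, bond_debugfs.c and bond_main.c hunks above all apply the same locking conversion: the per-mode tx/rx hash-table spinlocks and the bond->lock / curr_slave_lock rwlocks are folded into the single per-bond mode_lock, while slave-set and failover changes are serialized by RTNL alone. A minimal sketch of the resulting split, with illustrative demo_* names rather than the driver's actual symbols:

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/rtnetlink.h>

    struct demo_bond {
        spinlock_t mode_lock;   /* protects mode-private state, e.g. hash tables */
        u32 rx_used_head;       /* head of an RLB-style in-use list */
    };

    /* Hash-table walks take only mode_lock (BH-safe where the data path runs in BH). */
    static void demo_walk_rx_table(struct demo_bond *bond)
    {
        spin_lock_bh(&bond->mode_lock);
        /* ... iterate entries starting at bond->rx_used_head ... */
        spin_unlock_bh(&bond->mode_lock);
    }

    /* Control-path work (enslave, release, failover) relies on RTNL only and may sleep. */
    static void demo_select_active(struct demo_bond *bond)
    {
        ASSERT_RTNL();
        /* ... swap MAC addresses, send learning packets ... */
    }
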
@@ -487,9 +485,7 @@ static int bond_check_dev_link(struct bonding *bond, /*----------------------------- Multicast list ------------------------------*/ -/* - * Push the promiscuity flag down to appropriate slaves - */ +/* Push the promiscuity flag down to appropriate slaves */ static int bond_set_promiscuity(struct bonding *bond, int inc) { struct list_head *iter; @@ -512,9 +508,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc) return err; } -/* - * Push the allmulti flag down to all slaves - */ +/* Push the allmulti flag down to all slaves */ static int bond_set_allmulti(struct bonding *bond, int inc) { struct list_head *iter; @@ -537,8 +531,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc) return err; } -/* - * Retrieve the list of registered multicast addresses for the bonding +/* Retrieve the list of registered multicast addresses for the bonding * device and retransmit an IGMP JOIN request to the current active * slave. */ @@ -560,8 +553,7 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) rtnl_unlock(); } -/* Flush bond's hardware addresses from slave - */ +/* Flush bond's hardware addresses from slave */ static void bond_hw_addr_flush(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -588,8 +580,6 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { - ASSERT_RTNL(); - if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); @@ -632,18 +622,15 @@ static void bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } -/* - * bond_do_fail_over_mac +/* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings * - * Called with RTNL, curr_slave_lock for write_bh. 
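
With curr_slave_lock gone, the surrounding hunks read curr_active_slave and primary_slave with rcu_dereference() on the packet paths (under rcu_read_lock) and with rtnl_dereference() on the control paths, publishing new values via rcu_assign_pointer() or RCU_INIT_POINTER(). A schematic sketch of the two access patterns, again using hypothetical demo_* names:

    #include <linux/rcupdate.h>
    #include <linux/rtnetlink.h>

    struct demo_slave;

    struct demo_bond {
        struct demo_slave __rcu *curr_active_slave;
    };

    /* Fast path: caller holds rcu_read_lock(). */
    static struct demo_slave *demo_active_rcu(struct demo_bond *bond)
    {
        return rcu_dereference(bond->curr_active_slave);
    }

    /* Control path: RTNL is the only writer-side serialization left. */
    static void demo_change_active(struct demo_bond *bond, struct demo_slave *new)
    {
        struct demo_slave *old = rtnl_dereference(bond->curr_active_slave);

        if (old == new)
            return;
        rcu_assign_pointer(bond->curr_active_slave, new);
        /* readers still using 'old' are flushed with synchronize_rcu()
         * (or the object is freed via kfree_rcu()) in the real teardown paths
         */
    }
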
+ * Called with RTNL */ static void bond_do_fail_over_mac(struct bonding *bond, struct slave *new_active, struct slave *old_active) - __releases(&bond->curr_slave_lock) - __acquires(&bond->curr_slave_lock) { u8 tmp_mac[ETH_ALEN]; struct sockaddr saddr; @@ -651,23 +638,17 @@ static void bond_do_fail_over_mac(struct bonding *bond, switch (bond->params.fail_over_mac) { case BOND_FOM_ACTIVE: - if (new_active) { - write_unlock_bh(&bond->curr_slave_lock); + if (new_active) bond_set_dev_addr(bond->dev, new_active->dev); - write_lock_bh(&bond->curr_slave_lock); - } break; case BOND_FOM_FOLLOW: - /* - * if new_active && old_active, swap them + /* if new_active && old_active, swap them * if just old_active, do nothing (going to no active slave) * if just new_active, set new_active to bond's MAC */ if (!new_active) return; - write_unlock_bh(&bond->curr_slave_lock); - if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -696,7 +677,6 @@ static void bond_do_fail_over_mac(struct bonding *bond, netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", -rv, new_active->dev->name); out: - write_lock_bh(&bond->curr_slave_lock); break; default: netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", @@ -708,8 +688,8 @@ static void bond_do_fail_over_mac(struct bonding *bond, static bool bond_should_change_active(struct bonding *bond) { - struct slave *prim = bond->primary_slave; - struct slave *curr = bond_deref_active_protected(bond); + struct slave *prim = rtnl_dereference(bond->primary_slave); + struct slave *curr = rtnl_dereference(bond->curr_active_slave); if (!prim || !curr || curr->link != BOND_LINK_UP) return true; @@ -732,13 +712,14 @@ static bool bond_should_change_active(struct bonding *bond) */ static struct slave *bond_find_best_slave(struct bonding *bond) { - struct slave *slave, *bestslave = NULL; + struct slave *slave, *bestslave = NULL, *primary; struct list_head *iter; int mintime = bond->params.updelay; - if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP && + primary = rtnl_dereference(bond->primary_slave); + if (primary && primary->link == BOND_LINK_UP && bond_should_change_active(bond)) - return bond->primary_slave; + return primary; bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) @@ -784,15 +765,15 @@ static bool bond_should_notify_peers(struct bonding *bond) * because it is apparently the best available slave we have, even though its * updelay hasn't timed out yet. * - * If new_active is not NULL, caller must hold curr_slave_lock for write_bh. + * Caller must hold RTNL. */ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) { struct slave *old_active; - old_active = rcu_dereference_protected(bond->curr_active_slave, - !new_active || - lockdep_is_held(&bond->curr_slave_lock)); + ASSERT_RTNL(); + + old_active = rtnl_dereference(bond->curr_active_slave); if (old_active == new_active) return; @@ -860,21 +841,18 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } - write_unlock_bh(&bond->curr_slave_lock); - call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); - - write_lock_bh(&bond->curr_slave_lock); } } /* resend IGMP joins since active slave has changed or * all were sent on curr_active_slave. 
* resend only if bond is brought up with the affected - * bonding modes and the retransmission is enabled */ + * bonding modes and the retransmission is enabled + */ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && ((bond_uses_primary(bond) && new_active) || BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) { @@ -892,15 +870,17 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) * - The primary_slave has got its link back. * - A slave has got its link back and there's no old curr_active_slave. * - * Caller must hold curr_slave_lock for write_bh. + * Caller must hold RTNL. */ void bond_select_active_slave(struct bonding *bond) { struct slave *best_slave; int rv; + ASSERT_RTNL(); + best_slave = bond_find_best_slave(bond); - if (best_slave != bond_deref_active_protected(bond)) { + if (best_slave != rtnl_dereference(bond->curr_active_slave)) { bond_change_active_slave(bond, best_slave); rv = bond_set_carrier(bond); if (!rv) @@ -1022,7 +1002,8 @@ static netdev_features_t bond_fix_features(struct net_device *dev, static void bond_compute_features(struct bonding *bond) { - unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; netdev_features_t vlan_features = BOND_VLAN_FEATURES; netdev_features_t enc_features = BOND_ENC_FEATURES; struct net_device *bond_dev = bond->dev; @@ -1058,8 +1039,10 @@ static void bond_compute_features(struct bonding *bond) bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); - flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; - bond_dev->priv_flags = flags | dst_release_flag; + bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) && + dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) + bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE; netdev_change_features(bond_dev); } @@ -1240,8 +1223,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) slave_dev->name); } - /* - * Old ifenslave binaries are no longer supported. These can + /* Old ifenslave binaries are no longer supported. These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to * enslaving it; the old ifenslave will not. @@ -1313,7 +1295,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) call_netdevice_notifiers(NETDEV_JOIN, slave_dev); /* If this is the first slave, then we need to set the master's hardware - * address to be the same as the slave's. */ + * address to be the same as the slave's. + */ if (!bond_has_slaves(bond) && bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev); @@ -1326,8 +1309,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->bond = bond; new_slave->dev = slave_dev; - /* - * Set the new_slave's queue_id to be zero. Queue ID mapping + /* Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. 
*/ new_slave->queue_id = 0; @@ -1340,8 +1322,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_free; } - /* - * Save slave's original ("permanent") mac address for modes + /* Save slave's original ("permanent") mac address for modes * that need it, and for restoring it upon release, and then * set it to the master's address */ @@ -1349,8 +1330,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (!bond->params.fail_over_mac || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { - /* - * Set slave to master's mac address. The application already + /* Set slave to master's mac address. The application already * set the master's mac address to that of the first slave */ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); @@ -1370,6 +1350,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } slave_dev->priv_flags |= IFF_BONDING; + /* initialize slave stats */ + dev_get_stats(new_slave->dev, &new_slave->slave_stats); if (bond_is_lb(bond)) { /* bond_alb_init_slave() must be called before all other stages since @@ -1436,8 +1418,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) link_reporting = bond_check_dev_link(bond, slave_dev, 1); if ((link_reporting == -1) && !bond->params.arp_interval) { - /* - * miimon is set but a bonded network driver + /* miimon is set but a bonded network driver * does not support ETHTOOL/MII and * arp_interval is not set. Note: if * use_carrier is enabled, we will never go @@ -1482,7 +1463,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_uses_primary(bond) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { - bond->primary_slave = new_slave; + rcu_assign_pointer(bond->primary_slave, new_slave); bond->force_primary = true; } } @@ -1570,12 +1551,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_uses_primary(bond)) { block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } + if (bond_mode_uses_xmit_hash(bond)) + bond_update_slave_arr(bond, NULL); + netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, bond_is_active_slave(new_slave) ? "an active" : "a backup", @@ -1596,16 +1578,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_hw_addr_flush(bond_dev, slave_dev); vlan_vids_del_by_dev(slave_dev, bond_dev); - if (bond->primary_slave == new_slave) - bond->primary_slave = NULL; + if (rcu_access_pointer(bond->primary_slave) == new_slave) + RCU_INIT_POINTER(bond->primary_slave, NULL); if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_change_active_slave(bond, NULL); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } + /* either primary_slave or curr_active_slave might've changed */ + synchronize_rcu(); slave_disable_netpoll(new_slave); err_close: @@ -1639,10 +1621,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return res; } -/* - * Try to release the slave device from the bond device +/* Try to release the slave device from the bond device * It is legal to access curr_active_slave without a lock because all the function - * is write-locked. 
If "all" is true it means that the function is being called + * is RTNL-locked. If "all" is true it means that the function is being called * while destroying a bond interface and all slaves are being released. * * The rules for slave state should be: @@ -1682,18 +1663,20 @@ static int __bond_release_one(struct net_device *bond_dev, bond_sysfs_slave_del(slave); + /* recompute stats just before removing the slave */ + bond_get_stats(bond->dev, &bond->bond_stats); + bond_upper_dev_unlink(bond_dev, slave_dev); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); - write_lock_bh(&bond->lock); - /* Inform AD package of unbinding of slave. */ if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); - write_unlock_bh(&bond->lock); + if (bond_mode_uses_xmit_hash(bond)) + bond_update_slave_arr(bond, slave); netdev_info(bond_dev, "Releasing %s interface %s\n", bond_is_active_slave(slave) ? "active" : "backup", @@ -1712,14 +1695,11 @@ static int __bond_release_one(struct net_device *bond_dev, bond_dev->name, slave_dev->name); } - if (bond->primary_slave == slave) - bond->primary_slave = NULL; + if (rtnl_dereference(bond->primary_slave) == slave) + RCU_INIT_POINTER(bond->primary_slave, NULL); - if (oldcurrent == slave) { - write_lock_bh(&bond->curr_slave_lock); + if (oldcurrent == slave) bond_change_active_slave(bond, NULL); - write_unlock_bh(&bond->curr_slave_lock); - } if (bond_is_lb(bond)) { /* Must be called only after the slave has been @@ -1733,16 +1713,11 @@ static int __bond_release_one(struct net_device *bond_dev, if (all) { RCU_INIT_POINTER(bond->curr_active_slave, NULL); } else if (oldcurrent == slave) { - /* - * Note that we hold RTNL over this sequence, so there + /* Note that we hold RTNL over this sequence, so there * is no concern that another slave add/remove event * will interfere. */ - write_lock_bh(&bond->curr_slave_lock); - bond_select_active_slave(bond); - - write_unlock_bh(&bond->curr_slave_lock); } if (!bond_has_slaves(bond)) { @@ -1765,10 +1740,9 @@ static int __bond_release_one(struct net_device *bond_dev, netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n", slave_dev->name, bond_dev->name); - /* must do this from outside any spinlocks */ vlan_vids_del_by_dev(slave_dev, bond_dev); - /* If the mode uses primary, then this cases was handled above by + /* If the mode uses primary, then this case was handled above by * bond_change_active_slave(..., NULL) */ if (!bond_uses_primary(bond)) { @@ -1808,7 +1782,7 @@ static int __bond_release_one(struct net_device *bond_dev, bond_free_slave(slave); - return 0; /* deletion OK */ + return 0; } /* A wrapper used because of ndo_del_link */ @@ -1817,10 +1791,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return __bond_release_one(bond_dev, slave_dev, false); } -/* -* First release a slave and then destroy the bond if no more slaves are left. -* Must be under rtnl_lock when this function is called. -*/ +/* First release a slave and then destroy the bond if no more slaves are left. + * Must be under rtnl_lock when this function is called. 
+ */ static int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1843,7 +1816,6 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) info->bond_mode = BOND_MODE(bond); info->miimon = bond->params.miimon; - info->num_slaves = bond->slave_cnt; return 0; @@ -1906,9 +1878,7 @@ static int bond_miimon_inspect(struct bonding *bond) /*FALLTHRU*/ case BOND_LINK_FAIL: if (link_state) { - /* - * recovered before downdelay expired - */ + /* recovered before downdelay expired */ slave->link = BOND_LINK_UP; slave->last_link_up = jiffies; netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", @@ -1974,7 +1944,7 @@ static int bond_miimon_inspect(struct bonding *bond) static void bond_miimon_commit(struct bonding *bond) { struct list_head *iter; - struct slave *slave; + struct slave *slave, *primary; bond_for_each_slave(bond, slave, iter) { switch (slave->new_link) { @@ -1985,13 +1955,14 @@ static void bond_miimon_commit(struct bonding *bond) slave->link = BOND_LINK_UP; slave->last_link_up = jiffies; + primary = rtnl_dereference(bond->primary_slave); if (BOND_MODE(bond) == BOND_MODE_8023AD) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != bond->primary_slave) { + } else if (slave != primary) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } @@ -2009,8 +1980,10 @@ static void bond_miimon_commit(struct bonding *bond) bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); - if (!bond->curr_active_slave || - (slave == bond->primary_slave)) + if (BOND_MODE(bond) == BOND_MODE_XOR) + bond_update_slave_arr(bond, NULL); + + if (!bond->curr_active_slave || slave == primary) goto do_failover; continue; @@ -2037,6 +2010,9 @@ static void bond_miimon_commit(struct bonding *bond) bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN); + if (BOND_MODE(bond) == BOND_MODE_XOR) + bond_update_slave_arr(bond, NULL); + if (slave == rcu_access_pointer(bond->curr_active_slave)) goto do_failover; @@ -2051,19 +2027,15 @@ static void bond_miimon_commit(struct bonding *bond) } do_failover: - ASSERT_RTNL(); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } -/* - * bond_mii_monitor +/* bond_mii_monitor * * Really a wrapper that splits the mii monitor into two phases: an * inspection, then (if inspection indicates something needs to be done) @@ -2135,8 +2107,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) return ret; } -/* - * We go to the (large) trouble of VLAN tagging ARP frames because +/* We go to the (large) trouble of VLAN tagging ARP frames because * switches in VLAN mode (especially if ports are configured as * "native" to a VLAN) might not pass non-tagged frames. */ @@ -2363,8 +2334,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, curr_active_slave = rcu_dereference(bond->curr_active_slave); - /* - * Backup slaves won't see the ARP reply, but do come through + /* Backup slaves won't see the ARP reply, but do come through * here for each ARP probe (so we swap the sip/tip to validate * the probe). 
In a "redundant switch, common router" type of * configuration, the ARP probe will (hopefully) travel from @@ -2404,8 +2374,7 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, last_act + mod * delta_in_ticks + delta_in_ticks/2); } -/* - * this function is called regularly to monitor each slave's link +/* This function is called regularly to monitor each slave's link * ensuring that traffic is being sent and received when arp monitoring * is used in load-balancing mode. if the adapter has been dormant, then an * arp is transmitted to generate traffic. see activebackup_arp_monitor for @@ -2500,16 +2469,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if (slave_state_changed) { bond_slave_state_change(bond); + if (BOND_MODE(bond) == BOND_MODE_XOR) + bond_update_slave_arr(bond, NULL); } else if (do_failover) { - /* the bond_select_active_slave must hold RTNL - * and curr_slave_lock for write. - */ block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); - bond_select_active_slave(bond); - - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } rtnl_unlock(); @@ -2521,13 +2485,12 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) msecs_to_jiffies(bond->params.arp_interval)); } -/* - * Called to inspect slaves for active-backup mode ARP monitor link state +/* Called to inspect slaves for active-backup mode ARP monitor link state * changes. Sets new_link in slaves to specify what action should take * place for the slave. Returns 0 if no changes are found, >0 if changes * to link states must be committed. * - * Called with rcu_read_lock hold. + * Called with rcu_read_lock held. */ static int bond_ab_arp_inspect(struct bonding *bond) { @@ -2548,16 +2511,14 @@ static int bond_ab_arp_inspect(struct bonding *bond) continue; } - /* - * Give slaves 2*delta after being enslaved or made + /* Give slaves 2*delta after being enslaved or made * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. */ if (bond_time_in_interval(bond, slave->last_link_up, 2)) continue; - /* - * Backup slave is down if: + /* Backup slave is down if: * - No current_arp_slave AND * - more than 3*delta since last receive AND * - the bond has an IP address @@ -2576,8 +2537,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) commit++; } - /* - * Active slave is down if: + /* Active slave is down if: * - more than 2*delta since transmitting OR * - (more than 2*delta since receive AND * the bond has an IP address) @@ -2594,8 +2554,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) return commit; } -/* - * Called to commit link state changes noted by inspection step of +/* Called to commit link state changes noted by inspection step of * active-backup mode ARP monitor. * * Called with RTNL hold. @@ -2631,7 +2590,7 @@ static void bond_ab_arp_commit(struct bonding *bond) slave->dev->name); if (!rtnl_dereference(bond->curr_active_slave) || - (slave == bond->primary_slave)) + slave == rtnl_dereference(bond->primary_slave)) goto do_failover; } @@ -2663,21 +2622,17 @@ static void bond_ab_arp_commit(struct bonding *bond) } do_failover: - ASSERT_RTNL(); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } -/* - * Send ARP probes for active-backup mode ARP monitor. +/* Send ARP probes for active-backup mode ARP monitor. * - * Called with rcu_read_lock hold. 
+ * Called with rcu_read_lock held. */ static bool bond_ab_arp_probe(struct bonding *bond) { @@ -2817,9 +2772,7 @@ static void bond_activebackup_arp_mon(struct work_struct *work) /*-------------------------- netdev event handling --------------------------*/ -/* - * Change device name - */ +/* Change device name */ static int bond_event_changename(struct bonding *bond) { bond_remove_proc_entry(bond); @@ -2858,7 +2811,7 @@ static int bond_master_netdev_event(unsigned long event, static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { - struct slave *slave = bond_slave_get_rtnl(slave_dev); + struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; struct bonding *bond; struct net_device *bond_dev; u32 old_speed; @@ -2872,6 +2825,7 @@ static int bond_slave_netdev_event(unsigned long event, return NOTIFY_DONE; bond_dev = slave->bond->dev; bond = slave->bond; + primary = rtnl_dereference(bond->primary_slave); switch (event) { case NETDEV_UNREGISTER: @@ -2893,15 +2847,23 @@ static int bond_slave_netdev_event(unsigned long event, if (old_duplex != slave->duplex) bond_3ad_adapter_duplex_changed(slave); } + /* Refresh slave-array if applicable! + * If the setup does not use miimon or arpmon (mode-specific!), + * then these events will not cause the slave-array to be + * refreshed. This will cause xmit to use a slave that is not + * usable. Avoid such situation by refeshing the array at these + * events. If these (miimon/arpmon) parameters are configured + * then array gets refreshed twice and that should be fine! + */ + if (bond_mode_uses_xmit_hash(bond)) + bond_update_slave_arr(bond, NULL); break; case NETDEV_DOWN: - /* - * ... Or is it this? - */ + if (bond_mode_uses_xmit_hash(bond)) + bond_update_slave_arr(bond, NULL); break; case NETDEV_CHANGEMTU: - /* - * TODO: Should slaves be allowed to + /* TODO: Should slaves be allowed to * independently alter their MTU? For * an active-backup bond, slaves need * not be the same type of device, so @@ -2919,23 +2881,21 @@ static int bond_slave_netdev_event(unsigned long event, !bond->params.primary[0]) break; - if (slave == bond->primary_slave) { + if (slave == primary) { /* slave's name changed - he's no longer primary */ - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); } else if (!strcmp(slave_dev->name, bond->params.primary)) { /* we have a new primary slave */ - bond->primary_slave = slave; + rcu_assign_pointer(bond->primary_slave, slave); } else { /* we didn't change primary - exit */ break; } netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", - bond->primary_slave ? slave_dev->name : "none"); + primary ? slave_dev->name : "none"); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: @@ -2952,8 +2912,7 @@ static int bond_slave_netdev_event(unsigned long event, return NOTIFY_DONE; } -/* - * bond_netdev_event: handle netdev notifier chain events. +/* bond_netdev_event: handle netdev notifier chain events. * * This function receives events for the netdev chain. 
The caller (an * ioctl handler calling blocking_notifier_call_chain) holds the necessary @@ -3081,6 +3040,7 @@ static void bond_work_init_all(struct bonding *bond) else INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); + INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); } static void bond_work_cancel_all(struct bonding *bond) @@ -3090,6 +3050,7 @@ static void bond_work_cancel_all(struct bonding *bond) cancel_delayed_work_sync(&bond->alb_work); cancel_delayed_work_sync(&bond->ad_work); cancel_delayed_work_sync(&bond->mcast_work); + cancel_delayed_work_sync(&bond->slave_arr_work); } static int bond_open(struct net_device *bond_dev) @@ -3099,9 +3060,7 @@ static int bond_open(struct net_device *bond_dev) struct slave *slave; /* reset slave->backup and slave->inactive */ - read_lock(&bond->lock); if (bond_has_slaves(bond)) { - read_lock(&bond->curr_slave_lock); bond_for_each_slave(bond, slave, iter) { if (bond_uses_primary(bond) && slave != rcu_access_pointer(bond->curr_active_slave)) { @@ -3112,9 +3071,7 @@ static int bond_open(struct net_device *bond_dev) BOND_SLAVE_NOTIFY_NOW); } } - read_unlock(&bond->curr_slave_lock); } - read_unlock(&bond->lock); bond_work_init_all(bond); @@ -3143,6 +3100,9 @@ static int bond_open(struct net_device *bond_dev) bond_3ad_initiate_agg_selection(bond, 1); } + if (bond_mode_uses_xmit_hash(bond)) + bond_update_slave_arr(bond, NULL); + return 0; } @@ -3167,40 +3127,43 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - memset(stats, 0, sizeof(*stats)); + memcpy(stats, &bond->bond_stats, sizeof(*stats)); - read_lock_bh(&bond->lock); bond_for_each_slave(bond, slave, iter) { const struct rtnl_link_stats64 *sstats = dev_get_stats(slave->dev, &temp); + struct rtnl_link_stats64 *pstats = &slave->slave_stats; - stats->rx_packets += sstats->rx_packets; - stats->rx_bytes += sstats->rx_bytes; - stats->rx_errors += sstats->rx_errors; - stats->rx_dropped += sstats->rx_dropped; + stats->rx_packets += sstats->rx_packets - pstats->rx_packets; + stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes; + stats->rx_errors += sstats->rx_errors - pstats->rx_errors; + stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped; - stats->tx_packets += sstats->tx_packets; - stats->tx_bytes += sstats->tx_bytes; - stats->tx_errors += sstats->tx_errors; - stats->tx_dropped += sstats->tx_dropped; + stats->tx_packets += sstats->tx_packets - pstats->tx_packets;; + stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes; + stats->tx_errors += sstats->tx_errors - pstats->tx_errors; + stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped; - stats->multicast += sstats->multicast; - stats->collisions += sstats->collisions; + stats->multicast += sstats->multicast - pstats->multicast; + stats->collisions += sstats->collisions - pstats->collisions; - stats->rx_length_errors += sstats->rx_length_errors; - stats->rx_over_errors += sstats->rx_over_errors; - stats->rx_crc_errors += sstats->rx_crc_errors; - stats->rx_frame_errors += sstats->rx_frame_errors; - stats->rx_fifo_errors += sstats->rx_fifo_errors; - stats->rx_missed_errors += sstats->rx_missed_errors; + stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors; + stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors; + stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors; + stats->rx_frame_errors += 
sstats->rx_frame_errors - pstats->rx_frame_errors; + stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors; + stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors; - stats->tx_aborted_errors += sstats->tx_aborted_errors; - stats->tx_carrier_errors += sstats->tx_carrier_errors; - stats->tx_fifo_errors += sstats->tx_fifo_errors; - stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; - stats->tx_window_errors += sstats->tx_window_errors; + stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors; + stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors; + stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors; + stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors; + stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors; + + /* save off the slave stats for the next run */ + memcpy(pstats, sstats, sizeof(*sstats)); } - read_unlock_bh(&bond->lock); + memcpy(&bond->bond_stats, stats, sizeof(*stats)); return stats; } @@ -3229,24 +3192,17 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd mii->phy_id = 0; /* Fall Through */ case SIOCGMIIREG: - /* - * We do this again just in case we were called by SIOCGMIIREG + /* We do this again just in case we were called by SIOCGMIIREG * instead of SIOCGMIIPHY. */ mii = if_mii(ifr); if (!mii) return -EINVAL; - if (mii->reg_num == 1) { mii->val_out = 0; - read_lock(&bond->lock); - read_lock(&bond->curr_slave_lock); if (netif_carrier_ok(bond->dev)) mii->val_out = BMSR_LSTATUS; - - read_unlock(&bond->curr_slave_lock); - read_unlock(&bond->lock); } return 0; @@ -3277,7 +3233,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return res; default: - /* Go on */ break; } @@ -3339,7 +3294,6 @@ static void bond_set_rx_mode(struct net_device *bond_dev) struct list_head *iter; struct slave *slave; - rcu_read_lock(); if (bond_uses_primary(bond)) { slave = rcu_dereference(bond->curr_active_slave); @@ -3377,8 +3331,7 @@ static int bond_neigh_init(struct neighbour *n) if (ret) return ret; - /* - * Assign slave's neigh_cleanup to neighbour in case cleanup is called + /* Assign slave's neigh_cleanup to neighbour in case cleanup is called * after the last slave has been detached. Assumes that all slaves * utilize the same neigh_cleanup (true at this writing as only user * is ipoib). @@ -3391,8 +3344,7 @@ static int bond_neigh_init(struct neighbour *n) return parms.neigh_setup(n); } -/* - * The bonding ndo_neigh_setup is called at init time beofre any +/* The bonding ndo_neigh_setup is called at init time beofre any * slave exists. So we must declare proxy setup function which will * be used at run time to resolve the actual slave neigh param setup. * @@ -3410,9 +3362,7 @@ static int bond_neigh_setup(struct net_device *dev, return 0; } -/* - * Change the MTU of all of a master's slaves to match the master - */ +/* Change the MTU of all of a master's slaves to match the master */ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) { struct bonding *bond = netdev_priv(bond_dev); @@ -3422,21 +3372,6 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); - /* Can't hold bond->lock with bh disabled here since - * some base drivers panic. On the other hand we can't - * hold bond->lock without bh disabled because we'll - * deadlock. 
The only solution is to rely on the fact - * that we're under rtnl_lock here, and the slaves - * list won't change. This doesn't solve the problem - * of setting the slave's MTU while it is - * transmitting, but the assumption is that the base - * driver can handle that. - * - * TODO: figure out a way to safely iterate the slaves - * list, but without holding a lock around the actual - * call to the base driver. - */ - bond_for_each_slave(bond, slave, iter) { netdev_dbg(bond_dev, "s %p c_m %p\n", slave, slave->dev->netdev_ops->ndo_change_mtu); @@ -3480,8 +3415,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) return res; } -/* - * Change HW address +/* Change HW address * * Note that many devices must be down to change the HW address, and * downing the master releases all slaves. We can make bonds full of @@ -3511,21 +3445,6 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - /* Can't hold bond->lock with bh disabled here since - * some base drivers panic. On the other hand we can't - * hold bond->lock without bh disabled because we'll - * deadlock. The only solution is to rely on the fact - * that we're under rtnl_lock here, and the slaves - * list won't change. This doesn't solve the problem - * of setting the slave's hw address while it is - * transmitting, but the assumption is that the base - * driver can handle that. - * - * TODO: figure out a way to safely iterate the slaves - * list, but without holding a lock around the actual - * call to the base driver. - */ - bond_for_each_slave(bond, slave, iter) { netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name); res = dev_set_mac_address(slave->dev, addr); @@ -3654,7 +3573,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev */ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { slave = rcu_dereference(bond->curr_active_slave); - if (slave && bond_slave_can_tx(slave)) + if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else bond_xmit_slave_id(bond, skb, 0); @@ -3672,8 +3591,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev return NETDEV_TX_OK; } -/* - * in active-backup mode, we know that bond->curr_active_slave is always valid if +/* In active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. */ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) @@ -3690,20 +3608,148 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d return NETDEV_TX_OK; } -/* In bond_xmit_xor() , we determine the output device by using a pre- - * determined xmit_hash_policy(), If the selected device is not enabled, - * find the next active slave. +/* Use this to update slave_array when (a) it's not appropriate to update + * slave_array right away (note that update_slave_array() may sleep) + * and / or (b) RTNL is not held. */ -static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) +void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) { - struct bonding *bond = netdev_priv(bond_dev); - int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); +} - if (likely(slave_cnt)) - bond_xmit_slave_id(bond, skb, - bond_xmit_hash(bond, skb) % slave_cnt); - else +/* Slave array work handler. 
Holds only RTNL */ +static void bond_slave_arr_handler(struct work_struct *work) +{ + struct bonding *bond = container_of(work, struct bonding, + slave_arr_work.work); + int ret; + + if (!rtnl_trylock()) + goto err; + + ret = bond_update_slave_arr(bond, NULL); + rtnl_unlock(); + if (ret) { + pr_warn_ratelimited("Failed to update slave array from WT\n"); + goto err; + } + return; + +err: + bond_slave_arr_work_rearm(bond, 1); +} + +/* Build the usable slaves array in control path for modes that use xmit-hash + * to determine the slave interface - + * (a) BOND_MODE_8023AD + * (b) BOND_MODE_XOR + * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0 + * + * The caller is expected to hold RTNL only and NO other lock! + */ +int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) +{ + struct slave *slave; + struct list_head *iter; + struct bond_up_slave *new_arr, *old_arr; + int slaves_in_agg; + int agg_id = 0; + int ret = 0; + +#ifdef CONFIG_LOCKDEP + WARN_ON(lockdep_is_held(&bond->mode_lock)); +#endif + + new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]), + GFP_KERNEL); + if (!new_arr) { + ret = -ENOMEM; + pr_err("Failed to build slave-array.\n"); + goto out; + } + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + struct ad_info ad_info; + + if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + pr_debug("bond_3ad_get_active_agg_info failed\n"); + kfree_rcu(new_arr, rcu); + /* No active aggragator means it's not safe to use + * the previous array. + */ + old_arr = rtnl_dereference(bond->slave_arr); + if (old_arr) { + RCU_INIT_POINTER(bond->slave_arr, NULL); + kfree_rcu(old_arr, rcu); + } + goto out; + } + slaves_in_agg = ad_info.ports; + agg_id = ad_info.aggregator_id; + } + bond_for_each_slave(bond, slave, iter) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + struct aggregator *agg; + + agg = SLAVE_AD_INFO(slave)->port.aggregator; + if (!agg || agg->aggregator_identifier != agg_id) + continue; + } + if (!bond_slave_can_tx(slave)) + continue; + if (skipslave == slave) + continue; + new_arr->arr[new_arr->count++] = slave; + } + + old_arr = rtnl_dereference(bond->slave_arr); + rcu_assign_pointer(bond->slave_arr, new_arr); + if (old_arr) + kfree_rcu(old_arr, rcu); +out: + if (ret != 0 && skipslave) { + int idx; + + /* Rare situation where caller has asked to skip a specific + * slave but allocation failed (most likely!). BTW this is + * only possible when the call is initiated from + * __bond_release_one(). In this situation; overwrite the + * skipslave entry in the array with the last entry from the + * array to avoid a situation where the xmit path may choose + * this to-be-skipped slave to send a packet out. + */ + old_arr = rtnl_dereference(bond->slave_arr); + for (idx = 0; idx < old_arr->count; idx++) { + if (skipslave == old_arr->arr[idx]) { + old_arr->arr[idx] = + old_arr->arr[old_arr->count-1]; + old_arr->count--; + break; + } + } + } + return ret; +} + +/* Use this Xmit function for 3AD as well as XOR modes. The current + * usable slave array is formed in the control path. The xmit function + * just calculates hash and sends the packet out. + */ +int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bonding *bond = netdev_priv(dev); + struct slave *slave; + struct bond_up_slave *slaves; + unsigned int count; + + slaves = rcu_dereference(bond->slave_arr); + count = slaves ? 
ACCESS_ONCE(slaves->count) : 0; + if (likely(count)) { + slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; + bond_dev_queue_xmit(bond, skb, slave->dev); + } else { dev_kfree_skb_any(skb); + atomic_long_inc(&dev->tx_dropped); + } return NETDEV_TX_OK; } @@ -3726,7 +3772,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) bond_dev->name, __func__); continue; } - /* bond_dev_queue_xmit always returns 0 */ bond_dev_queue_xmit(bond, skb2, slave->dev); } } @@ -3740,9 +3785,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) /*------------------------- Device initialization ---------------------------*/ -/* - * Lookup the slave that corresponds to a qid - */ +/* Lookup the slave that corresponds to a qid */ static inline int bond_slave_override(struct bonding *bond, struct sk_buff *skb) { @@ -3771,17 +3814,14 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { - /* - * This helper function exists to help dev_pick_tx get the correct + /* This helper function exists to help dev_pick_tx get the correct * destination queue. Using a helper function skips a call to * skb_tx_hash and will put the skbs in the queue we expect on their * way down to the bonding driver. */ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; - /* - * Save the original txq to restore before passing to the driver - */ + /* Save the original txq to restore before passing to the driver */ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; if (unlikely(txq >= dev->real_num_tx_queues)) { @@ -3805,12 +3845,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev return bond_xmit_roundrobin(skb, dev); case BOND_MODE_ACTIVEBACKUP: return bond_xmit_activebackup(skb, dev); + case BOND_MODE_8023AD: case BOND_MODE_XOR: - return bond_xmit_xor(skb, dev); + return bond_3ad_xor_xmit(skb, dev); case BOND_MODE_BROADCAST: return bond_xmit_broadcast(skb, dev); - case BOND_MODE_8023AD: - return bond_3ad_xmit_xor(skb, dev); case BOND_MODE_ALB: return bond_alb_xmit(skb, dev); case BOND_MODE_TLB: @@ -3829,8 +3868,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) struct bonding *bond = netdev_priv(dev); netdev_tx_t ret = NETDEV_TX_OK; - /* - * If we risk deadlock from transmitting this in the + /* If we risk deadlock from transmitting this in the * netpoll path, tell netpoll to queue the frame for later tx */ if (unlikely(is_netpoll_tx_blocked(dev))) @@ -3862,7 +3900,6 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, * the true receive or transmit bandwidth (not all modes are symmetric) * this is an accurate maximum. */ - read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { if (slave->speed != SPEED_UNKNOWN) @@ -3873,7 +3910,6 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, } } ethtool_cmd_speed_set(ecmd, speed ? 
: SPEED_UNKNOWN); - read_unlock(&bond->lock); return 0; } @@ -3935,9 +3971,7 @@ void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - /* initialize rwlocks */ - rwlock_init(&bond->lock); - rwlock_init(&bond->curr_slave_lock); + spin_lock_init(&bond->mode_lock); bond->params = bonding_defaults; /* Initialize pointers */ @@ -3958,8 +3992,7 @@ void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - /* don't acquire bond device's netif_tx_lock when - * transmitting */ + /* don't acquire bond device's netif_tx_lock when transmitting */ bond_dev->features |= NETIF_F_LLTX; /* By default, we declare the bond to be fully @@ -3982,15 +4015,15 @@ void bond_setup(struct net_device *bond_dev) bond_dev->features |= bond_dev->hw_features; } -/* -* Destroy a bonding device. -* Must be under rtnl_lock when this function is called. -*/ +/* Destroy a bonding device. + * Must be under rtnl_lock when this function is called. + */ static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct list_head *iter; struct slave *slave; + struct bond_up_slave *arr; bond_netpoll_cleanup(bond_dev); @@ -3999,6 +4032,12 @@ static void bond_uninit(struct net_device *bond_dev) __bond_release_one(bond_dev, slave->dev, true); netdev_info(bond_dev, "Released all slaves\n"); + arr = rtnl_dereference(bond->slave_arr); + if (arr) { + RCU_INIT_POINTER(bond->slave_arr, NULL); + kfree_rcu(arr, rcu); + } + list_del(&bond->bond_list); bond_debug_unregister(bond); @@ -4013,9 +4052,7 @@ static int bond_check_params(struct bond_params *params) const struct bond_opt_value *valptr; int arp_all_targets_value; - /* - * Convert string parameters. - */ + /* Convert string parameters. */ if (mode) { bond_opt_initstr(&newval, mode); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval); @@ -4192,9 +4229,9 @@ static int bond_check_params(struct bond_params *params) for (arp_ip_count = 0, i = 0; (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { - /* not complete check, but should be good enough to - catch mistakes */ __be32 ip; + + /* not a complete check, but good enough to catch mistakes */ if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || !bond_is_ip_target_ok(ip)) { pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", @@ -4377,26 +4414,14 @@ static void bond_set_lockdep_class(struct net_device *dev) dev->qdisc_tx_busylock = &bonding_tx_busylock_key; } -/* - * Called from registration process - */ +/* Called from registration process */ static int bond_init(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); netdev_dbg(bond_dev, "Begin bond_init\n"); - /* - * Initialize locks that may be required during - * en/deslave operations. 
All of the bond_open work - * (of which this is part) should really be moved to - * a phase prior to dev_open - */ - spin_lock_init(&(bond_info->tx_hashtbl_lock)); - spin_lock_init(&(bond_info->rx_hashtbl_lock)); - bond->wq = create_singlethread_workqueue(bond_dev->name); if (!bond->wq) return -ENOMEM; @@ -4543,9 +4568,7 @@ static void __exit bonding_exit(void) unregister_pernet_subsys(&bond_net_ops); #ifdef CONFIG_NET_POLL_CONTROLLER - /* - * Make sure we don't have an imbalance on our netpoll blocking - */ + /* Make sure we don't have an imbalance on our netpoll blocking */ WARN_ON(atomic_read(&netpoll_block_tx)); #endif } diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index d163e112f04ce00956fedd17221c3c283ff8a3c6..c13d83e15ace440fc624ff6913d617fdabd2699f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -96,6 +96,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED }, }; +static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { + [IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 }, +}; + static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { @@ -107,6 +111,33 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } +static int bond_slave_changelink(struct net_device *bond_dev, + struct net_device *slave_dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct bonding *bond = netdev_priv(bond_dev); + struct bond_opt_value newval; + int err; + + if (!data) + return 0; + + if (data[IFLA_BOND_SLAVE_QUEUE_ID]) { + u16 queue_id = nla_get_u16(data[IFLA_BOND_SLAVE_QUEUE_ID]); + char queue_id_str[IFNAMSIZ + 7]; + + /* queue_id option setting expects slave_name:queue_id */ + snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n", + slave_dev->name, queue_id); + bond_opt_initstr(&newval, queue_id_str); + err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval); + if (err) + return err; + } + + return 0; +} + static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -412,6 +443,7 @@ static int bond_fill_info(struct sk_buff *skb, unsigned int packets_per_slave; int ifindex, i, targets_added; struct nlattr *targets; + struct slave *primary; if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond))) goto nla_put_failure; @@ -461,9 +493,9 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.arp_all_targets)) goto nla_put_failure; - if (bond->primary_slave && - nla_put_u32(skb, IFLA_BOND_PRIMARY, - bond->primary_slave->dev->ifindex)) + primary = rtnl_dereference(bond->primary_slave); + if (primary && + nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT, @@ -562,6 +594,9 @@ struct rtnl_link_ops bond_link_ops __read_mostly = { .get_num_tx_queues = bond_get_num_tx_queues, .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number as for TX queues */ + .slave_maxtype = IFLA_BOND_SLAVE_MAX, + .slave_policy = bond_slave_policy, + .slave_changelink = bond_slave_changelink, .get_slave_size = bond_get_slave_size, .fill_slave_info = bond_fill_slave_info, }; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index dc73463c2c237d99bf26c9b3ff21512c36f69504..b62697f4a3deb90a5c2209ad6f032c6984284510 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -625,6 +625,8 @@ int 
__bond_opt_set(struct bonding *bond, out: if (ret) bond_opt_error_interpret(bond, opt, ret, val); + else + call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev); return ret; } @@ -732,15 +734,13 @@ static int bond_option_active_slave_set(struct bonding *bond, } block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); - /* check to see if we are clearing active */ if (!slave_dev) { netdev_info(bond->dev, "Clearing current active slave\n"); RCU_INIT_POINTER(bond->curr_active_slave, NULL); bond_select_active_slave(bond); } else { - struct slave *old_active = bond_deref_active_protected(bond); + struct slave *old_active = rtnl_dereference(bond->curr_active_slave); struct slave *new_active = bond_slave_get_rtnl(slave_dev); BUG_ON(!new_active); @@ -763,8 +763,6 @@ static int bond_option_active_slave_set(struct bonding *bond, } } } - - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); return ret; @@ -953,14 +951,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) { - int ret; - - /* not to race with bond_arp_rcv */ - write_lock_bh(&bond->lock); - ret = _bond_option_arp_ip_target_add(bond, target); - write_unlock_bh(&bond->lock); - - return ret; + return _bond_option_arp_ip_target_add(bond, target); } static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) @@ -989,9 +980,6 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) netdev_info(bond->dev, "Removing ARP target %pI4\n", &target); - /* not to race with bond_arp_rcv */ - write_lock_bh(&bond->lock); - bond_for_each_slave(bond, slave, iter) { targets_rx = slave->target_last_arp_rx; for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) @@ -1002,8 +990,6 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) targets[i] = targets[i+1]; targets[i] = 0; - write_unlock_bh(&bond->lock); - return 0; } @@ -1011,11 +997,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond) { int i; - /* not to race with bond_arp_rcv */ - write_lock_bh(&bond->lock); for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) _bond_options_arp_ip_target_set(bond, i, 0, 0); - write_unlock_bh(&bond->lock); } static int bond_option_arp_ip_targets_set(struct bonding *bond, @@ -1079,8 +1062,6 @@ static int bond_option_primary_set(struct bonding *bond, struct slave *slave; block_netpoll_tx(); - read_lock(&bond->lock); - write_lock_bh(&bond->curr_slave_lock); p = strchr(primary, '\n'); if (p) @@ -1088,7 +1069,7 @@ static int bond_option_primary_set(struct bonding *bond, /* check to see if we are clearing primary */ if (!strlen(primary)) { netdev_info(bond->dev, "Setting primary slave to None\n"); - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); memset(bond->params.primary, 0, sizeof(bond->params.primary)); bond_select_active_slave(bond); goto out; @@ -1098,16 +1079,16 @@ static int bond_option_primary_set(struct bonding *bond, if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) { netdev_info(bond->dev, "Setting %s as primary slave\n", slave->dev->name); - bond->primary_slave = slave; + rcu_assign_pointer(bond->primary_slave, slave); strcpy(bond->params.primary, slave->dev->name); bond_select_active_slave(bond); goto out; } } - if (bond->primary_slave) { + if (rtnl_dereference(bond->primary_slave)) { netdev_info(bond->dev, "Setting primary slave to None\n"); - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); 
bond_select_active_slave(bond); } strncpy(bond->params.primary, primary, IFNAMSIZ); @@ -1117,8 +1098,6 @@ static int bond_option_primary_set(struct bonding *bond, primary, bond->dev->name); out: - write_unlock_bh(&bond->curr_slave_lock); - read_unlock(&bond->lock); unblock_netpoll_tx(); return 0; @@ -1132,9 +1111,7 @@ static int bond_option_primary_reselect_set(struct bonding *bond, bond->params.primary_reselect = newval->value; block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); return 0; diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index de62c0385dfb06309a5fa4b4aff3f524a099ebe3..a3948f8d1e533d86502c3efa4eac839dcba0e557 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -7,21 +7,18 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) - __acquires(&bond->lock) { struct bonding *bond = seq->private; struct list_head *iter; struct slave *slave; loff_t off = 0; - /* make sure the bond won't be taken away */ rcu_read_lock(); - read_lock(&bond->lock); if (*pos == 0) return SEQ_START_TOKEN; - bond_for_each_slave(bond, slave, iter) + bond_for_each_slave_rcu(bond, slave, iter) if (++off == *pos) return slave; @@ -37,12 +34,9 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; if (v == SEQ_START_TOKEN) - return bond_first_slave(bond); + return bond_first_slave_rcu(bond); - if (bond_is_last_slave(bond, v)) - return NULL; - - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (found) return slave; if (slave == v) @@ -53,12 +47,8 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void bond_info_seq_stop(struct seq_file *seq, void *v) - __releases(&bond->lock) __releases(RCU) { - struct bonding *bond = seq->private; - - read_unlock(&bond->lock); rcu_read_unlock(); } @@ -66,7 +56,7 @@ static void bond_info_show_master(struct seq_file *seq) { struct bonding *bond = seq->private; const struct bond_opt_value *optval; - struct slave *curr; + struct slave *curr, *primary; int i; curr = rcu_dereference(bond->curr_active_slave); @@ -83,8 +73,7 @@ static void bond_info_show_master(struct seq_file *seq) seq_printf(seq, "\n"); - if (BOND_MODE(bond) == BOND_MODE_XOR || - BOND_MODE(bond) == BOND_MODE_8023AD) { + if (bond_mode_uses_xmit_hash(bond)) { optval = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy); seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", @@ -92,10 +81,10 @@ static void bond_info_show_master(struct seq_file *seq) } if (bond_uses_primary(bond)) { + primary = rcu_dereference(bond->primary_slave); seq_printf(seq, "Primary Slave: %s", - (bond->primary_slave) ? - bond->primary_slave->dev->name : "None"); - if (bond->primary_slave) { + primary ? primary->dev->name : "None"); + if (primary) { optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT, bond->params.primary_reselect); seq_printf(seq, " (primary_reselect %s)", diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 98db8edd9c755c55d04a26b0b40143ba80d15a55..8ffbafd500fd439f4277489aa533790f8f43e774 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -91,7 +91,6 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna * creates and deletes entire bonds. * * The class parameter is ignored. 
- * */ static ssize_t bonding_store_bonds(struct class *cls, struct class_attribute *attr, @@ -425,11 +424,15 @@ static ssize_t bonding_show_primary(struct device *d, struct device_attribute *attr, char *buf) { - int count = 0; struct bonding *bond = to_bond(d); + struct slave *primary; + int count = 0; - if (bond->primary_slave) - count = sprintf(buf, "%s\n", bond->primary_slave->dev->name); + rcu_read_lock(); + primary = rcu_dereference(bond->primary_slave); + if (primary) + count = sprintf(buf, "%s\n", primary->dev->name); + rcu_read_unlock(); return count; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index aace510d08d1dae8b300528ab703452bbe5c99e5..10920f0686e2f0222203359751581d1b728c6617 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -24,6 +24,7 @@ #include #include #include +#include #include "bond_3ad.h" #include "bond_alb.h" @@ -83,7 +84,7 @@ * @pos: current slave * @iter: list_head * iterator * - * Caller must hold bond->lock + * Caller must hold RTNL */ #define bond_for_each_slave(bond, pos, iter) \ netdev_for_each_lower_private((bond)->dev, pos, iter) @@ -175,6 +176,13 @@ struct slave { struct netpoll *np; #endif struct kobject kobj; + struct rtnl_link_stats64 slave_stats; +}; + +struct bond_up_slave { + unsigned int count; + struct rcu_head rcu; + struct slave *arr[0]; }; /* @@ -184,24 +192,26 @@ struct slave { /* * Here are the locking policies for the two bonding locks: - * - * 1) Get bond->lock when reading/writing slave list. - * 2) Get bond->curr_slave_lock when reading/writing bond->curr_active_slave. - * (It is unnecessary when the write-lock is put with bond->lock.) - * 3) When we lock with bond->curr_slave_lock, we must lock with bond->lock - * beforehand. + * Get rcu_read_lock when reading or RTNL when writing slave list. */ struct bonding { struct net_device *dev; /* first - useful for panic debug */ struct slave __rcu *curr_active_slave; struct slave __rcu *current_arp_slave; - struct slave *primary_slave; + struct slave __rcu *primary_slave; + struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ bool force_primary; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); - rwlock_t lock; - rwlock_t curr_slave_lock; + /* mode_lock is used for mode-specific locking needs, currently used by: + * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and + * bond_3ad_state_machine_handler() concurrently and also + * the access to the state machine shared variables. 
+ * TLB mode (5) - to sync the use and modifications of its hash table + * ALB mode (6) - to sync the use and modifications of its hash table + */ + spinlock_t mode_lock; u8 send_peer_notif; u8 igmp_retrans; #ifdef CONFIG_PROC_FS @@ -219,10 +229,12 @@ struct bonding { struct delayed_work alb_work; struct delayed_work ad_work; struct delayed_work mcast_work; + struct delayed_work slave_arr_work; #ifdef CONFIG_DEBUG_FS /* debugging support via debugfs */ struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ + struct rtnl_link_stats64 bond_stats; }; #define bond_slave_get_rcu(dev) \ @@ -231,10 +243,6 @@ struct bonding { #define bond_slave_get_rtnl(dev) \ ((struct slave *) rtnl_dereference(dev->rx_handler_data)) -#define bond_deref_active_protected(bond) \ - rcu_dereference_protected(bond->curr_active_slave, \ - lockdep_is_held(&bond->curr_slave_lock)) - struct bond_vlan_tag { __be16 vlan_proto; unsigned short vlan_id; @@ -274,6 +282,13 @@ static inline bool bond_is_nondyn_tlb(const struct bonding *bond) (bond->params.tlb_dynamic_lb == 0); } +static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_8023AD || + BOND_MODE(bond) == BOND_MODE_XOR || + bond_is_nondyn_tlb(bond)); +} + static inline bool bond_mode_uses_arp(int mode) { return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && @@ -527,6 +542,8 @@ const char *bond_slave_link_status(s8 link); struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, struct net_device *end_dev, int level); +int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave); +void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); #ifdef CONFIG_PROC_FS void bond_create_proc_entry(struct bonding *bond); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 41688229c570eb92e9e79b2bee2bd5b9d6173e39..98d73aab52fe962888a675d1293ae69f9731f7d0 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -65,7 +65,7 @@ config CAN_LEDS config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" - depends on ARCH_AT91 || COMPILE_TEST + depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM ---help--- This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. 
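
The locking policy spelled out in bonding.h above (RCU read-side for readers, RTNL for writers, mode_lock only for mode-internal state) is what the new slave_arr relies on: bond_update_slave_arr() swaps the array under RTNL and frees the old copy via kfree_rcu(), while bond_3ad_xor_xmit() only dereferences it from the RCU read-side section the transmit path already runs in. A condensed sketch of that reader/writer pairing follows; the example_* helpers are illustrative names only, not part of the patch:

	/* Reader - transmit path, already inside an RCU read-side section */
	static struct slave *example_pick_tx_slave(struct bonding *bond,
						   struct sk_buff *skb)
	{
		struct bond_up_slave *slaves;
		unsigned int count;

		slaves = rcu_dereference(bond->slave_arr);
		count = slaves ? ACCESS_ONCE(slaves->count) : 0;
		if (!count)
			return NULL;		/* caller drops the skb */

		return slaves->arr[bond_xmit_hash(bond, skb) % count];
	}

	/* Writer - control path, caller holds RTNL and no spinlock */
	static void example_publish_slave_arr(struct bonding *bond,
					      struct bond_up_slave *new_arr)
	{
		struct bond_up_slave *old_arr = rtnl_dereference(bond->slave_arr);

		rcu_assign_pointer(bond->slave_arr, new_arr);
		if (old_arr)
			kfree_rcu(old_arr, rcu);	/* readers may still hold old_arr */
	}
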
@@ -143,6 +143,8 @@ source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/c_can/Kconfig" +source "drivers/net/can/m_can/Kconfig" + source "drivers/net/can/cc770/Kconfig" source "drivers/net/can/spi/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 1697f22353a943315bdb5e162d3cdce4f7d535f7..fc9304143f449c8eee3aaddcaf1d701fc90b7bb5 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -17,6 +17,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_C_CAN) += c_can/ +obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o @@ -28,4 +29,4 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_RCAR) += rcar_can.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG +subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile index ad1cc842170add4076f17ec08c211bc16b7b647b..9fdc678b5b3787776a94371215a11f2a3d5194f3 100644 --- a/drivers/net/can/c_can/Makefile +++ b/drivers/net/can/c_can/Makefile @@ -5,5 +5,3 @@ obj-$(CONFIG_CAN_C_CAN) += c_can.o obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile index 9fb8321b33eb770c16090963b02d45720e26f8af..8657f879ae19084cbd9143c62e72f72dfea9fe91 100644 --- a/drivers/net/can/cc770/Makefile +++ b/drivers/net/can/cc770/Makefile @@ -5,5 +5,3 @@ obj-$(CONFIG_CAN_CC770) += cc770.o obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 9f91fcba43f8718f4d546e39faaef1aa3bd9fabf..02492d241e4c9e8cb8d49ae319067c7c0aa1f9cf 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -103,11 +103,11 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - long rate, best_rate = 0; long best_error = 1000000000, error = 0; int best_tseg = 0, best_brp = 0, brp = 0; int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; int spt_error = 1000, spt = 0, sampl_pt; + long rate; u64 v64; /* Use CIA recommended sample points */ @@ -152,7 +152,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } best_tseg = tseg / 2; best_brp = brp; - best_rate = rate; if (error == 0) break; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6586309329e6130b9663bb3d3b6287e6d09f38c8..60f86bd0434af7cd93c394677809a3e60932d768 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -92,6 +92,27 @@ #define FLEXCAN_CTRL_ERR_ALL \ (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) +/* FLEXCAN control register 2 (CTRL2) bits */ +#define FLEXCAN_CRL2_ECRWRE BIT(29) +#define FLEXCAN_CRL2_WRMFRZ BIT(28) +#define FLEXCAN_CRL2_RFFN(x) (((x) & 0x0f) << 24) +#define FLEXCAN_CRL2_TASD(x) (((x) & 0x1f) << 19) +#define FLEXCAN_CRL2_MRP BIT(18) +#define FLEXCAN_CRL2_RRS BIT(17) +#define FLEXCAN_CRL2_EACEN BIT(16) + +/* FLEXCAN memory error control register (MECR) bits */ +#define FLEXCAN_MECR_ECRWRDIS BIT(31) +#define FLEXCAN_MECR_HANCEI_MSK BIT(19) +#define FLEXCAN_MECR_FANCEI_MSK BIT(18) +#define FLEXCAN_MECR_CEI_MSK 
BIT(16) +#define FLEXCAN_MECR_HAERRIE BIT(15) +#define FLEXCAN_MECR_FAERRIE BIT(14) +#define FLEXCAN_MECR_EXTERRIE BIT(13) +#define FLEXCAN_MECR_RERRDIS BIT(9) +#define FLEXCAN_MECR_ECCDIS BIT(8) +#define FLEXCAN_MECR_NCEFAFRZ BIT(7) + /* FLEXCAN error and status register (ESR) bits */ #define FLEXCAN_ESR_TWRN_INT BIT(17) #define FLEXCAN_ESR_RWRN_INT BIT(16) @@ -163,18 +184,20 @@ * FLEXCAN hardware feature flags * * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT - * Filter? connected? - * MX25 FlexCAN2 03.00.00.00 no no - * MX28 FlexCAN2 03.00.04.00 yes yes - * MX35 FlexCAN2 03.00.00.00 no no - * MX53 FlexCAN2 03.00.00.00 yes no - * MX6s FlexCAN3 10.00.12.00 yes yes + * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err + * Filter? connected? detection + * MX25 FlexCAN2 03.00.00.00 no no no + * MX28 FlexCAN2 03.00.04.00 yes yes no + * MX35 FlexCAN2 03.00.00.00 no no no + * MX53 FlexCAN2 03.00.00.00 yes no no + * MX6s FlexCAN3 10.00.12.00 yes yes no + * VF610 FlexCAN3 ? no yes yes * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. */ #define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */ #define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */ +#define FLEXCAN_HAS_MECR_FEATURES BIT(3) /* Memory error detection */ /* Structure of the message buffer */ struct flexcan_mb { @@ -205,8 +228,17 @@ struct flexcan_regs { u32 crcr; /* 0x44 */ u32 rxfgmask; /* 0x48 */ u32 rxfir; /* 0x4c */ - u32 _reserved3[12]; - struct flexcan_mb cantxfg[64]; + u32 _reserved3[12]; /* 0x50 */ + struct flexcan_mb cantxfg[64]; /* 0x80 */ + u32 _reserved4[408]; + u32 mecr; /* 0xae0 */ + u32 erriar; /* 0xae4 */ + u32 erridpr; /* 0xae8 */ + u32 errippr; /* 0xaec */ + u32 rerrar; /* 0xaf0 */ + u32 rerrdr; /* 0xaf4 */ + u32 rerrsynr; /* 0xaf8 */ + u32 errsr; /* 0xafc */ }; struct flexcan_devtype_data { @@ -236,6 +268,9 @@ static struct flexcan_devtype_data fsl_imx28_devtype_data; static struct flexcan_devtype_data fsl_imx6q_devtype_data = { .features = FLEXCAN_HAS_V10_FEATURES, }; +static struct flexcan_devtype_data fsl_vf610_devtype_data = { + .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_MECR_FEATURES, +}; static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, @@ -391,8 +426,9 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) return 0; } -static int flexcan_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) + +static int __flexcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; @@ -404,6 +440,29 @@ static int flexcan_get_berr_counter(const struct net_device *dev, return 0; } +static int flexcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + int err; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto out_disable_ipg; + + err = __flexcan_get_berr_counter(dev, bec); + + clk_disable_unprepare(priv->clk_per); + out_disable_ipg: + clk_disable_unprepare(priv->clk_ipg); + + return err; +} + static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); @@ -524,7 +583,7 @@ static void do_state(struct net_device *dev, struct flexcan_priv *priv = netdev_priv(dev); struct can_berr_counter bec; - 
flexcan_get_berr_counter(dev, &bec); + __flexcan_get_berr_counter(dev, &bec); switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: @@ -823,9 +882,8 @@ static int flexcan_chip_start(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; - int err; - u32 reg_mcr, reg_ctrl; - int i; + u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr; + int err, i; /* enable module */ err = flexcan_chip_enable(priv); @@ -914,6 +972,31 @@ static int flexcan_chip_start(struct net_device *dev) if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) flexcan_write(0x0, ®s->rxfgmask); + /* + * On Vybrid, disable memory error detection interrupts + * and freeze mode. + * This also works around errata e5295 which generates + * false positive memory errors and put the device in + * freeze mode. + */ + if (priv->devtype_data->features & FLEXCAN_HAS_MECR_FEATURES) { + /* + * Follow the protocol as described in "Detection + * and Correction of Memory Errors" to write to + * MECR register + */ + reg_crl2 = flexcan_read(®s->crl2); + reg_crl2 |= FLEXCAN_CRL2_ECRWRE; + flexcan_write(reg_crl2, ®s->crl2); + + reg_mecr = flexcan_read(®s->mecr); + reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; + flexcan_write(reg_mecr, ®s->mecr); + reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | + FLEXCAN_MECR_FANCEI_MSK); + flexcan_write(reg_mecr, ®s->mecr); + } + err = flexcan_transceiver_enable(priv); if (err) goto out_chip_disable; @@ -1124,6 +1207,7 @@ static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, + { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..fca5482c09acd43237f85e69765d3e66b96a4b86 --- /dev/null +++ b/drivers/net/can/m_can/Kconfig @@ -0,0 +1,4 @@ +config CAN_M_CAN + tristate "Bosch M_CAN devices" + ---help--- + Say Y here if you want to support for Bosch M_CAN controller. diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8bbd7f24f5be3b6394b9717e63ca6be4eb0ef0f0 --- /dev/null +++ b/drivers/net/can/m_can/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Bosch M_CAN controller driver. +# + +obj-$(CONFIG_CAN_M_CAN) += m_can.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c new file mode 100644 index 0000000000000000000000000000000000000000..10d571eaed856814928151da898bd2053d4d899c --- /dev/null +++ b/drivers/net/can/m_can/m_can.c @@ -0,0 +1,1202 @@ +/* + * CAN bus driver for Bosch M_CAN controller + * + * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Dong Aisheng + * + * Bosch M_CAN user manual can be obtained from: + * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ + * mcan_users_manual_v302.pdf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* napi related */ +#define M_CAN_NAPI_WEIGHT 64 + +/* message ram configuration data length */ +#define MRAM_CFG_LEN 8 + +/* registers definition */ +enum m_can_reg { + M_CAN_CREL = 0x0, + M_CAN_ENDN = 0x4, + M_CAN_CUST = 0x8, + M_CAN_FBTP = 0xc, + M_CAN_TEST = 0x10, + M_CAN_RWD = 0x14, + M_CAN_CCCR = 0x18, + M_CAN_BTP = 0x1c, + M_CAN_TSCC = 0x20, + M_CAN_TSCV = 0x24, + M_CAN_TOCC = 0x28, + M_CAN_TOCV = 0x2c, + M_CAN_ECR = 0x40, + M_CAN_PSR = 0x44, + M_CAN_IR = 0x50, + M_CAN_IE = 0x54, + M_CAN_ILS = 0x58, + M_CAN_ILE = 0x5c, + M_CAN_GFC = 0x80, + M_CAN_SIDFC = 0x84, + M_CAN_XIDFC = 0x88, + M_CAN_XIDAM = 0x90, + M_CAN_HPMS = 0x94, + M_CAN_NDAT1 = 0x98, + M_CAN_NDAT2 = 0x9c, + M_CAN_RXF0C = 0xa0, + M_CAN_RXF0S = 0xa4, + M_CAN_RXF0A = 0xa8, + M_CAN_RXBC = 0xac, + M_CAN_RXF1C = 0xb0, + M_CAN_RXF1S = 0xb4, + M_CAN_RXF1A = 0xb8, + M_CAN_RXESC = 0xbc, + M_CAN_TXBC = 0xc0, + M_CAN_TXFQS = 0xc4, + M_CAN_TXESC = 0xc8, + M_CAN_TXBRP = 0xcc, + M_CAN_TXBAR = 0xd0, + M_CAN_TXBCR = 0xd4, + M_CAN_TXBTO = 0xd8, + M_CAN_TXBCF = 0xdc, + M_CAN_TXBTIE = 0xe0, + M_CAN_TXBCIE = 0xe4, + M_CAN_TXEFC = 0xf0, + M_CAN_TXEFS = 0xf4, + M_CAN_TXEFA = 0xf8, +}; + +/* m_can lec values */ +enum m_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_UNUSED, +}; + +enum m_can_mram_cfg { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_CFG_NUM, +}; + +/* Test Register (TEST) */ +#define TEST_LBCK BIT(4) + +/* CC Control Register(CCCR) */ +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) + +/* Bit Timing & Prescaler Register (BTP) */ +#define BTR_BRP_MASK 0x3ff +#define BTR_BRP_SHIFT 16 +#define BTR_TSEG1_SHIFT 8 +#define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) +#define BTR_TSEG2_SHIFT 4 +#define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) +#define BTR_SJW_SHIFT 0 +#define BTR_SJW_MASK 0xf + +/* Error Counter Register(ECR) */ +#define ECR_RP BIT(15) +#define ECR_REC_SHIFT 8 +#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT) +#define ECR_TEC_SHIFT 0 +#define ECR_TEC_MASK 0xff + +/* Protocol Status Register(PSR) */ +#define PSR_BO BIT(7) +#define PSR_EW BIT(6) +#define PSR_EP BIT(5) +#define PSR_LEC_MASK 0x7 + +/* Interrupt Register(IR) */ +#define IR_ALL_INT 0xffffffff +#define IR_STE BIT(31) +#define IR_FOE BIT(30) +#define IR_ACKE BIT(29) +#define IR_BE BIT(28) +#define IR_CRCE BIT(27) +#define IR_WDI BIT(26) +#define IR_BO BIT(25) +#define IR_EW BIT(24) +#define IR_EP BIT(23) +#define IR_ELO BIT(22) +#define IR_BEU BIT(21) +#define IR_BEC BIT(20) +#define IR_DRX BIT(19) +#define IR_TOO BIT(18) +#define IR_MRAF BIT(17) +#define IR_TSW BIT(16) +#define IR_TEFL BIT(15) +#define IR_TEFF BIT(14) +#define IR_TEFW BIT(13) +#define IR_TEFN BIT(12) +#define IR_TFE BIT(11) +#define IR_TCF BIT(10) +#define IR_TC BIT(9) +#define IR_HPM BIT(8) +#define IR_RF1L BIT(7) +#define IR_RF1F BIT(6) +#define IR_RF1W BIT(5) +#define IR_RF1N BIT(4) +#define IR_RF0L BIT(3) +#define IR_RF0F BIT(2) +#define IR_RF0W BIT(1) +#define IR_RF0N BIT(0) +#define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) +#define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) +#define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \ + IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ + IR_RF1L | IR_RF0L) +#define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS) + +/* 
Interrupt Line Select (ILS) */ +#define ILS_ALL_INT0 0x0 +#define ILS_ALL_INT1 0xFFFFFFFF + +/* Interrupt Line Enable (ILE) */ +#define ILE_EINT0 BIT(0) +#define ILE_EINT1 BIT(1) + +/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ +#define RXFC_FWM_OFF 24 +#define RXFC_FWM_MASK 0x7f +#define RXFC_FWM_1 (1 << RXFC_FWM_OFF) +#define RXFC_FS_OFF 16 +#define RXFC_FS_MASK 0x7f + +/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ +#define RXFS_RFL BIT(25) +#define RXFS_FF BIT(24) +#define RXFS_FPI_OFF 16 +#define RXFS_FPI_MASK 0x3f0000 +#define RXFS_FGI_OFF 8 +#define RXFS_FGI_MASK 0x3f00 +#define RXFS_FFL_MASK 0x7f + +/* Rx Buffer / FIFO Element Size Configuration (RXESC) */ +#define M_CAN_RXESC_8BYTES 0x0 + +/* Tx Buffer Configuration(TXBC) */ +#define TXBC_NDTB_OFF 16 +#define TXBC_NDTB_MASK 0x3f + +/* Tx Buffer Element Size Configuration(TXESC) */ +#define TXESC_TBDS_8BYTES 0x0 + +/* Tx Event FIFO Con.guration (TXEFC) */ +#define TXEFC_EFS_OFF 16 +#define TXEFC_EFS_MASK 0x3f + +/* Message RAM Configuration (in bytes) */ +#define SIDF_ELEMENT_SIZE 4 +#define XIDF_ELEMENT_SIZE 8 +#define RXF0_ELEMENT_SIZE 16 +#define RXF1_ELEMENT_SIZE 16 +#define RXB_ELEMENT_SIZE 16 +#define TXE_ELEMENT_SIZE 8 +#define TXB_ELEMENT_SIZE 16 + +/* Message RAM Elements */ +#define M_CAN_FIFO_ID 0x0 +#define M_CAN_FIFO_DLC 0x4 +#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) + +/* Rx Buffer Element */ +#define RX_BUF_ESI BIT(31) +#define RX_BUF_XTD BIT(30) +#define RX_BUF_RTR BIT(29) + +/* Tx Buffer Element */ +#define TX_BUF_XTD BIT(30) +#define TX_BUF_RTR BIT(29) + +/* address offset and element number for each FIFO/Buffer in the Message RAM */ +struct mram_cfg { + u16 off; + u8 num; +}; + +/* m_can private data structure */ +struct m_can_priv { + struct can_priv can; /* must be the first member */ + struct napi_struct napi; + struct net_device *dev; + struct device *device; + struct clk *hclk; + struct clk *cclk; + void __iomem *base; + u32 irqstatus; + + /* message ram configuration */ + void __iomem *mram_base; + struct mram_cfg mcfg[MRAM_CFG_NUM]; +}; + +static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) +{ + return readl(priv->base + reg); +} + +static inline void m_can_write(const struct m_can_priv *priv, + enum m_can_reg reg, u32 val) +{ + writel(val, priv->base + reg); +} + +static inline u32 m_can_fifo_read(const struct m_can_priv *priv, + u32 fgi, unsigned int offset) +{ + return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + + fgi * RXF0_ELEMENT_SIZE + offset); +} + +static inline void m_can_fifo_write(const struct m_can_priv *priv, + u32 fpi, unsigned int offset, u32 val) +{ + return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + + fpi * TXB_ELEMENT_SIZE + offset); +} + +static inline void m_can_config_endisable(const struct m_can_priv *priv, + bool enable) +{ + u32 cccr = m_can_read(priv, M_CAN_CCCR); + u32 timeout = 10; + u32 val = 0; + + if (enable) { + /* enable m_can configuration */ + m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ + m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); + } else { + m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + } + + /* there's a delay for module initialization */ + if (enable) + val = CCCR_INIT | CCCR_CCE; + + while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { + if (timeout == 0) { + netdev_warn(priv->dev, "Failed to init module\n"); + return; + } + timeout--; + udelay(1); + } +} + +static inline void 
m_can_enable_all_interrupts(const struct m_can_priv *priv) +{ + m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); +} + +static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) +{ + m_can_write(priv, M_CAN_ILE, 0x0); +} + +static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, + u32 rxfs) +{ + struct m_can_priv *priv = netdev_priv(dev); + u32 id, fgi; + + /* calculate the fifo get index for where to read data */ + fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); + if (id & RX_BUF_XTD) + cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (id >> 18) & CAN_SFF_MASK; + + if (id & RX_BUF_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); + *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(0)); + *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(1)); + } + + /* acknowledge rx fifo 0 */ + m_can_write(priv, M_CAN_RXF0A, fgi); +} + +static int m_can_do_rx_poll(struct net_device *dev, int quota) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + u32 pkts = 0; + u32 rxfs; + + rxfs = m_can_read(priv, M_CAN_RXF0S); + if (!(rxfs & RXFS_FFL_MASK)) { + netdev_dbg(dev, "no messages in fifo0\n"); + return 0; + } + + while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { + if (rxfs & RXFS_RFL) + netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); + + skb = alloc_can_skb(dev, &frame); + if (!skb) { + stats->rx_dropped++; + return pkts; + } + + m_can_read_fifo(dev, frame, rxfs); + + stats->rx_packets++; + stats->rx_bytes += frame->can_dlc; + + netif_receive_skb(skb); + + quota--; + pkts++; + rxfs = m_can_read(priv, M_CAN_RXF0S); + } + + if (pkts) + can_led_event(dev, CAN_LED_EVENT_RX); + + return pkts; +} + +static int m_can_handle_lost_msg(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + + netdev_err(dev, "msg lost in rxf0\n"); + + stats->rx_errors++; + stats->rx_over_errors++; + + skb = alloc_can_err_skb(dev, &frame); + if (unlikely(!skb)) + return 0; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + netif_receive_skb(skb); + + return 1; +} + +static int m_can_handle_lec_err(struct net_device *dev, + enum m_can_lec_type lec_type) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + /* check for 'last error code' which tells us the + * type of the last error to occur on the CAN bus + */ + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + + switch (lec_type) { + case LEC_STUFF_ERROR: + netdev_dbg(dev, "stuff error\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case LEC_FORM_ERROR: + netdev_dbg(dev, "form error\n"); + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case LEC_ACK_ERROR: + netdev_dbg(dev, "ack error\n"); + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | + CAN_ERR_PROT_LOC_ACK_DEL); + break; + case LEC_BIT1_ERROR: + netdev_dbg(dev, "bit1 error\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + case LEC_BIT0_ERROR: + netdev_dbg(dev, "bit0 error\n"); 
+ cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + case LEC_CRC_ERROR: + netdev_dbg(dev, "CRC error\n"); + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + CAN_ERR_PROT_LOC_CRC_DEL); + break; + default: + break; + } + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + + return 1; +} + +static int m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_priv *priv = netdev_priv(dev); + unsigned int ecr; + int err; + + err = clk_prepare_enable(priv->hclk); + if (err) + return err; + + err = clk_prepare_enable(priv->cclk); + if (err) { + clk_disable_unprepare(priv->hclk); + return err; + } + + ecr = m_can_read(priv, M_CAN_ECR); + bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; + bec->txerr = ecr & ECR_TEC_MASK; + + clk_disable_unprepare(priv->cclk); + clk_disable_unprepare(priv->hclk); + + return 0; +} + +static int m_can_handle_state_change(struct net_device *dev, + enum can_state new_state) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct can_berr_counter bec; + unsigned int ecr; + + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + priv->can.can_stats.error_passive++; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + priv->can.state = CAN_STATE_BUS_OFF; + m_can_disable_all_interrupts(priv); + can_bus_off(dev); + break; + default: + break; + } + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + m_can_get_berr_counter(dev, &bec); + + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? 
+ CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + cf->can_id |= CAN_ERR_CRTL; + ecr = m_can_read(priv, M_CAN_ECR); + if (ecr & ECR_RP) + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (bec.txerr > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + cf->can_id |= CAN_ERR_BUSOFF; + break; + default: + break; + } + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + + return 1; +} + +static int m_can_handle_state_errors(struct net_device *dev, u32 psr) +{ + struct m_can_priv *priv = netdev_priv(dev); + int work_done = 0; + + if ((psr & PSR_EW) && + (priv->can.state != CAN_STATE_ERROR_WARNING)) { + netdev_dbg(dev, "entered error warning state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_ERROR_WARNING); + } + + if ((psr & PSR_EP) && + (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { + netdev_dbg(dev, "entered error passive state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_ERROR_PASSIVE); + } + + if ((psr & PSR_BO) && + (priv->can.state != CAN_STATE_BUS_OFF)) { + netdev_dbg(dev, "entered error bus off state\n"); + work_done += m_can_handle_state_change(dev, + CAN_STATE_BUS_OFF); + } + + return work_done; +} + +static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) +{ + if (irqstatus & IR_WDI) + netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); + if (irqstatus & IR_ELO) + netdev_err(dev, "Error Logging Overflow\n"); + if (irqstatus & IR_BEU) + netdev_err(dev, "Bit Error Uncorrected\n"); + if (irqstatus & IR_BEC) + netdev_err(dev, "Bit Error Corrected\n"); + if (irqstatus & IR_TOO) + netdev_err(dev, "Timeout reached\n"); + if (irqstatus & IR_MRAF) + netdev_err(dev, "Message RAM access failure occurred\n"); +} + +static inline bool is_lec_err(u32 psr) +{ + psr &= LEC_UNUSED; + + return psr && (psr != LEC_UNUSED); +} + +static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, + u32 psr) +{ + struct m_can_priv *priv = netdev_priv(dev); + int work_done = 0; + + if (irqstatus & IR_RF0L) + work_done += m_can_handle_lost_msg(dev); + + /* handle lec errors on the bus */ + if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + is_lec_err(psr)) + work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); + + /* other unprocessed error interrupts */ + m_can_handle_other_err(dev, irqstatus); + + return work_done; +} + +static int m_can_poll(struct napi_struct *napi, int quota) +{ + struct net_device *dev = napi->dev; + struct m_can_priv *priv = netdev_priv(dev); + int work_done = 0; + u32 irqstatus, psr; + + irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); + if (!irqstatus) + goto end; + + psr = m_can_read(priv, M_CAN_PSR); + if (irqstatus & IR_ERR_STATE) + work_done += m_can_handle_state_errors(dev, psr); + + if (irqstatus & IR_ERR_BUS) + work_done += m_can_handle_bus_errors(dev, irqstatus, psr); + + if (irqstatus & IR_RF0N) + work_done += m_can_do_rx_poll(dev, (quota - work_done)); + + if (work_done < quota) { + napi_complete(napi); + m_can_enable_all_interrupts(priv); + } + +end: + return work_done; +} + +static irqreturn_t m_can_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + u32 ir; + + ir =
m_can_read(priv, M_CAN_IR); + if (!ir) + return IRQ_NONE; + + /* ACK all irqs */ + if (ir & IR_ALL_INT) + m_can_write(priv, M_CAN_IR, ir); + + /* schedule NAPI in case of + * - rx IRQ + * - state change IRQ + * - bus error IRQ and bus error reporting + */ + if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { + priv->irqstatus = ir; + m_can_disable_all_interrupts(priv); + napi_schedule(&priv->napi); + } + + /* transmission complete interrupt */ + if (ir & IR_TC) { + stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); + netif_wake_queue(dev); + } + + return IRQ_HANDLED; +} + +static const struct can_bittiming_const m_can_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static int m_can_set_bittiming(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + u16 brp, sjw, tseg1, tseg2; + u32 reg_btp; + + brp = bt->brp - 1; + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | + (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); + m_can_write(priv, M_CAN_BTP, reg_btp); + netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + return 0; +} + +/* Configure M_CAN chip: + * - set rx buffer/fifo element size + * - configure rx fifo + * - accept non-matching frame into fifo 0 + * - configure tx buffer + * - configure mode + * - setup bittiming + */ +static void m_can_chip_config(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + u32 cccr, test; + + m_can_config_endisable(priv, true); + + /* RX Buffer/FIFO Element Size 8 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + + /* Accept Non-matching Frames Into FIFO 0 */ + m_can_write(priv, M_CAN_GFC, 0x0); + + /* only support one Tx Buffer currently */ + m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | + priv->mcfg[MRAM_TXB].off); + + /* only support 8 bytes firstly */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + + m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | + priv->mcfg[MRAM_TXE].off); + + /* rx fifo configuration, blocking mode, fifo size 1 */ + m_can_write(priv, M_CAN_RXF0C, + (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | + RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); + + m_can_write(priv, M_CAN_RXF1C, + (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | + RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); + + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_TEST | CCCR_MON); + test = m_can_read(priv, M_CAN_TEST); + test &= ~TEST_LBCK; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cccr |= CCCR_MON; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + cccr |= CCCR_TEST; + test |= TEST_LBCK; + } + + m_can_write(priv, M_CAN_CCCR, cccr); + m_can_write(priv, M_CAN_TEST, test); + + /* enable interrupts */ + m_can_write(priv, M_CAN_IR, IR_ALL_INT); + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); + else + m_can_write(priv, M_CAN_IE, IR_ALL_INT); + + /* route all interrupts to INT0 */ + m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); + + /* set bittiming params */ + m_can_set_bittiming(dev); + + m_can_config_endisable(priv, false); +} + +static void m_can_start(struct net_device *dev) 
+{ + struct m_can_priv *priv = netdev_priv(dev); + + /* basic m_can configuration */ + m_can_chip_config(dev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + m_can_enable_all_interrupts(priv); +} + +static int m_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + m_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void free_m_can_dev(struct net_device *dev) +{ + free_candev(dev); +} + +static struct net_device *alloc_m_can_dev(void) +{ + struct net_device *dev; + struct m_can_priv *priv; + + dev = alloc_candev(sizeof(*priv), 1); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + + priv->dev = dev; + priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.do_set_mode = m_can_set_mode; + priv->can.do_get_berr_counter = m_can_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + + return dev; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + int err; + + err = clk_prepare_enable(priv->hclk); + if (err) + return err; + + err = clk_prepare_enable(priv->cclk); + if (err) + goto exit_disable_hclk; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_disable_cclk; + } + + /* register interrupt handler */ + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + m_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + close_candev(dev); +exit_disable_cclk: + clk_disable_unprepare(priv->cclk); +exit_disable_hclk: + clk_disable_unprepare(priv->hclk); + return err; +} + +static void m_can_stop(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + + /* disable all interrupts */ + m_can_disable_all_interrupts(priv); + + clk_disable_unprepare(priv->hclk); + clk_disable_unprepare(priv->cclk); + + /* set the state as STOPPED */ + priv->can.state = CAN_STATE_STOPPED; +} + +static int m_can_close(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + m_can_stop(dev); + free_irq(dev->irq, dev); + close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + + return 0; +} + +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct can_frame *cf = (struct can_frame *)skb->data; + u32 id; + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(dev); + + if (cf->can_id & CAN_EFF_FLAG) { + id = cf->can_id & CAN_EFF_MASK; + id |= TX_BUF_XTD; + } else { + id = ((cf->can_id & CAN_SFF_MASK) << 18); + } + + if (cf->can_id & CAN_RTR_FLAG) + id |= TX_BUF_RTR; + + /* message ram configuration */ + m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + can_put_echo_skb(skb, dev, 0); + + /* enable first TX buffer to start transfer */ + m_can_write(priv, M_CAN_TXBTIE, 0x1); 
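+ /* TXBTIE bit 0 enables the transmission-complete interrupt for this buffer; the TXBAR write below actually requests the transmission */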
+ m_can_write(priv, M_CAN_TXBAR, 0x1); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops m_can_netdev_ops = { + .ndo_open = m_can_open, + .ndo_stop = m_can_close, + .ndo_start_xmit = m_can_start_xmit, +}; + +static int register_m_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; /* we support local echo */ + dev->netdev_ops = &m_can_netdev_ops; + + return register_candev(dev); +} + +static int m_can_of_parse_mram(struct platform_device *pdev, + struct m_can_priv *priv) +{ + struct device_node *np = pdev->dev.of_node; + struct resource *res; + void __iomem *addr; + u32 out_val[MRAM_CFG_LEN]; + int ret; + + /* message ram could be shared */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); + if (!res) + return -ENODEV; + + addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!addr) + return -ENOMEM; + + /* get message ram configuration */ + ret = of_property_read_u32_array(np, "bosch,mram-cfg", + out_val, sizeof(out_val) / 4); + if (ret) { + dev_err(&pdev->dev, "can not get message ram configuration\n"); + return -ENODEV; + } + + priv->mram_base = addr; + priv->mcfg[MRAM_SIDF].off = out_val[0]; + priv->mcfg[MRAM_SIDF].num = out_val[1]; + priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + + priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + priv->mcfg[MRAM_XIDF].num = out_val[2]; + priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + + priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK; + priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + + priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK; + priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + + priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + priv->mcfg[MRAM_RXB].num = out_val[5]; + priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + + priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + priv->mcfg[MRAM_TXE].num = out_val[6]; + priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + + priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK; + + dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", + priv->mram_base, + priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, + priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, + priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, + priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, + priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, + priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, + priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); + + return 0; +} + +static int m_can_plat_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct m_can_priv *priv; + struct resource *res; + void __iomem *addr; + struct clk *hclk, *cclk; + int irq, ret; + + hclk = devm_clk_get(&pdev->dev, "hclk"); + cclk = devm_clk_get(&pdev->dev, "cclk"); + if (IS_ERR(hclk) || IS_ERR(cclk)) { + dev_err(&pdev->dev, "no clock find\n"); + return -ENODEV; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); + addr = devm_ioremap_resource(&pdev->dev, res); + irq = platform_get_irq_byname(pdev, "int0"); + if (IS_ERR(addr) || irq < 0) + return -EINVAL; + + /* allocate the m_can device */ + dev = alloc_m_can_dev(); + if (!dev) + return -ENOMEM; + + priv = netdev_priv(dev); + dev->irq = irq; + priv->base = addr; + priv->device = &pdev->dev; + priv->hclk = hclk; + priv->cclk = cclk; + 
priv->can.clock.freq = clk_get_rate(cclk); + + ret = m_can_of_parse_mram(pdev, priv); + if (ret) + goto failed_free_dev; + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_m_can_dev(dev); + if (ret) { + dev_err(&pdev->dev, "registering %s failed (err=%d)\n", + KBUILD_MODNAME, ret); + goto failed_free_dev; + } + + devm_can_led_init(dev); + + dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", + KBUILD_MODNAME, priv->base, dev->irq); + + return 0; + +failed_free_dev: + free_m_can_dev(dev); + return ret; +} + +static __maybe_unused int m_can_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + /* TODO: enter low power */ + + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +} + +static __maybe_unused int m_can_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct m_can_priv *priv = netdev_priv(ndev); + + /* TODO: exit low power */ + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} + +static void unregister_m_can_dev(struct net_device *dev) +{ + unregister_candev(dev); +} + +static int m_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + unregister_m_can_dev(dev); + platform_set_drvdata(pdev, NULL); + + free_m_can_dev(dev); + + return 0; +} + +static const struct dev_pm_ops m_can_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume) +}; + +static const struct of_device_id m_can_of_table[] = { + { .compatible = "bosch,m_can", .data = NULL }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, m_can_of_table); + +static struct platform_driver m_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = m_can_of_table, + .pm = &m_can_pmops, + }, + .probe = m_can_plat_probe, + .remove = m_can_plat_remove, +}; + +module_platform_driver(m_can_plat_driver); + +MODULE_AUTHOR("Dong Aisheng "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller"); diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile index c9fab17cd8b4d9b37297b1a339faea8f7d820871..58903b45f5fb91c5649eb4c458a41034ab478eef 100644 --- a/drivers/net/can/mscan/Makefile +++ b/drivers/net/can/mscan/Makefile @@ -1,5 +1,3 @@ obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 5268d216ecfae7b5a447abbd1327d7a08a632b46..1abe133d159428e0e098d611e8898bdaabb04c71 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -20,6 +20,7 @@ #include #include #include +#include #define RCAR_CAN_DRV_NAME "rcar_can" @@ -87,6 +88,7 @@ struct rcar_can_priv { struct napi_struct napi; struct rcar_can_regs __iomem *regs; struct clk *clk; + struct clk *can_clk; u8 tx_dlc[RCAR_CAN_FIFO_DEPTH]; u32 tx_head; u32 tx_tail; @@ -505,14 +507,20 @@ static int rcar_can_open(struct net_device *ndev) err = clk_prepare_enable(priv->clk); if (err) { - netdev_err(ndev, "clk_prepare_enable() failed, error %d\n", + netdev_err(ndev, "failed to enable periperal clock, error %d\n", err); goto out; } + err = clk_prepare_enable(priv->can_clk); + if (err) { + netdev_err(ndev, "failed to enable CAN clock, 
error %d\n", + err); + goto out_clock; + } err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed, error %d\n", err); - goto out_clock; + goto out_can_clock; } napi_enable(&priv->napi); err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); @@ -527,6 +535,8 @@ static int rcar_can_open(struct net_device *ndev) out_close: napi_disable(&priv->napi); close_candev(ndev); +out_can_clock: + clk_disable_unprepare(priv->can_clk); out_clock: clk_disable_unprepare(priv->clk); out: @@ -565,6 +575,7 @@ static int rcar_can_close(struct net_device *ndev) rcar_can_stop(ndev); free_irq(ndev->irq, ndev); napi_disable(&priv->napi); + clk_disable_unprepare(priv->can_clk); clk_disable_unprepare(priv->clk); close_candev(ndev); can_led_event(ndev, CAN_LED_EVENT_STOP); @@ -715,6 +726,12 @@ static int rcar_can_get_berr_counter(const struct net_device *dev, return 0; } +static const char * const clock_names[] = { + [CLKR_CLKP1] = "clkp1", + [CLKR_CLKP2] = "clkp2", + [CLKR_CLKEXT] = "can_clk", +}; + static int rcar_can_probe(struct platform_device *pdev) { struct rcar_can_platform_data *pdata; @@ -722,13 +739,20 @@ static int rcar_can_probe(struct platform_device *pdev) struct net_device *ndev; struct resource *mem; void __iomem *addr; + u32 clock_select = CLKR_CLKP1; int err = -ENODEV; int irq; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data provided!\n"); - goto fail; + if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, + "renesas,can-clock-select", &clock_select); + } else { + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No platform data provided!\n"); + goto fail; + } + clock_select = pdata->clock_select; } irq = platform_get_irq(pdev, 0); @@ -753,10 +777,22 @@ static int rcar_can_probe(struct platform_device *pdev) priv = netdev_priv(ndev); - priv->clk = devm_clk_get(&pdev->dev, NULL); + priv->clk = devm_clk_get(&pdev->dev, "clkp1"); if (IS_ERR(priv->clk)) { err = PTR_ERR(priv->clk); - dev_err(&pdev->dev, "cannot get clock: %d\n", err); + dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); + goto fail_clk; + } + + if (clock_select >= ARRAY_SIZE(clock_names)) { + err = -EINVAL; + dev_err(&pdev->dev, "invalid CAN clock selected\n"); + goto fail_clk; + } + priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); + if (IS_ERR(priv->can_clk)) { + err = PTR_ERR(priv->can_clk); + dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); goto fail_clk; } @@ -765,8 +801,8 @@ static int rcar_can_probe(struct platform_device *pdev) ndev->flags |= IFF_ECHO; priv->ndev = ndev; priv->regs = addr; - priv->clock_select = pdata->clock_select; - priv->can.clock.freq = clk_get_rate(priv->clk); + priv->clock_select = clock_select; + priv->can.clock.freq = clk_get_rate(priv->can_clk); priv->can.bittiming_const = &rcar_can_bittiming_const; priv->can.do_set_mode = rcar_can_do_set_mode; priv->can.do_get_berr_counter = rcar_can_get_berr_counter; @@ -858,10 +894,20 @@ static int __maybe_unused rcar_can_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); +static const struct of_device_id rcar_can_of_table[] __maybe_unused = { + { .compatible = "renesas,can-r8a7778" }, + { .compatible = "renesas,can-r8a7779" }, + { .compatible = "renesas,can-r8a7790" }, + { .compatible = "renesas,can-r8a7791" }, + { } +}; +MODULE_DEVICE_TABLE(of, rcar_can_of_table); + static struct platform_driver rcar_can_driver = { .driver = { .name = 
RCAR_CAN_DRV_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(rcar_can_of_table), .pm = &rcar_can_pm_ops, }, .probe = rcar_can_probe, diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile index 531d5fcc97e58bded41ac3f3022a2a84a41ba6f0..be11ddd11b87447c8e2b348bdeb7b57cca1b5d25 100644 --- a/drivers/net/can/sja1000/Makefile +++ b/drivers/net/can/sja1000/Makefile @@ -12,5 +12,3 @@ obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o obj-$(CONFIG_CAN_TSCAN1) += tscan1.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile index c5e5016c742ee040f5ccf8f202b4896357abe7b4..a23da492dad52b169bcdca64cc4fbb6d729d9979 100644 --- a/drivers/net/can/softing/Makefile +++ b/drivers/net/can/softing/Makefile @@ -2,5 +2,3 @@ softing-y := softing_main.o softing_fw.o obj-$(CONFIG_CAN_SOFTING) += softing.o obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index 90bcacffbc65c15372c32cb04df36b6781b82024..0e86040cdd8ce9c2ca037ed27c234dd362812a5f 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -4,5 +4,3 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 5df239e68812635e1d209f835e9a62dbe2fdf555..c66d699640a9c1478026da5df023264a7b1ff973 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1107,10 +1107,10 @@ static int mcp251x_can_probe(struct spi_device *spi) * Minimum coherent DMA allocation is PAGE_SIZE, so allocate * that much and share it between Tx and Rx DMA buffers. */ - priv->spi_tx_buf = dma_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); + priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, + PAGE_SIZE, + &priv->spi_tx_dma, + GFP_DMA); if (priv->spi_tx_buf) { priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); @@ -1156,9 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi) return 0; error_probe: - if (mcp251x_enable_dma) - dma_free_coherent(&spi->dev, PAGE_SIZE, - priv->spi_tx_buf, priv->spi_tx_dma); mcp251x_power_enable(priv->power, 0); out_clk: @@ -1178,11 +1175,6 @@ static int mcp251x_can_remove(struct spi_device *spi) unregister_candev(net); - if (mcp251x_enable_dma) { - dma_free_coherent(&spi->dev, PAGE_SIZE, - priv->spi_tx_buf, priv->spi_tx_dma); - } - mcp251x_power_enable(priv->power, 0); if (!IS_ERR(priv->clk)) diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 7b9a393b1ac82a1caf4684a704d7121ad16dc3c9..a64cf983fb87af3111c0b8b8ed276943a20067e4 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -8,5 +8,3 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index b8fe808b7957c735e0f4c8248ad3bcff14b46716..9234d808cbb35e4000a12afd4f70c33ec5a7d3fd 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,4 +36,25 @@ config NET_DSA_MV88E6123_61_65 This enables support for the Marvell 88E6123/6161/6165 ethernet switch chips. 
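The mcp251x conversion above relies on the device-managed DMA API: dmam_alloc_coherent() ties the buffer's lifetime to the struct device, so the explicit dma_free_coherent() calls in the probe error path and in remove() can simply be deleted. A minimal sketch of that pattern, using a made-up foo_probe() and struct foo_priv purely for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

struct foo_priv {
	void *buf;
	dma_addr_t buf_dma;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Released automatically when the device is unbound: no
	 * dma_free_coherent() needed in remove() or in error paths.
	 */
	priv->buf = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
					&priv->buf_dma, GFP_KERNEL);
	if (!priv->buf)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}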
+config NET_DSA_MV88E6171 + tristate "Marvell 88E6171 ethernet switch chip support" + select NET_DSA + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6171 ethernet switch + chip. + +config NET_DSA_BCM_SF2 + tristate "Broadcom Starfighter 2 Ethernet switch support" + depends on HAS_IOMEM + select NET_DSA + select NET_DSA_TAG_BRCM + select FIXED_PHY if NET_DSA_BCM_SF2=y + select BCM7XXX_PHY + select MDIO_BCM_UNIMAC + ---help--- + This enables support for the Broadcom Starfighter 2 Ethernet + switch chips. + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index f3bda05536cc55e861ae2996103d930433a74dfc..23a90de9830e87cdec4f3ade882ca41e9e188f48 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -7,3 +7,7 @@ endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o endif +ifdef CONFIG_NET_DSA_MV88E6171 +mv88e6xxx_drv-y += mv88e6171.o +endif +obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c new file mode 100644 index 0000000000000000000000000000000000000000..b9625968daacc0eb89c0f7371a3a4e70242f95ce --- /dev/null +++ b/drivers/net/dsa/bcm_sf2.c @@ -0,0 +1,887 @@ +/* + * Broadcom Starfighter 2 DSA switch driver + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcm_sf2.h" +#include "bcm_sf2_regs.h" + +/* String, offset, and register size in bytes if different from 4 bytes */ +static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = { + { "TxOctets", 0x000, 8 }, + { "TxDropPkts", 0x020 }, + { "TxQPKTQ0", 0x030 }, + { "TxBroadcastPkts", 0x040 }, + { "TxMulticastPkts", 0x050 }, + { "TxUnicastPKts", 0x060 }, + { "TxCollisions", 0x070 }, + { "TxSingleCollision", 0x080 }, + { "TxMultipleCollision", 0x090 }, + { "TxDeferredCollision", 0x0a0 }, + { "TxLateCollision", 0x0b0 }, + { "TxExcessiveCollision", 0x0c0 }, + { "TxFrameInDisc", 0x0d0 }, + { "TxPausePkts", 0x0e0 }, + { "TxQPKTQ1", 0x0f0 }, + { "TxQPKTQ2", 0x100 }, + { "TxQPKTQ3", 0x110 }, + { "TxQPKTQ4", 0x120 }, + { "TxQPKTQ5", 0x130 }, + { "RxOctets", 0x140, 8 }, + { "RxUndersizePkts", 0x160 }, + { "RxPausePkts", 0x170 }, + { "RxPkts64Octets", 0x180 }, + { "RxPkts65to127Octets", 0x190 }, + { "RxPkts128to255Octets", 0x1a0 }, + { "RxPkts256to511Octets", 0x1b0 }, + { "RxPkts512to1023Octets", 0x1c0 }, + { "RxPkts1024toMaxPktsOctets", 0x1d0 }, + { "RxOversizePkts", 0x1e0 }, + { "RxJabbers", 0x1f0 }, + { "RxAlignmentErrors", 0x200 }, + { "RxFCSErrors", 0x210 }, + { "RxGoodOctets", 0x220, 8 }, + { "RxDropPkts", 0x240 }, + { "RxUnicastPkts", 0x250 }, + { "RxMulticastPkts", 0x260 }, + { "RxBroadcastPkts", 0x270 }, + { "RxSAChanges", 0x280 }, + { "RxFragments", 0x290 }, + { "RxJumboPkt", 0x2a0 }, + { "RxSymblErr", 0x2b0 }, + { "InRangeErrCount", 0x2c0 }, + { "OutRangeErrCount", 0x2d0 }, + { "EEELpiEvent", 0x2e0 }, + { "EEELpiDuration", 0x2f0 }, + { "RxDiscard", 0x300, 8 }, + { "TxQPKTQ6", 0x320 }, + { "TxQPKTQ7", 0x330 }, + { "TxPkts64Octets", 0x340 }, + { "TxPkts65to127Octets", 0x350 }, + { "TxPkts128to255Octets", 0x360 }, + { "TxPkts256to511Ocets", 0x370 }, + { 
"TxPkts512to1023Ocets", 0x380 }, + { "TxPkts1024toMaxPktOcets", 0x390 }, +}; + +#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib) + +static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, + int port, uint8_t *data) +{ + unsigned int i; + + for (i = 0; i < BCM_SF2_STATS_SIZE; i++) + memcpy(data + i * ETH_GSTRING_LEN, + bcm_sf2_mib[i].string, ETH_GSTRING_LEN); +} + +static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + const struct bcm_sf2_hw_stats *s; + unsigned int i; + u64 val = 0; + u32 offset; + + mutex_lock(&priv->stats_mutex); + + /* Now fetch the per-port counters */ + for (i = 0; i < BCM_SF2_STATS_SIZE; i++) { + s = &bcm_sf2_mib[i]; + + /* Do a latched 64-bit read if needed */ + offset = s->reg + CORE_P_MIB_OFFSET(port); + if (s->sizeof_stat == 8) + val = core_readq(priv, offset); + else + val = core_readl(priv, offset); + + data[i] = (u64)val; + } + + mutex_unlock(&priv->stats_mutex); +} + +static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) +{ + return BCM_SF2_STATS_SIZE; +} + +static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr) +{ + return "Broadcom Starfighter 2"; +} + +static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg; + + /* Enable the IMP Port to be in the same VLAN as the other ports + * on a per-port basis such that we only have Port i and IMP in + * the same VLAN. + */ + for (i = 0; i < priv->hw_params.num_ports; i++) { + if (!((1 << i) & ds->phys_port_mask)) + continue; + + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg |= (1 << cpu_port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + } +} + +static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 reg, val; + + /* Enable the port memories */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + + /* Enable forwarding */ + core_writel(priv, SW_FWDG_EN, CORE_SWMODE); + + /* Enable IMP port in dumb mode */ + reg = core_readl(priv, CORE_SWITCH_CTRL); + reg |= MII_DUMB_FWDG_EN; + core_writel(priv, reg, CORE_SWITCH_CTRL); + + /* Resolve which bit controls the Broadcom tag */ + switch (port) { + case 8: + val = BRCM_HDR_EN_P8; + break; + case 7: + val = BRCM_HDR_EN_P7; + break; + case 5: + val = BRCM_HDR_EN_P5; + break; + default: + val = 0; + break; + } + + /* Enable Broadcom tags for IMP port */ + reg = core_readl(priv, CORE_BRCM_HDR_CTRL); + reg |= val; + core_writel(priv, reg, CORE_BRCM_HDR_CTRL); + + /* Enable reception Broadcom tag for CPU TX (switch RX) to + * allow us to tag outgoing frames + */ + reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS); + + /* Enable transmission of Broadcom tags from the switch (CPU RX) to + * allow delivering frames to the per-port net_devices + */ + reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); + + /* Force link status for IMP port */ + reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); + reg |= (MII_SW_OR | LINK_STS); + core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); 
+} + +static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 reg; + + reg = core_readl(priv, CORE_EEE_EN_CTRL); + if (enable) + reg |= 1 << port; + else + reg &= ~(1 << port); + core_writel(priv, reg, CORE_EEE_EN_CTRL); +} + +static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + s8 cpu_port = ds->dst[ds->index].cpu_port; + u32 reg; + + /* Clear the memory power down */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + core_writel(priv, 0, CORE_G_PCTL_PORT(port)); + + /* Enable port 7 interrupts to get notified */ + if (port == 7) + intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); + + /* Set this port, and only this one to be in the default VLAN */ + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + reg &= ~PORT_VLAN_CTRL_MASK; + reg |= (1 << port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); + + bcm_sf2_imp_vlan_setup(ds, cpu_port); + + /* If EEE was enabled, restore it */ + if (priv->port_sts[port].eee.eee_enabled) + bcm_sf2_eee_enable_set(ds, port, true); + + return 0; +} + +static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 off, reg; + + if (priv->wol_ports_mask & (1 << port)) + return; + + if (port == 7) { + intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF)); + intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR); + } + + if (dsa_is_cpu_port(ds, port)) + off = CORE_IMP_CTL; + else + off = CORE_G_PCTL_PORT(port); + + reg = core_readl(priv, off); + reg |= RX_DIS | TX_DIS; + core_writel(priv, reg, off); + + /* Power down the port memory */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg |= P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); +} + +/* Returns 0 if EEE was not enabled, or 1 otherwise + */ +static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_eee *p = &priv->port_sts[port].eee; + int ret; + + p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); + + ret = phy_init_eee(phy, 0); + if (ret) + return 0; + + bcm_sf2_eee_enable_set(ds, port, true); + + return 1; +} + +static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_eee *p = &priv->port_sts[port].eee; + u32 reg; + + reg = core_readl(priv, CORE_EEE_LPI_INDICATE); + e->eee_enabled = p->eee_enabled; + e->eee_active = !!(reg & (1 << port)); + + return 0; +} + +static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, + struct ethtool_eee *e) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_eee *p = &priv->port_sts[port].eee; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcm_sf2_eee_enable_set(ds, port, false); + } else { + p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); + if (!p->eee_enabled) + return -EOPNOTSUPP; + } + + return 0; +} + +static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) +{ + struct bcm_sf2_priv *priv = dev_id; + + priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & + ~priv->irq0_mask; + intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + + 
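bcm_sf2_port_setup() above leaves exactly one bit set in a user port's CORE_PORT_VLAN_CTL register, and bcm_sf2_imp_vlan_setup() then ORs in the CPU (IMP) port bit for every enabled port, so each user port can only exchange traffic with the IMP port. A standalone sketch of the resulting masks, assuming a hypothetical configuration with user ports 0-3 and port 8 as IMP:

#include <stdio.h>

int main(void)
{
	unsigned int phys_port_mask = 0x0f;	/* assumed: user ports 0-3 */
	unsigned int cpu_port = 8;		/* assumed: IMP port */
	unsigned int port;

	/* Prints 0x101, 0x102, 0x104, 0x108 -- one bit for the port
	 * itself, one for the IMP port, within PORT_VLAN_CTRL_MASK (0x1ff)
	 */
	for (port = 0; port < 4; port++) {
		if (!((1u << port) & phys_port_mask))
			continue;

		printf("port %u: CORE_PORT_VLAN_CTL = 0x%03x\n",
		       port, (1u << port) | (1u << cpu_port));
	}
	return 0;
}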
return IRQ_HANDLED; +} + +static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) +{ + struct bcm_sf2_priv *priv = dev_id; + + priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & + ~priv->irq1_mask; + intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); + + if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) + priv->port_sts[7].link = 1; + if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) + priv->port_sts[7].link = 0; + + return IRQ_HANDLED; +} + +static int bcm_sf2_sw_setup(struct dsa_switch *ds) +{ + const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct device_node *dn; + void __iomem **base; + unsigned int port; + unsigned int i; + u32 reg, rev; + int ret; + + spin_lock_init(&priv->indir_lock); + mutex_init(&priv->stats_mutex); + + /* All the interesting properties are at the parent device_node + * level + */ + dn = ds->pd->of_node->parent; + + priv->irq0 = irq_of_parse_and_map(dn, 0); + priv->irq1 = irq_of_parse_and_map(dn, 1); + + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + *base = of_iomap(dn, i); + if (*base == NULL) { + pr_err("unable to find register: %s\n", reg_names[i]); + return -ENODEV; + } + base++; + } + + /* Disable all interrupts and request them */ + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + + ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); + if (ret < 0) { + pr_err("failed to request switch_0 IRQ\n"); + goto out_unmap; + } + + ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); + if (ret < 0) { + pr_err("failed to request switch_1 IRQ\n"); + goto out_free_irq0; + } + + /* Reset the MIB counters */ + reg = core_readl(priv, CORE_GMNCFGCFG); + reg |= RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + reg &= ~RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + + /* Get the maximum number of ports for this switch */ + priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; + if (priv->hw_params.num_ports > DSA_MAX_PORTS) + priv->hw_params.num_ports = DSA_MAX_PORTS; + + /* Assume a single GPHY setup if we can't read that property */ + if (of_property_read_u32(dn, "brcm,num-gphy", + &priv->hw_params.num_gphy)) + priv->hw_params.num_gphy = 1; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port receives special treatment */ + if ((1 << port) & ds->phys_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + /* Include the pseudo-PHY address and the broadcast PHY address to + * divert reads towards our workaround + */ + ds->phys_mii_mask |= ((1 << 30) | (1 << 0)); + + rev = reg_readl(priv, REG_SWITCH_REVISION); + priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & + SWITCH_TOP_REV_MASK; + priv->hw_params.core_rev = (rev & SF2_REV_MASK); + + rev = reg_readl(priv, REG_PHY_REVISION); + priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", + priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, + priv->hw_params.core_rev >> 8, 
priv->hw_params.core_rev & 0xff, + priv->core, priv->irq0, priv->irq1); + + return 0; + +out_free_irq0: + free_irq(priv->irq0, priv); +out_unmap: + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + iounmap(*base); + base++; + } + return ret; +} + +static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) +{ + return 0; +} + +static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + /* The BCM7xxx PHY driver expects to find the integrated PHY revision + * in bits 15:8 and the patch level in bits 7:0 which is exactly what + * the REG_PHY_REVISION register layout is. + */ + + return priv->hw_params.gphy_rev; +} + +static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, + int regnum, u16 val) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + int ret = 0; + u32 reg; + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg |= MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + /* Page << 8 | offset */ + reg = 0x70; + reg <<= 2; + core_writel(priv, addr, reg); + + /* Page << 8 | offset */ + reg = 0x80 << 8 | regnum << 1; + reg <<= 2; + + if (op) + ret = core_readl(priv, reg); + else + core_writel(priv, val, reg); + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg &= ~MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + return ret & 0xffff; +} + +static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + /* Intercept reads from the MDIO broadcast address or Broadcom + * pseudo-PHY address + */ + switch (addr) { + case 0: + case 30: + return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0); + default: + return 0xffff; + } +} + +static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + /* Intercept writes to the MDIO broadcast address or Broadcom + * pseudo-PHY address + */ + switch (addr) { + case 0: + case 30: + bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val); + break; + } + + return 0; +} + +static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 id_mode_dis = 0, port_mode; + const char *str = NULL; + u32 reg; + + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + str = "RGMII (no delay)"; + id_mode_dis = 1; + case PHY_INTERFACE_MODE_RGMII_TXID: + if (!str) + str = "RGMII (TX delay)"; + port_mode = EXT_GPHY; + break; + case PHY_INTERFACE_MODE_MII: + str = "MII"; + port_mode = EXT_EPHY; + break; + case PHY_INTERFACE_MODE_REVMII: + str = "Reverse MII"; + port_mode = EXT_REVMII; + break; + default: + /* All other PHYs: internal and MoCA */ + goto force_link; + } + + /* If the link is down, just disable the interface to conserve power */ + if (!phydev->link) { + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg &= ~RGMII_MODE_EN; + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + goto force_link; + } + + /* Clear id_mode_dis bit, and the existing port mode, but + * make sure we enable the RGMII block for data to pass + */ + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg &= ~ID_MODE_DIS; + reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); + reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); + + reg |= port_mode | RGMII_MODE_EN; + if (id_mode_dis) + reg |= ID_MODE_DIS; + + if (phydev->pause) { + if (phydev->asym_pause) + reg |= TX_PAUSE_EN; + reg |= RX_PAUSE_EN; + } + + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + + pr_info("Port %d configured for %s\n", port, str); + +force_link: + /* Force link settings detected from the PHY */ + 
reg = SW_OVERRIDE; + switch (phydev->speed) { + case SPEED_1000: + reg |= SPDSTS_1000 << SPEED_SHIFT; + break; + case SPEED_100: + reg |= SPDSTS_100 << SPEED_SHIFT; + break; + } + + if (phydev->link) + reg |= LINK_STS; + if (phydev->duplex == DUPLEX_FULL) + reg |= DUPLX_MODE; + + core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); +} + +static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, + struct fixed_phy_status *status) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 link, duplex, pause, speed; + u32 reg; + + link = core_readl(priv, CORE_LNKSTS); + duplex = core_readl(priv, CORE_DUPSTS); + pause = core_readl(priv, CORE_PAUSESTS); + speed = core_readl(priv, CORE_SPDSTS); + + speed >>= (port * SPDSTS_SHIFT); + speed &= SPDSTS_MASK; + + status->link = 0; + + /* Port 7 is special as we do not get link status from CORE_LNKSTS, + * which means that we need to force the link at the port override + * level to get the data to flow. We do use what the interrupt handler + * did determine before. + */ + if (port == 7) { + status->link = priv->port_sts[port].link; + reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7)); + reg |= SW_OVERRIDE; + if (status->link) + reg |= LINK_STS; + else + reg &= ~LINK_STS; + core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7)); + status->duplex = 1; + } else { + status->link = !!(link & (1 << port)); + status->duplex = !!(duplex & (1 << port)); + } + + switch (speed) { + case SPDSTS_10: + status->speed = SPEED_10; + break; + case SPDSTS_100: + status->speed = SPEED_100; + break; + case SPDSTS_1000: + status->speed = SPEED_1000; + break; + } + + if ((pause & (1 << port)) && + (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { + status->asym_pause = 1; + status->pause = 1; + } + + if (pause & (1 << port)) + status->pause = 1; +} + +static int bcm_sf2_sw_suspend(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + + /* Disable all ports physically present including the IMP + * port, the other ones have already been disabled during + * bcm_sf2_sw_setup + */ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if ((1 << port) & ds->phys_port_mask || + dsa_is_cpu_port(ds, port)) + bcm_sf2_port_disable(ds, port, NULL); + } + + return 0; +} + +static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; + core_writel(priv, reg, CORE_WATCHDOG_CTRL); + + do { + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + if (!(reg & SOFTWARE_RESET)) + break; + + usleep_range(1000, 2000); + } while (timeout-- > 0); + + if (timeout == 0) + return -ETIMEDOUT; + + return 0; +} + +static int bcm_sf2_sw_resume(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + u32 reg; + int ret; + + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("%s: failed to software reset switch\n", __func__); + return ret; + } + + /* Reinitialize the single GPHY */ + if (priv->hw_params.num_gphy == 1) { + reg = reg_readl(priv, REG_SPHY_CNTRL); + reg |= PHY_RESET; + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS); + reg_writel(priv, reg, 
REG_SPHY_CNTRL); + udelay(21); + reg = reg_readl(priv, REG_SPHY_CNTRL); + reg &= ~PHY_RESET; + reg_writel(priv, reg, REG_SPHY_CNTRL); + } + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if ((1 << port) & ds->phys_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + } + + return 0; +} + +static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct net_device *p = ds->dst[ds->index].master_netdev; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_wolinfo pwol; + + /* Get the parent device WoL settings */ + p->ethtool_ops->get_wol(p, &pwol); + + /* Advertise the parent device supported settings */ + wol->supported = pwol.supported; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + + if (pwol.wolopts & WAKE_MAGICSECURE) + memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass)); + + if (priv->wol_ports_mask & (1 << port)) + wol->wolopts = pwol.wolopts; + else + wol->wolopts = 0; +} + +static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct net_device *p = ds->dst[ds->index].master_netdev; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + s8 cpu_port = ds->dst[ds->index].cpu_port; + struct ethtool_wolinfo pwol; + + p->ethtool_ops->get_wol(p, &pwol); + if (wol->wolopts & ~pwol.supported) + return -EINVAL; + + if (wol->wolopts) + priv->wol_ports_mask |= (1 << port); + else + priv->wol_ports_mask &= ~(1 << port); + + /* If we have at least one port enabled, make sure the CPU port + * is also enabled. If the CPU port is the last one enabled, we disable + * it since this configuration does not make sense. + */ + if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port)) + priv->wol_ports_mask |= (1 << cpu_port); + else + priv->wol_ports_mask &= ~(1 << cpu_port); + + return p->ethtool_ops->set_wol(p, wol); +} + +static struct dsa_switch_driver bcm_sf2_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_BRCM, + .priv_size = sizeof(struct bcm_sf2_priv), + .probe = bcm_sf2_sw_probe, + .setup = bcm_sf2_sw_setup, + .set_addr = bcm_sf2_sw_set_addr, + .get_phy_flags = bcm_sf2_sw_get_phy_flags, + .phy_read = bcm_sf2_sw_phy_read, + .phy_write = bcm_sf2_sw_phy_write, + .get_strings = bcm_sf2_sw_get_strings, + .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, + .get_sset_count = bcm_sf2_sw_get_sset_count, + .adjust_link = bcm_sf2_sw_adjust_link, + .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .suspend = bcm_sf2_sw_suspend, + .resume = bcm_sf2_sw_resume, + .get_wol = bcm_sf2_sw_get_wol, + .set_wol = bcm_sf2_sw_set_wol, + .port_enable = bcm_sf2_port_setup, + .port_disable = bcm_sf2_port_disable, + .get_eee = bcm_sf2_sw_get_eee, + .set_eee = bcm_sf2_sw_set_eee, +}; + +static int __init bcm_sf2_init(void) +{ + register_switch_driver(&bcm_sf2_switch_driver); + + return 0; +} +module_init(bcm_sf2_init); + +static void __exit bcm_sf2_exit(void) +{ + unregister_switch_driver(&bcm_sf2_switch_driver); +} +module_exit(bcm_sf2_exit); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:brcm-sf2"); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h new file mode 100644 index 0000000000000000000000000000000000000000..ee9f650d50264bb74771783b65e860b2aabb3d46 --- /dev/null +++ b/drivers/net/dsa/bcm_sf2.h @@ -0,0 +1,147 @@ +/* + * Broadcom Starfighter2 private context + * + * Copyright (C) 2014, Broadcom 
Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __BCM_SF2_H +#define __BCM_SF2_H + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcm_sf2_regs.h" + +struct bcm_sf2_hw_params { + u16 top_rev; + u16 core_rev; + u16 gphy_rev; + u32 num_gphy; + u8 num_acb_queue; + u8 num_rgmii; + u8 num_ports; + u8 fcb_pause_override:1; + u8 acb_packets_inflight:1; +}; + +#define BCM_SF2_REGS_NAME {\ + "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \ +} + +#define BCM_SF2_REGS_NUM 6 + +struct bcm_sf2_port_status { + unsigned int link; + + struct ethtool_eee eee; +}; + +struct bcm_sf2_priv { + /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ + void __iomem *core; + void __iomem *reg; + void __iomem *intrl2_0; + void __iomem *intrl2_1; + void __iomem *fcb; + void __iomem *acb; + + /* spinlock protecting access to the indirect registers */ + spinlock_t indir_lock; + + int irq0; + int irq1; + u32 irq0_stat; + u32 irq0_mask; + u32 irq1_stat; + u32 irq1_mask; + + /* Mutex protecting access to the MIB counters */ + struct mutex stats_mutex; + + struct bcm_sf2_hw_params hw_params; + + struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS]; + + /* Mask of ports enabled for Wake-on-LAN */ + u32 wol_ports_mask; +}; + +struct bcm_sf2_hw_stats { + const char *string; + u16 reg; + u8 sizeof_stat; +}; + +#define SF2_IO_MACRO(name) \ +static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ +{ \ + return __raw_readl(priv->name + off); \ +} \ +static inline void name##_writel(struct bcm_sf2_priv *priv, \ + u32 val, u32 off) \ +{ \ + __raw_writel(val, priv->name + off); \ +} \ + +/* Accesses to 64-bit registers require us to latch the hi/lo pairs + * using the REG_DIR_DATA_{READ,WRITE} ancillary registers. The 'indir_lock' + * spinlock is automatically grabbed and released to provide relative + * atomicity with latched reads/writes.
+ */ +#define SF2_IO64_MACRO(name) \ +static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ +{ \ + u32 indir, dir; \ + spin_lock(&priv->indir_lock); \ + indir = reg_readl(priv, REG_DIR_DATA_READ); \ + dir = __raw_readl(priv->name + off); \ + spin_unlock(&priv->indir_lock); \ + return (u64)indir << 32 | dir; \ +} \ +static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \ + u64 val) \ +{ \ + spin_lock(&priv->indir_lock); \ + reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ + __raw_writel(lower_32_bits(val), priv->name + off); \ + spin_unlock(&priv->indir_lock); \ +} + +#define SWITCH_INTR_L2(which) \ +static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ + u32 mask) \ +{ \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ + priv->irq##which##_mask &= ~(mask); \ +} \ +static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ + u32 mask) \ +{ \ + intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ + priv->irq##which##_mask |= (mask); \ +} \ + +SF2_IO_MACRO(core); +SF2_IO_MACRO(reg); +SF2_IO64_MACRO(core); +SF2_IO_MACRO(intrl2_0); +SF2_IO_MACRO(intrl2_1); +SF2_IO_MACRO(fcb); +SF2_IO_MACRO(acb); + +SWITCH_INTR_L2(0); +SWITCH_INTR_L2(1); + +#endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..1bb49cb699ab02a5af7ef1de66ab6bbed0aa47ae --- /dev/null +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -0,0 +1,231 @@ +/* + * Broadcom Starfighter 2 switch register defines + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef __BCM_SF2_REGS_H +#define __BCM_SF2_REGS_H + +/* Register set relative to 'REG' */ +#define REG_SWITCH_CNTRL 0x00 +#define MDIO_MASTER_SEL (1 << 0) + +#define REG_SWITCH_STATUS 0x04 +#define REG_DIR_DATA_WRITE 0x08 +#define REG_DIR_DATA_READ 0x0C + +#define REG_SWITCH_REVISION 0x18 +#define SF2_REV_MASK 0xffff +#define SWITCH_TOP_REV_SHIFT 16 +#define SWITCH_TOP_REV_MASK 0xffff + +#define REG_PHY_REVISION 0x1C +#define PHY_REVISION_MASK 0xffff + +#define REG_SPHY_CNTRL 0x2C +#define IDDQ_BIAS (1 << 0) +#define EXT_PWR_DOWN (1 << 1) +#define FORCE_DLL_EN (1 << 2) +#define IDDQ_GLOBAL_PWR (1 << 3) +#define CK25_DIS (1 << 4) +#define PHY_RESET (1 << 5) +#define PHY_PHYAD_SHIFT 8 +#define PHY_PHYAD_MASK 0x1F + +#define REG_RGMII_0_BASE 0x34 +#define REG_RGMII_CNTRL 0x00 +#define REG_RGMII_IB_STATUS 0x04 +#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08 +#define REG_RGMII_CNTRL_SIZE 0x0C +#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \ + ((x) * REG_RGMII_CNTRL_SIZE)) +/* Relative to REG_RGMII_CNTRL */ +#define RGMII_MODE_EN (1 << 0) +#define ID_MODE_DIS (1 << 1) +#define PORT_MODE_SHIFT 2 +#define INT_EPHY (0 << PORT_MODE_SHIFT) +#define INT_GPHY (1 << PORT_MODE_SHIFT) +#define EXT_EPHY (2 << PORT_MODE_SHIFT) +#define EXT_GPHY (3 << PORT_MODE_SHIFT) +#define EXT_REVMII (4 << PORT_MODE_SHIFT) +#define PORT_MODE_MASK 0x7 +#define RVMII_REF_SEL (1 << 5) +#define RX_PAUSE_EN (1 << 6) +#define TX_PAUSE_EN (1 << 7) +#define TX_CLK_STOP_EN (1 << 8) +#define LPI_COUNT_SHIFT 9 +#define LPI_COUNT_MASK 0x3F + +/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ +#define INTRL2_CPU_STATUS 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0c +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* Shared INTRL2_0 and INTRL2_ interrupt sources macros */ +#define P_LINK_UP_IRQ(x) (1 << (0 + (x))) +#define P_LINK_DOWN_IRQ(x) (1 << (1 + (x))) +#define P_ENERGY_ON_IRQ(x) (1 << (2 + (x))) +#define P_ENERGY_OFF_IRQ(x) (1 << (3 + (x))) +#define P_GPHY_IRQ(x) (1 << (4 + (x))) +#define P_NUM_IRQ 5 +#define P_IRQ_MASK(x) (P_LINK_UP_IRQ((x)) | \ + P_LINK_DOWN_IRQ((x)) | \ + P_ENERGY_ON_IRQ((x)) | \ + P_ENERGY_OFF_IRQ((x)) | \ + P_GPHY_IRQ((x))) + +/* INTRL2_0 interrupt sources */ +#define P0_IRQ_OFF 0 +#define MEM_DOUBLE_IRQ (1 << 5) +#define EEE_LPI_IRQ (1 << 6) +#define P5_CPU_WAKE_IRQ (1 << 7) +#define P8_CPU_WAKE_IRQ (1 << 8) +#define P7_CPU_WAKE_IRQ (1 << 9) +#define IEEE1588_IRQ (1 << 10) +#define MDIO_ERR_IRQ (1 << 11) +#define MDIO_DONE_IRQ (1 << 12) +#define GISB_ERR_IRQ (1 << 13) +#define UBUS_ERR_IRQ (1 << 14) +#define FAILOVER_ON_IRQ (1 << 15) +#define FAILOVER_OFF_IRQ (1 << 16) +#define TCAM_SOFT_ERR_IRQ (1 << 17) + +/* INTRL2_1 interrupt sources */ +#define P7_IRQ_OFF 0 +#define P_IRQ_OFF(x) ((6 - (x)) * P_NUM_IRQ) + +/* Register set relative to 'CORE' */ +#define CORE_G_PCTL_PORT0 0x00000 +#define CORE_G_PCTL_PORT(x) (CORE_G_PCTL_PORT0 + (x * 0x4)) +#define CORE_IMP_CTL 0x00020 +#define RX_DIS (1 << 0) +#define TX_DIS (1 << 1) +#define RX_BCST_EN (1 << 2) +#define RX_MCST_EN (1 << 3) +#define RX_UCST_EN (1 << 4) +#define G_MISTP_STATE_SHIFT 5 +#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT) +#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT) +#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT) +#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT) +#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT) +#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT) +#define G_MISTP_STATE_MASK 0x7 + +#define CORE_SWMODE 
0x0002c +#define SW_FWDG_MODE (1 << 0) +#define SW_FWDG_EN (1 << 1) +#define RTRY_LMT_DIS (1 << 2) + +#define CORE_STS_OVERRIDE_IMP 0x00038 +#define GMII_SPEED_UP_2G (1 << 6) +#define MII_SW_OR (1 << 7) + +#define CORE_NEW_CTRL 0x00084 +#define IP_MC (1 << 0) +#define OUTRANGEERR_DISCARD (1 << 1) +#define INRANGEERR_DISCARD (1 << 2) +#define CABLE_DIAG_LEN (1 << 3) +#define OVERRIDE_AUTO_PD_WAR (1 << 4) +#define EN_AUTO_PD_WAR (1 << 5) +#define UC_FWD_EN (1 << 6) +#define MC_FWD_EN (1 << 7) + +#define CORE_SWITCH_CTRL 0x00088 +#define MII_DUMB_FWDG_EN (1 << 6) + +#define CORE_SFT_LRN_CTRL 0x000f8 +#define SW_LEARN_CNTL(x) (1 << (x)) + +#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4) +#define LINK_STS (1 << 0) +#define DUPLX_MODE (1 << 1) +#define SPEED_SHIFT 2 +#define SPEED_MASK 0x3 +#define RXFLOW_CNTL (1 << 4) +#define TXFLOW_CNTL (1 << 5) +#define SW_OVERRIDE (1 << 6) + +#define CORE_WATCHDOG_CTRL 0x001e4 +#define SOFTWARE_RESET (1 << 7) +#define EN_CHIP_RST (1 << 6) +#define EN_SW_RESET (1 << 4) + +#define CORE_LNKSTS 0x00400 +#define LNK_STS_MASK 0x1ff + +#define CORE_SPDSTS 0x00410 +#define SPDSTS_10 0 +#define SPDSTS_100 1 +#define SPDSTS_1000 2 +#define SPDSTS_SHIFT 2 +#define SPDSTS_MASK 0x3 + +#define CORE_DUPSTS 0x00420 +#define CORE_DUPSTS_MASK 0x1ff + +#define CORE_PAUSESTS 0x00428 +#define PAUSESTS_TX_PAUSE_SHIFT 9 + +#define CORE_GMNCFGCFG 0x0800 +#define RST_MIB_CNT (1 << 0) +#define RXBPDU_EN (1 << 1) + +#define CORE_IMP0_PRT_ID 0x0804 + +#define CORE_BRCM_HDR_CTRL 0x0080c +#define BRCM_HDR_EN_P8 (1 << 0) +#define BRCM_HDR_EN_P5 (1 << 1) +#define BRCM_HDR_EN_P7 (1 << 2) + +#define CORE_BRCM_HDR_CTRL2 0x0828 + +#define CORE_HL_PRTC_CTRL 0x0940 +#define ARP_EN (1 << 0) +#define RARP_EN (1 << 1) +#define DHCP_EN (1 << 2) +#define ICMPV4_EN (1 << 3) +#define ICMPV6_EN (1 << 4) +#define ICMPV6_FWD_MODE (1 << 5) +#define IGMP_DIP_EN (1 << 8) +#define IGMP_RPTLVE_EN (1 << 9) +#define IGMP_RTPLVE_FWD_MODE (1 << 10) +#define IGMP_QRY_EN (1 << 11) +#define IGMP_QRY_FWD_MODE (1 << 12) +#define IGMP_UKN_EN (1 << 13) +#define IGMP_UKN_FWD_MODE (1 << 14) +#define MLD_RPTDONE_EN (1 << 15) +#define MLD_RPTDONE_FWD_MODE (1 << 16) +#define MLD_QRY_EN (1 << 17) +#define MLD_QRY_FWD_MODE (1 << 18) + +#define CORE_RST_MIB_CNT_EN 0x0950 + +#define CORE_BRCM_HDR_RX_DIS 0x0980 +#define CORE_BRCM_HDR_TX_DIS 0x0988 + +#define CORE_MEM_PSM_VDD_CTRL 0x2380 +#define P_TXQ_PSM_VDD_SHIFT 2 +#define P_TXQ_PSM_VDD_MASK 0x3 +#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ + ((x) * P_TXQ_PSM_VDD_SHIFT)) + +#define CORE_P0_MIB_OFFSET 0x8000 +#define P_MIB_SIZE 0x400 +#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE) + +#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) +#define PORT_VLAN_CTRL_MASK 0x1ff + +#define CORE_EEE_EN_CTRL 0x24800 +#define CORE_EEE_LPI_INDICATE 0x24810 + +#endif /* __BCM_SF2_REGS_H */ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 7a54ec04b4181210961f2819c7fca1e260cb7661..776e965dc9f45c886b5c23456a801d2a26bbc2cd 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -21,7 +21,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) { - return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg); + return mdiobus_read(to_mii_bus(ds->master_dev), + ds->pd->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -37,8 +38,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { - return 
mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr, - reg, val); + return mdiobus_write(to_mii_bus(ds->master_dev), + ds->pd->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ @@ -50,10 +51,14 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) return __ret; \ }) -static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6060_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); if (ret >= 0) { ret &= 0xfff0; @@ -258,7 +263,7 @@ static void mv88e6060_poll_link(struct dsa_switch *ds) } static struct dsa_switch_driver mv88e6060_switch_driver = { - .tag_protocol = htons(ETH_P_TRAILER), + .tag_protocol = DSA_TAG_PROTO_TRAILER, .probe = mv88e6060_probe, .setup = mv88e6060_setup, .set_addr = mv88e6060_set_addr, diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index 69c42513dd724bb06ef40eaf4b5e4bbea800936d..a332c53ff955906eaeec11edd81350b247561fed 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -17,10 +17,14 @@ #include #include "mv88e6xxx.h" -static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { if (ret == 0x1212) @@ -207,7 +211,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) */ val = 0x0433; if (dsa_is_cpu_port(ds, p)) { - if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) val |= 0x3300; else val |= 0x0100; @@ -391,7 +395,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6123_61_65_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_EDSA), + .tag_protocol = DSA_TAG_PROTO_EDSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_61_65_probe, .setup = mv88e6123_61_65_setup, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 953bc6a49e594471342270e7281a8e263f0086de..244c735014fa47421b25091efd9b70077e1089e0 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -22,10 +22,14 @@ #define ID_6095 0x0950 #define ID_6131 0x1060 -static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6131_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { ret &= 0xfff0; @@ -379,7 +383,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_DSA), + .tag_protocol = DSA_TAG_PROTO_DSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6131_probe, .setup = mv88e6131_setup, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c new file mode 100644 index 0000000000000000000000000000000000000000..6365e30138af7e613a9ad2dc4b27a79f583142a9 --- /dev/null +++ b/drivers/net/dsa/mv88e6171.c @@ -0,0 +1,411 @@ +/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support + * Copyright (c) 2008-2009 Marvell Semiconductor + * Copyright (c) 2014 Claudio Leite + * + * 
This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "mv88e6xxx.h" + +static char *mv88e6171_probe(struct device *host_dev, int sw_addr) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (bus == NULL) + return NULL; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + if ((ret & 0xfff0) == 0x1710) + return "Marvell 88E6171"; + } + + return NULL; +} + +static int mv88e6171_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + unsigned long timeout; + + /* Set all ports to the disabled state. */ + for (i = 0; i < 8; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* Reset the switch. */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0xc800) == 0xc800) + break; + + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + /* Enable ports not under DSA, e.g. WAN port */ + for (i = 0; i < 8; i++) { + if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)) + continue; + + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret | 0x03); + } + + return 0; +} + +static int mv88e6171_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* Disable the PHY polling unit (since there won't be any + * external PHYs to poll), don't discard packets with + * excessive collisions, and mask all interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x0000); + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* Configure the priority mapping registers. */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + if (REG_READ(REG_PORT(0), 0x03) == 0x1710) + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111)); + else + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* Disable remote management for now, and set the switch's + * DSA device number. + */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* Program the DSA routing table. 
*/ + for (i = 0; i < 32; i++) { + int nexthop; + + nexthop = 0x1f; + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* Clear all trunk masks. */ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 6; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* Initialise cross-chip port VLAN table to reset defaults. */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* Clear the priority override table. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6171_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + val = REG_READ(addr, 0x01); + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, val | 0x003e); + else + REG_WRITE(addr, 0x01, val | 0x0003); + + /* Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. + */ + REG_WRITE(addr, 0x02, 0x0000); + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); + + /* Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. 
+ */ + REG_WRITE(addr, 0x07, 0x0000); + + /* Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* Egress rate control: disable egress rate control. */ + REG_WRITE(addr, 0x09, 0x0001); + + /* Egress rate control 2: disable egress rate control. */ + REG_WRITE(addr, 0x0a, 0x0000); + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* Priority Override: disable DA, SA and VTU priority override. */ + REG_WRITE(addr, 0x0d, 0x0000); + + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +static int mv88e6171_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int i; + int ret; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + + ret = mv88e6171_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6171_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 8; i++) { + if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))) + continue; + + ret = mv88e6171_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6171_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -1; +} + +static int +mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr = mv88e6171_port_to_phy_addr(port); + + return mv88e6xxx_phy_read(ds, addr, regnum); +} + +static int +mv88e6171_phy_write(struct dsa_switch *ds, + int port, int regnum, u16 val) +{ + int addr = mv88e6171_port_to_phy_addr(port); + + return mv88e6xxx_phy_write(ds, addr, regnum, val); +} + +static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { 
"hist_1024_max_bytes", 4, 0x0d, }, +}; + +static void +mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats), + mv88e6171_hw_stats, port, data); +} + +static void +mv88e6171_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats), + mv88e6171_hw_stats, port, data); +} + +static int mv88e6171_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6171_hw_stats); +} + +struct dsa_switch_driver mv88e6171_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_DSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6171_probe, + .setup = mv88e6171_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6171_phy_read, + .phy_write = mv88e6171_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6171_get_strings, + .get_ethtool_stats = mv88e6171_get_ethtool_stats, + .get_sset_count = mv88e6171_get_sset_count, +}; + +MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 9ce2146346b6cda7175229d0f493f004730ab1b1..d6f6428b27dcc02dbce52a3f0666d58b8ccbf385 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -78,7 +78,7 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_read(ds->master_mii_bus, + ret = __mv88e6xxx_reg_read(to_mii_bus(ds->master_dev), ds->pd->sw_addr, addr, reg); mutex_unlock(&ps->smi_mutex); @@ -122,7 +122,7 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_write(ds->master_mii_bus, + ret = __mv88e6xxx_reg_write(to_mii_bus(ds->master_dev), ds->pd->sw_addr, addr, reg, val); mutex_unlock(&ps->smi_mutex); @@ -500,6 +500,9 @@ static int __init mv88e6xxx_init(void) #endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) register_switch_driver(&mv88e6123_61_65_switch_driver); +#endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) + register_switch_driver(&mv88e6171_switch_driver); #endif return 0; } @@ -507,6 +510,9 @@ module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) + unregister_switch_driver(&mv88e6171_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) unregister_switch_driver(&mv88e6123_61_65_switch_driver); #endif diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 911ede58dd12d22223cfa71cce53e32eba55f5f8..5e5145ad95259cffe069500333f270e6de5f04e6 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -70,6 +70,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6171_switch_driver; #define REG_READ(addr, reg) \ ({ \ diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 957e5c0cede337cff8d98c81089e879d83988dcb..a10ad74cc8d2225a40a7f1afd261c88834a6458a 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -199,7 +199,7 @@ static void __init eql_setup(struct net_device *dev) dev->type = ARPHRD_SLIP; dev->tx_queue_len = 5; /* Hands them off fast */ - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } static int eql_open(struct net_device *dev) diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c 
index a968654b631d28a860dcd83b8c9b728189c196b2..4547a1b8b958bab25ed6cd73ccd3d0b62112dfaa 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -695,9 +695,9 @@ el3_tx_timeout (struct net_device *dev) int ioaddr = dev->base_addr; /* Transmitter timeout, serious problems. */ - pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n", - dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), - inw(ioaddr + TX_FREE)); + pr_warn("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d\n", + dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), + inw(ioaddr + TX_FREE)); dev->stats.tx_errors++; dev->trans_start = jiffies; /* prevent tx timeout */ /* Issue TX_RESET and TX_START commands. */ diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 94c656f5a05ddbe9a09e505009466961a8e119a2..942fb0d5aacebf0b7f565836979b7d61ab3b9cfb 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -515,7 +515,7 @@ static struct net_device *corkscrew_scan(int unit) if (pnp_device_attach(idev) < 0) continue; if (pnp_activate_dev(idev) < 0) { - pr_warning("pnp activate failed (out of resources?)\n"); + pr_warn("pnp activate failed (out of resources?)\n"); pnp_device_detach(idev); continue; } @@ -659,7 +659,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) - pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); + pr_warn(" *** Warning: this IRQ is unlikely to work! ***\n"); { static const char * const ram_split[] = { @@ -967,13 +967,13 @@ static void corkscrew_timeout(struct net_device *dev) struct corkscrew_private *vp = netdev_priv(dev); int ioaddr = dev->base_addr; - pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", - dev->name, inb(ioaddr + TxStatus), - inw(ioaddr + EL3_STATUS)); + pr_warn("%s: transmit timed out, tx_status %2.2x status %4.4x\n", + dev->name, inb(ioaddr + TxStatus), + inw(ioaddr + EL3_STATUS)); /* Slight code bloat to be user friendly. */ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) - pr_warning("%s: Transmitter encountered 16 collisions --" - " network cable problem?\n", dev->name); + pr_warn("%s: Transmitter encountered 16 collisions -- network cable problem?\n", + dev->name); #ifndef final_version pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, @@ -1382,13 +1382,10 @@ static int boomerang_rx(struct net_device *dev) temp = skb_put(skb, pkt_len); /* Remove this checking code for final release. */ if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp) - pr_warning("%s: Warning -- the skbuff addresses do not match" - " in boomerang_rx: %p vs. %p / %p.\n", - dev->name, - isa_bus_to_virt(vp-> - rx_ring[entry]. - addr), skb->head, - temp); + pr_warn("%s: Warning -- the skbuff addresses do not match in boomerang_rx: %p vs. 
%p / %p\n", + dev->name, + isa_bus_to_virt(vp->rx_ring[entry].addr), + skb->head, temp); rx_nocopy++; } skb->protocol = eth_type_trans(skb, dev); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 8ca49f04acecb22ff61b15437c2ecf4def4c190a..41095ebad97fe15e399d00c293882d542c844b25 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1310,8 +1310,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (dev->irq <= 0 || dev->irq >= nr_irqs) - pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n", - dev->irq); + pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n", + dev->irq); step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; if (print_info) { @@ -1425,7 +1425,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, } mii_preamble_required--; if (phy_idx == 0) { - pr_warning(" ***WARNING*** No MII transceivers found!\n"); + pr_warn(" ***WARNING*** No MII transceivers found!\n"); vp->phys[0] = 24; } else { vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); @@ -1566,8 +1566,7 @@ vortex_up(struct net_device *dev) pci_restore_state(VORTEX_PCI(vp)); err = pci_enable_device(VORTEX_PCI(vp)); if (err) { - pr_warning("%s: Could not enable device\n", - dev->name); + pr_warn("%s: Could not enable device\n", dev->name); goto err_out; } } @@ -2007,8 +2006,8 @@ vortex_error(struct net_device *dev, int status) /* This occurs when we have the wrong media type! */ if (DoneDidThat == 0 && ioread16(ioaddr + EL3_STATUS) & StatsFull) { - pr_warning("%s: Updating statistics failed, disabling " - "stats as an interrupt source.\n", dev->name); + pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n", + dev->name); iowrite16(SetIntrEnb | (window_read16(vp, 5, 10) & ~StatsFull), ioaddr + EL3_CMD); @@ -2148,8 +2147,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { if (vortex_debug > 0) - pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", - dev->name); + pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n", + dev->name); netif_stop_queue(dev); return NETDEV_TX_BUSY; } @@ -2214,7 +2213,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) goto out_dma_err; vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); @@ -2343,7 +2342,7 @@ vortex_interrupt(int irq, void *dev_id) } if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + pr_warn("%s: Too much work in interrupt, status %4.4x\n", dev->name, status); /* Disable all pending interrupts. */ do { @@ -2476,7 +2475,7 @@ boomerang_interrupt(int irq, void *dev_id) vortex_error(dev, status); if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + pr_warn("%s: Too much work in interrupt, status %4.4x\n", dev->name, status); /* Disable all pending interrupts. 
*/ do { @@ -2652,7 +2651,8 @@ boomerang_rx(struct net_device *dev) if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { - pr_warning("%s: memory shortage\n", dev->name); + pr_warn("%s: memory shortage\n", + dev->name); last_jif = jiffies; } if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) @@ -2751,7 +2751,8 @@ vortex_close(struct net_device *dev) if (vp->rx_csumhits && (vp->drv_flags & HAS_HWCKSM) == 0 && (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { - pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); + pr_warn("%s supports hardware checksums, and we're not using them!\n", + dev->name); } #endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index dc7406c81c452cacb2d83cb42d5ec5181ade9326..1ed1fbba5d58140912258d80bf8279bb9a2686a1 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -20,6 +20,7 @@ config SUNGEM_PHY source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" +source "drivers/net/ethernet/agere/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/altera/Kconfig" @@ -150,6 +151,7 @@ config ETHOC source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" +source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 224a018771499f7d6e03dee2fd913978772725f4..6e0b629e9859a85b13d82ce3b0d9fb6a7eece3e3 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ +obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_ALTERA_TSE) += altera/ @@ -60,6 +61,7 @@ obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ +obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_SH_ETH) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index afa66847e10b60e09dd8c25776bd6e020cbfd7e9..8ed4d3408ef6d30512f4f6ca8f5ff3ee2146f457 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1692,9 +1692,6 @@ static int bfin_mac_probe(struct platform_device *pdev) lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; - /* Fill in the fields of the device structure with ethernet values. 
*/ - ether_setup(ndev); - ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..63e805de619e16ba0ea59ac015ddc7c7415f3768 --- /dev/null +++ b/drivers/net/ethernet/agere/Kconfig @@ -0,0 +1,31 @@ +# +# Agere device configuration +# + +config NET_VENDOR_AGERE + bool "Agere devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Agere devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_AGERE + +config ET131X + tristate "Agere ET-1310 Gigabit Ethernet support" + depends on PCI + select PHYLIB + ---help--- + This driver supports Agere ET-1310 ethernet adapters. + + To compile this driver as a module, choose M here. The module + will be called et131x. + +endif # NET_VENDOR_AGERE diff --git a/drivers/net/ethernet/agere/Makefile b/drivers/net/ethernet/agere/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..027ff9453fe117bd5e6de24fb3b2e8428007c9b6 --- /dev/null +++ b/drivers/net/ethernet/agere/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Agere ET-131x ethernet driver +# + +obj-$(CONFIG_ET131X) += et131x.o diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c new file mode 100644 index 0000000000000000000000000000000000000000..384dc163851b3dd28bc51977306d4e1bec962575 --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.c @@ -0,0 +1,4121 @@ +/* Agere Systems Inc. + * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * Copyright (c) 2011 Mark Einon + * + *------------------------------------------------------------------------------ + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "et131x.h" + +MODULE_AUTHOR("Victor Soriano "); +MODULE_AUTHOR("Mark Einon "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems"); + +/* EEPROM defines */ +#define MAX_NUM_REGISTER_POLLS 1000 +#define MAX_NUM_WRITE_RETRIES 2 + +/* MAC defines */ +#define COUNTER_WRAP_16_BIT 0x10000 +#define COUNTER_WRAP_12_BIT 0x1000 + +/* PCI defines */ +#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ +#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ + +/* ISR defines */ +/* For interrupts, normal running is: + * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, + * watchdog_interrupt & txdma_xfer_done + * + * In both cases, when flow control is enabled for either Tx or bi-direction, + * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the + * buffer rings are running low. + */ +#define INT_MASK_DISABLE 0xffffffff + +/* NOTE: Masking out MAC_STAT Interrupt for now... + * #define INT_MASK_ENABLE 0xfff6bf17 + * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 + */ +#define INT_MASK_ENABLE 0xfffebf17 +#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 + +/* General defines */ +/* Packet and header sizes */ +#define NIC_MIN_PACKET_SIZE 60 + +/* Multicast list size */ +#define NIC_MAX_MCAST_LIST 128 + +/* Supported Filters */ +#define ET131X_PACKET_TYPE_DIRECTED 0x0001 +#define ET131X_PACKET_TYPE_MULTICAST 0x0002 +#define ET131X_PACKET_TYPE_BROADCAST 0x0004 +#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 +#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 + +/* Tx Timeout */ +#define ET131X_TX_TIMEOUT (1 * HZ) +#define NIC_SEND_HANG_THRESHOLD 0 + +/* MP_ADAPTER flags */ +#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 + +/* MP_SHARED flags */ +#define FMP_ADAPTER_LOWER_POWER 0x00200000 + +#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 +#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000 + +#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 + +/* Some offsets in PCI config space that are actually used. 
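+ *
+ * For illustration only -- a minimal sketch rather than the driver's
+ * own probe code, assuming pdev is the device's struct pci_dev: the
+ * factory MAC address stored at ET1310_PCI_MAC_ADDRESS can be fetched
+ * one byte at a time with the standard config space accessors:
+ *
+ *	u8 mac[ETH_ALEN];
+ *	int i;
+ *
+ *	for (i = 0; i < ETH_ALEN; i++)
+ *		pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
+ *				     &mac[i]);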
*/ +#define ET1310_PCI_MAC_ADDRESS 0xA4 +#define ET1310_PCI_EEPROM_STATUS 0xB2 +#define ET1310_PCI_ACK_NACK 0xC0 +#define ET1310_PCI_REPLAY 0xC2 +#define ET1310_PCI_L0L1LATENCY 0xCF + +/* PCI Product IDs */ +#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ +#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ + +/* Define order of magnitude converter */ +#define NANO_IN_A_MICRO 1000 + +#define PARM_RX_NUM_BUFS_DEF 4 +#define PARM_RX_TIME_INT_DEF 10 +#define PARM_RX_MEM_END_DEF 0x2bc +#define PARM_TX_TIME_INT_DEF 40 +#define PARM_TX_NUM_BUFS_DEF 4 +#define PARM_DMA_CACHE_DEF 0 + +/* RX defines */ +#define FBR_CHUNKS 32 +#define MAX_DESC_PER_RING_RX 1024 + +/* number of RFDs - default and min */ +#define RFD_LOW_WATER_MARK 40 +#define NIC_DEFAULT_NUM_RFD 1024 +#define NUM_FBRS 2 + +#define MAX_PACKETS_HANDLED 256 + +#define ALCATEL_MULTICAST_PKT 0x01000000 +#define ALCATEL_BROADCAST_PKT 0x02000000 + +/* typedefs for Free Buffer Descriptors */ +struct fbr_desc { + u32 addr_lo; + u32 addr_hi; + u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ +}; + +/* Packet Status Ring Descriptors + * + * Word 0: + * + * top 16 bits are from the Alcatel Status Word as enumerated in + * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) + * + * 0: hp hash pass + * 1: ipa IP checksum assist + * 2: ipp IP checksum pass + * 3: tcpa TCP checksum assist + * 4: tcpp TCP checksum pass + * 5: wol WOL Event + * 6: rxmac_error RXMAC Error Indicator + * 7: drop Drop packet + * 8: ft Frame Truncated + * 9: jp Jumbo Packet + * 10: vp VLAN Packet + * 11-15: unused + * 16: asw_prev_pkt_dropped e.g. IFG too small on previous + * 17: asw_RX_DV_event short receive event detected + * 18: asw_false_carrier_event bad carrier since last good packet + * 19: asw_code_err one or more nibbles signalled as errors + * 20: asw_CRC_err CRC error + * 21: asw_len_chk_err frame length field incorrect + * 22: asw_too_long frame length > 1518 bytes + * 23: asw_OK valid CRC + no code error + * 24: asw_multicast has a multicast address + * 25: asw_broadcast has a broadcast address + * 26: asw_dribble_nibble spurious bits after EOP + * 27: asw_control_frame is a control frame + * 28: asw_pause_frame is a pause frame + * 29: asw_unsupported_op unsupported OP code + * 30: asw_VLAN_tag VLAN tag detected + * 31: asw_long_evt Rx long event + * + * Word 1: + * 0-15: length length in bytes + * 16-25: bi Buffer Index + * 26-27: ri Ring Index + * 28-31: reserved + */ +struct pkt_stat_desc { + u32 word0; + u32 word1; +}; + +/* Typedefs for the RX DMA status word */ + +/* rx status word 0 holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word + * which contains the Free Buffer ring 0 and 1 available offset. + * + * bit 0-9 FBR1 offset + * bit 10 Wrap flag for FBR1 + * bit 16-25 FBR0 offset + * bit 26 Wrap flag for FBR0 + */ + +/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word + * which contains the Packet Status Ring available offset. + * + * bit 0-15 reserved + * bit 16-27 PSRoffset + * bit 28 PSRwrap + * bit 29-31 unused + */ + +/* struct rx_status_block is a structure representing the status of the Rx + * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 + */ +struct rx_status_block { + u32 word0; + u32 word1; +}; + +/* Structure for look-up table holding free buffer ring pointers, addresses + * and state. 
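+ *
+ * As a rough illustration (a sketch, not the actual receive path,
+ * assuming psr points at a pkt_stat_desc and rx_ring at the ring
+ * structure defined further down): the length/"bi"/"ri" fields
+ * documented for word1 above can be unpacked and used to locate the
+ * matching free buffer:
+ *
+ *	u32 len = psr->word1 & 0xffff;		/* frame length */
+ *	u32 bi  = (psr->word1 >> 16) & 0x3ff;	/* buffer index */
+ *	u32 ri  = (psr->word1 >> 26) & 0x3;	/* ring index   */
+ *	void *buf = rx_ring->fbr[ri]->virt[bi];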
+ */ +struct fbr_lookup { + void *virt[MAX_DESC_PER_RING_RX]; + u32 bus_high[MAX_DESC_PER_RING_RX]; + u32 bus_low[MAX_DESC_PER_RING_RX]; + void *ring_virtaddr; + dma_addr_t ring_physaddr; + void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + u32 local_full; + u32 num_entries; + dma_addr_t buffsize; +}; + +/* struct rx_ring is the structure representing the adaptor's local + * reference(s) to the rings + */ +struct rx_ring { + struct fbr_lookup *fbr[NUM_FBRS]; + void *ps_ring_virtaddr; + dma_addr_t ps_ring_physaddr; + u32 local_psr_full; + u32 psr_entries; + + struct rx_status_block *rx_status_block; + dma_addr_t rx_status_bus; + + struct list_head recv_list; + u32 num_ready_recv; + + u32 num_rfd; + + bool unfinished_receives; +}; + +/* TX defines */ +/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0-15: length of packet + * 16-27: VLAN tag + * 28: VLAN CFI + * 29-31: VLAN priority + * + * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0: last packet in the sequence + * 1: first packet in the sequence + * 2: interrupt the processor when this pkt sent + * 3: Control word - no packet data + * 4: Issue half-duplex backpressure : XON/XOFF + * 5: send pause frame + * 6: Tx frame has error + * 7: append CRC + * 8: MAC override + * 9: pad packet + * 10: Packet is a Huge packet + * 11: append VLAN tag + * 12: IP checksum assist + * 13: TCP checksum assist + * 14: UDP checksum assist + */ +#define TXDESC_FLAG_LASTPKT 0x0001 +#define TXDESC_FLAG_FIRSTPKT 0x0002 +#define TXDESC_FLAG_INTPROC 0x0004 + +/* struct tx_desc represents each descriptor on the ring */ +struct tx_desc { + u32 addr_hi; + u32 addr_lo; + u32 len_vlan; /* control words how to xmit the */ + u32 flags; /* data (detailed above) */ +}; + +/* The status of the Tx DMA engine it sits in free memory, and is pointed to + * by 0x101c / 0x1020. This is a DMA10 type + */ + +/* TCB (Transmit Control Block: Host Side) */ +struct tcb { + struct tcb *next; /* Next entry in ring */ + u32 count; /* Used to spot stuck/lost packets */ + u32 stale; /* Used to spot stuck/lost packets */ + struct sk_buff *skb; /* Network skb we are tied to */ + u32 index; /* Ring indexes */ + u32 index_start; +}; + +/* Structure representing our local reference(s) to the ring */ +struct tx_ring { + /* TCB (Transmit Control Block) memory and lists */ + struct tcb *tcb_ring; + + /* List of TCBs that are ready to be used */ + struct tcb *tcb_qhead; + struct tcb *tcb_qtail; + + /* list of TCBs that are currently being sent. */ + struct tcb *send_head; + struct tcb *send_tail; + int used; + + /* The actual descriptor ring */ + struct tx_desc *tx_desc_ring; + dma_addr_t tx_desc_ring_pa; + + /* send_idx indicates where we last wrote to in the descriptor ring. */ + u32 send_idx; + + /* The location of the write-back status block */ + u32 *tx_status; + dma_addr_t tx_status_pa; + + /* Packets since the last IRQ: used for interrupt coalescing */ + int since_irq; +}; + +/* Do not change these values: if changed, then change also in respective + * TXdma and Rxdma engines + */ +#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ +#define NUM_TCB 64 + +/* These values are all superseded by registry entries to facilitate tuning. 
+ * Once the desired performance has been achieved, the optimal registry values + * should be re-populated to these #defines: + */ +#define TX_ERROR_PERIOD 1000 + +#define LO_MARK_PERCENT_FOR_PSR 15 +#define LO_MARK_PERCENT_FOR_RX 15 + +/* RFD (Receive Frame Descriptor) */ +struct rfd { + struct list_head list_node; + struct sk_buff *skb; + u32 len; /* total size of receive frame */ + u16 bufferindex; + u8 ringindex; +}; + +/* Flow Control */ +#define FLOW_BOTH 0 +#define FLOW_TXONLY 1 +#define FLOW_RXONLY 2 +#define FLOW_NONE 3 + +/* Struct to define some device statistics */ +struct ce_stats { + u32 multicast_pkts_rcvd; + u32 rcvd_pkts_dropped; + + u32 tx_underflows; + u32 tx_collisions; + u32 tx_excessive_collisions; + u32 tx_first_collisions; + u32 tx_late_collisions; + u32 tx_max_pkt_errs; + u32 tx_deferred; + + u32 rx_overflows; + u32 rx_length_errs; + u32 rx_align_errs; + u32 rx_crc_errs; + u32 rx_code_violations; + u32 rx_other_errs; + + u32 interrupt_status; +}; + +/* The private adapter structure */ +struct et131x_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct mii_bus *mii_bus; + struct phy_device *phydev; + struct napi_struct napi; + + /* Flags that indicate current state of the adapter */ + u32 flags; + + /* local link state, to determine if a state change has occurred */ + int link; + + /* Configuration */ + u8 rom_addr[ETH_ALEN]; + u8 addr[ETH_ALEN]; + bool has_eeprom; + u8 eeprom_data[2]; + + spinlock_t tcb_send_qlock; /* protects the tx_ring send tcb list */ + spinlock_t tcb_ready_qlock; /* protects the tx_ring ready tcb list */ + spinlock_t rcv_lock; /* protects the rx_ring receive list */ + + /* Packet Filter and look ahead size */ + u32 packet_filter; + + /* multicast list */ + u32 multicast_addr_count; + u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; + + /* Pointer to the device's PCI register space */ + struct address_map __iomem *regs; + + /* Registry parameters */ + u8 wanted_flow; /* Flow we want for 802.3x flow control */ + u32 registry_jumbo_packet; /* Max supported ethernet packet size */ + + /* Derived from the registry: */ + u8 flow; /* flow control validated by the far-end */ + + /* Minimize init-time */ + struct timer_list error_timer; + + /* variable putting the phy into coma mode when boot up with no cable + * plugged in after 5 seconds + */ + u8 boot_coma; + + /* Tx Memory Variables */ + struct tx_ring tx_ring; + + /* Rx Memory Variables */ + struct rx_ring rx_ring; + + struct ce_stats stats; +}; + +static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) +{ + u32 reg; + int i; + + /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and + * bits 7,1:0 both equal to 1, at least once after reset. + * Subsequent operations need only to check that bits 1:0 are equal + * to 1 prior to starting a single byte read/write + */ + for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { + if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) + return -EIO; + + /* I2C idle and Phy Queue Avail both true */ + if ((reg & 0x3000) == 0x3000) { + if (status) + *status = reg; + return reg & 0xFF; + } + } + return -ETIMEDOUT; +} + +static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) +{ + struct pci_dev *pdev = adapter->pdev; + int index = 0; + int retries; + int err = 0; + int writeok = 0; + u32 status; + u32 val = 0; + + /* For an EEPROM, an I2C single byte write is defined as a START + * condition followed by the device address, EEPROM address, one byte + * of data and a STOP condition. 
The STOP condition will trigger the + * EEPROM's internally timed write cycle to the nonvolatile memory. + * All inputs are disabled during this write cycle and the EEPROM will + * not respond to any access until the internal write is complete. + */ + err = eeprom_wait_ready(pdev, NULL); + if (err < 0) + return err; + + /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, + * and bits 1:0 both =0. Bit 5 should be set according to the + * type of EEPROM being accessed (1=two byte addressing, 0=one + * byte addressing). + */ + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE | + LBCIF_CONTROL_I2C_WRITE)) + return -EIO; + + /* Prepare EEPROM address for Step 3 */ + for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) + break; + /* Write the data to the LBCIF Data Register (the I2C write + * will begin). + */ + if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) + break; + /* Monitor bit 1:0 of the LBCIF Status Register. When bits + * 1:0 are both equal to 1, the I2C write has completed and the + * internal write cycle of the EEPROM is about to start. + * (bits 1:0 = 01 is a legal state while waiting from both + * equal to 1, but bits 1:0 = 10 is invalid and implies that + * something is broken). + */ + err = eeprom_wait_ready(pdev, &status); + if (err < 0) + return 0; + + /* Check bit 3 of the LBCIF Status Register. If equal to 1, + * an error has occurred.Don't break here if we are revision + * 1, this is so we do a blind write for load bug. + */ + if ((status & LBCIF_STATUS_GENERAL_ERROR) && + adapter->pdev->revision == 0) + break; + + /* Check bit 2 of the LBCIF Status Register. If equal to 1 an + * ACK error has occurred on the address phase of the write. + * This could be due to an actual hardware failure or the + * EEPROM may still be in its internal write cycle from a + * previous write. This write operation was ignored and must be + *repeated later. + */ + if (status & LBCIF_STATUS_ACK_ERROR) { + /* This could be due to an actual hardware failure + * or the EEPROM may still be in its internal write + * cycle from a previous write. This write operation + * was ignored and must be repeated later. + */ + udelay(10); + continue; + } + + writeok = 1; + break; + } + + udelay(10); + + while (1) { + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE)) + writeok = 0; + + /* Do read until internal ACK_ERROR goes away meaning write + * completed + */ + do { + pci_write_config_dword(pdev, + LBCIF_ADDRESS_REGISTER, + addr); + do { + pci_read_config_dword(pdev, + LBCIF_DATA_REGISTER, + &val); + } while ((val & 0x00010000) == 0); + } while (val & 0x00040000); + + if ((val & 0xFF00) != 0xC000 || index == 10000) + break; + index++; + } + return writeok ? 0 : -EIO; +} + +static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + u32 status; + + /* A single byte read is similar to the single byte write, with the + * exception of the data flow: + */ + err = eeprom_wait_ready(pdev, NULL); + if (err < 0) + return err; + /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, + * and bits 1:0 both =0. Bit 5 should be set according to the type + * of EEPROM being accessed (1=two byte addressing, 0=one byte + * addressing). 
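+ *
+ * (The write path in eeprom_write() above programs this same register
+ * with LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE, which
+ * presumably corresponds to the "bit 7" and "bit 6" described here;
+ * for a read only the enable bit is set.)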
+ */ + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE)) + return -EIO; + /* Write the address to the LBCIF Address Register (I2C read will + * begin). + */ + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) + return -EIO; + /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read + * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure + * has occurred). + */ + err = eeprom_wait_ready(pdev, &status); + if (err < 0) + return err; + /* Regardless of error status, read data byte from LBCIF Data + * Register. + */ + *pdata = err; + + return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; +} + +static int et131x_init_eeprom(struct et131x_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u8 eestatus; + + pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); + + /* THIS IS A WORKAROUND: + * I need to call this function twice to get my card in a + * LG M1 Express Dual running. I tried also a msleep before this + * function, because I thought there could be some time conditions + * but it didn't work. Call the whole function twice also work. + */ + if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { + dev_err(&pdev->dev, + "Could not read PCI config space for EEPROM Status\n"); + return -EIO; + } + + /* Determine if the error(s) we care about are present. If they are + * present we need to fail. + */ + if (eestatus & 0x4C) { + int write_failed = 0; + + if (pdev->revision == 0x01) { + int i; + static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; + + /* Re-write the first 4 bytes if we have an eeprom + * present and the revision id is 1, this fixes the + * corruption seen with 1310 B Silicon + */ + for (i = 0; i < 3; i++) + if (eeprom_write(adapter, i, eedata[i]) < 0) + write_failed = 1; + } + if (pdev->revision != 0x01 || write_failed) { + dev_err(&pdev->dev, + "Fatal EEPROM Status Error - 0x%04x\n", + eestatus); + + /* This error could mean that there was an error + * reading the eeprom or that the eeprom doesn't exist. + * We will treat each case the same and not try to + * gather additional information that normally would + * come from the eeprom, like MAC Address + */ + adapter->has_eeprom = 0; + return -EIO; + } + } + adapter->has_eeprom = 1; + + /* Read the EEPROM for information regarding LED behavior. Refer to + * et131x_xcvr_init() for its use. 
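+ *
+ * eeprom_read() returns 0 on success and a negative errno otherwise;
+ * the two reads below ignore that return value, which appears harmless
+ * because the 0xcd signature check that follows falls back to the
+ * default LED behaviour whenever eeprom_data[0] was not filled in.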
+ */ + eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); + eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); + + if (adapter->eeprom_data[0] != 0xcd) + /* Disable all optional features */ + adapter->eeprom_data[1] = 0x00; + + return 0; +} + +static void et131x_rx_dma_enable(struct et131x_adapter *adapter) +{ + /* Setup the receive dma configuration register for normal operation */ + u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; + struct rx_ring *rx_ring = &adapter->rx_ring; + + if (rx_ring->fbr[1]->buffsize == 4096) + csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; + else if (rx_ring->fbr[1]->buffsize == 8192) + csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; + else if (rx_ring->fbr[1]->buffsize == 16384) + csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; + + csr |= ET_RXDMA_CSR_FBR0_ENABLE; + if (rx_ring->fbr[0]->buffsize == 256) + csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; + else if (rx_ring->fbr[0]->buffsize == 512) + csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; + else if (rx_ring->fbr[0]->buffsize == 1024) + csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; + writel(csr, &adapter->regs->rxdma.csr); + + csr = readl(&adapter->regs->rxdma.csr); + if (csr & ET_RXDMA_CSR_HALT_STATUS) { + udelay(5); + csr = readl(&adapter->regs->rxdma.csr); + if (csr & ET_RXDMA_CSR_HALT_STATUS) { + dev_err(&adapter->pdev->dev, + "RX Dma failed to exit halt state. CSR 0x%08x\n", + csr); + } + } +} + +static void et131x_rx_dma_disable(struct et131x_adapter *adapter) +{ + u32 csr; + /* Setup the receive dma configuration register */ + writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE, + &adapter->regs->rxdma.csr); + csr = readl(&adapter->regs->rxdma.csr); + if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) { + udelay(5); + csr = readl(&adapter->regs->rxdma.csr); + if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) + dev_err(&adapter->pdev->dev, + "RX Dma failed to enter halt state. CSR 0x%08x\n", + csr); + } +} + +static void et131x_tx_dma_enable(struct et131x_adapter *adapter) +{ + /* Setup the transmit dma configuration register for normal + * operation + */ + writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), + &adapter->regs->txdma.csr); +} + +static inline void add_10bit(u32 *v, int n) +{ + *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); +} + +static inline void add_12bit(u32 *v, int n) +{ + *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); +} + +static void et1310_config_mac_regs1(struct et131x_adapter *adapter) +{ + struct mac_regs __iomem *macregs = &adapter->regs->mac; + u32 station1; + u32 station2; + u32 ipg; + + /* First we need to reset everything. Write to MAC configuration + * register 1 to perform reset. + */ + writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | + ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC, + ¯egs->cfg1); + + /* Next lets configure the MAC Inter-packet gap register */ + ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ + ipg |= 0x50 << 8; /* ifg enforce 0x50 */ + writel(ipg, ¯egs->ipg); + + /* Next lets configure the MAC Half Duplex register */ + /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ + writel(0x00A1F037, ¯egs->hfdp); + + /* Next lets configure the MAC Interface Control register */ + writel(0, ¯egs->if_ctrl); + + writel(ET_MAC_MIIMGMT_CLK_RST, ¯egs->mii_mgmt_cfg); + + /* Next lets configure the MAC Station Address register. These + * values are read from the EEPROM during initialization and stored + * in the adapter structure. We write what is stored in the adapter + * structure to the MAC Station Address registers high and low. 
This + * station address is used for generating and checking pause control + * packets. + */ + station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | + (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); + station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | + (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | + (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | + adapter->addr[2]; + writel(station1, ¯egs->station_addr_1); + writel(station2, ¯egs->station_addr_2); + + /* Max ethernet packet in bytes that will be passed by the mac without + * being truncated. Allow the MAC to pass 4 more than our max packet + * size. This is 4 for the Ethernet CRC. + * + * Packets larger than (registry_jumbo_packet) that do not contain a + * VLAN ID will be dropped by the Rx function. + */ + writel(adapter->registry_jumbo_packet + 4, ¯egs->max_fm_len); + + /* clear out MAC config reset */ + writel(0, ¯egs->cfg1); +} + +static void et1310_config_mac_regs2(struct et131x_adapter *adapter) +{ + int32_t delay = 0; + struct mac_regs __iomem *mac = &adapter->regs->mac; + struct phy_device *phydev = adapter->phydev; + u32 cfg1; + u32 cfg2; + u32 ifctrl; + u32 ctl; + + ctl = readl(&adapter->regs->txmac.ctl); + cfg1 = readl(&mac->cfg1); + cfg2 = readl(&mac->cfg2); + ifctrl = readl(&mac->if_ctrl); + + /* Set up the if mode bits */ + cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK; + if (phydev->speed == SPEED_1000) { + cfg2 |= ET_MAC_CFG2_IFMODE_1000; + ifctrl &= ~ET_MAC_IFCTRL_PHYMODE; + } else { + cfg2 |= ET_MAC_CFG2_IFMODE_100; + ifctrl |= ET_MAC_IFCTRL_PHYMODE; + } + + cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE | + ET_MAC_CFG1_TX_FLOW; + + cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW); + if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH) + cfg1 |= ET_MAC_CFG1_RX_FLOW; + writel(cfg1, &mac->cfg1); + + /* Now we need to initialize the MAC Configuration 2 register */ + /* preamble 7, check length, huge frame off, pad crc, crc enable + * full duplex off + */ + cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT; + cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK; + cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC; + cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE; + cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME; + cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX; + + if (phydev->duplex == DUPLEX_FULL) + cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX; + + ifctrl &= ~ET_MAC_IFCTRL_GHDMODE; + if (phydev->duplex == DUPLEX_HALF) + ifctrl |= ET_MAC_IFCTRL_GHDMODE; + + writel(ifctrl, &mac->if_ctrl); + writel(cfg2, &mac->cfg2); + + do { + udelay(10); + delay++; + cfg1 = readl(&mac->cfg1); + } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100); + + if (delay == 100) { + dev_warn(&adapter->pdev->dev, + "Syncd bits did not respond correctly cfg1 word 0x%08x\n", + cfg1); + } + + ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE; + writel(ctl, &adapter->regs->txmac.ctl); + + if (adapter->flags & FMP_ADAPTER_LOWER_POWER) { + et131x_rx_dma_enable(adapter); + et131x_tx_dma_enable(adapter); + } +} + +static int et1310_in_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr = readl(&adapter->regs->global.pm_csr); + + return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; +} + +static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + u32 hash1 = 0; + u32 hash2 = 0; + u32 hash3 = 0; + u32 hash4 = 0; + u32 pm_csr; + + /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision + * the multi-cast LIST. 
If it is NOT specified, (and "ALL" is not + * specified) then we should pass NO multi-cast addresses to the + * driver. + */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { + int i; + + /* Loop through our multicast array and set up the device */ + for (i = 0; i < adapter->multicast_addr_count; i++) { + u32 result; + + result = ether_crc(6, adapter->multicast_list[i]); + + result = (result & 0x3F800000) >> 23; + + if (result < 32) { + hash1 |= (1 << result); + } else if ((31 < result) && (result < 64)) { + result -= 32; + hash2 |= (1 << result); + } else if ((63 < result) && (result < 96)) { + result -= 64; + hash3 |= (1 << result); + } else { + result -= 96; + hash4 |= (1 << result); + } + } + } + + /* Write out the new hash to the device */ + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(hash1, &rxmac->multi_hash1); + writel(hash2, &rxmac->multi_hash2); + writel(hash3, &rxmac->multi_hash3); + writel(hash4, &rxmac->multi_hash4); + } +} + +static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + u32 uni_pf1; + u32 uni_pf2; + u32 uni_pf3; + u32 pm_csr; + + /* Set up unicast packet filter reg 3 to be the first two octets of + * the MAC address for both addresses + * + * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the + * MAC address for second address + * + * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the + * MAC address for first address + */ + uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) | + (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) | + (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) | + adapter->addr[1]; + + uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) | + adapter->addr[5]; + + uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | + adapter->addr[5]; + + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(uni_pf1, &rxmac->uni_pf_addr1); + writel(uni_pf2, &rxmac->uni_pf_addr2); + writel(uni_pf3, &rxmac->uni_pf_addr3); + } +} + +static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + struct phy_device *phydev = adapter->phydev; + u32 sa_lo; + u32 sa_hi = 0; + u32 pf_ctrl = 0; + u32 __iomem *wolw; + + /* Disable the MAC while it is being configured (also disable WOL) */ + writel(0x8, &rxmac->ctrl); + + /* Initialize WOL to disabled. */ + writel(0, &rxmac->crc0); + writel(0, &rxmac->crc12); + writel(0, &rxmac->crc34); + + /* We need to set the WOL mask0 - mask4 next. We initialize it to + * its default values of 0x00000000 because there are no WOL masks + * as of this time.
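+ *
+ * (Aside on et1310_setup_device_for_multicast() above -- a sketch of
+ * what the hash1..hash4 arithmetic boils down to, not a replacement
+ * for it: the top seven bits of the CRC pick one of 128 hash bits,
+ * roughly
+ *
+ *	u32 crc    = ether_crc(ETH_ALEN, mc_addr);
+ *	u32 bucket = (crc & 0x3F800000) >> 23;	/* 0..127 */
+ *
+ *	hash[bucket / 32] |= 1 << (bucket % 32);
+ *
+ * with hash[0..3] standing in for the four multi_hash registers.)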
+ */ + for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++) + writel(0, wolw); + + /* Let's set up the WOL Source Address */ + sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) | + (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) | + (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) | + adapter->addr[5]; + writel(sa_lo, &rxmac->sa_lo); + + sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) | + adapter->addr[1]; + writel(sa_hi, &rxmac->sa_hi); + + /* Disable all Packet Filtering */ + writel(0, &rxmac->pf_ctrl); + + /* Let's initialize the Unicast Packet filtering address */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE; + } else { + writel(0, &rxmac->uni_pf_addr1); + writel(0, &rxmac->uni_pf_addr2); + writel(0, &rxmac->uni_pf_addr3); + } + + /* Let's initialize the Multicast hash */ + if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { + pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE; + et1310_setup_device_for_multicast(adapter); + } + + /* Runt packet filtering. Didn't work in version A silicon. */ + pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT; + pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE; + + if (adapter->registry_jumbo_packet > 8192) + /* In order to transmit jumbo packets greater than 8k, the + * FIFO between RxMAC and RxDMA needs to be reduced in size + * to (16k - Jumbo packet size). In order to implement this, + * we must use "cut through" mode in the RxMAC, which chops + * packets down into segments which are (max_size * 16). In + * this case we selected 256 bytes, since this is the size of + * the PCI-Express TLPs that the 1310 uses. + * + * seg_en on, fc_en off, size 0x10 + */ + writel(0x41, &rxmac->mcif_ctrl_max_seg); + else + writel(0, &rxmac->mcif_ctrl_max_seg); + + writel(0, &rxmac->mcif_water_mark); + writel(0, &rxmac->mif_ctrl); + writel(0, &rxmac->space_avail); + + /* Initialize the mif_ctrl register + * bit 3: Receive code error. One or more nibbles were signaled as + * errors during the reception of the packet. Clear this + * bit in Gigabit, set it in 100Mbit. This was derived + * experimentally at UNH. + * bit 4: Receive CRC error. The packet's CRC did not match the + * internally generated CRC. + * bit 5: Receive length check error. Indicates that frame length + * field value in the packet does not match the actual data + * byte length and is not a type field. + * bit 16: Receive frame truncated. + * bit 17: Drop packet enable + */ + if (phydev && phydev->speed == SPEED_100) + writel(0x30038, &rxmac->mif_ctrl); + else + writel(0x30030, &rxmac->mif_ctrl); + + /* Finally we initialize RxMac to be enabled & WOL disabled. Packet + * filter is always enabled since it is where the runt packets are + * supposed to be dropped. For version A silicon, runt packet + * dropping doesn't work, so it is disabled in the pf_ctrl register, + * but we still leave the packet filter on.
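+ *
+ * (For reference, the minimum-packet field programmed above works out
+ * to NIC_MIN_PACKET_SIZE + 4 = 60 + 4 = 64 bytes, i.e. the smallest
+ * legal Ethernet frame once the 4-byte FCS is counted, so anything
+ * shorter is treated as a runt/fragment by the filter.)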
+ */ + writel(pf_ctrl, &rxmac->pf_ctrl); + writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl); +} + +static void et1310_config_txmac_regs(struct et131x_adapter *adapter) +{ + struct txmac_regs __iomem *txmac = &adapter->regs->txmac; + + /* We need to update the Control Frame Parameters + * cfpt - control frame pause timer set to 64 (0x40) + * cfep - control frame extended pause timer set to 0x0 + */ + if (adapter->flow == FLOW_NONE) + writel(0, &txmac->cf_param); + else + writel(0x40, &txmac->cf_param); +} + +static void et1310_config_macstat_regs(struct et131x_adapter *adapter) +{ + struct macstat_regs __iomem *macstat = &adapter->regs->macstat; + u32 __iomem *reg; + + /* initialize all the macstat registers to zero on the device */ + for (reg = &macstat->txrx_0_64_byte_frames; + reg <= &macstat->carry_reg2; reg++) + writel(0, reg); + + /* Unmask any counters that we want to track the overflow of. + * Initially this will be all counters. It may become clear later + * that we do not need to track all counters. + */ + writel(0xFFFFBE32, &macstat->carry_reg1_mask); + writel(0xFFFE7E8B, &macstat->carry_reg2_mask); +} + +static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, + u8 reg, u16 *value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to read from on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + writel(0x1, &mac->mii_mgmt_cmd); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); + + /* If we hit the max delay, we could not read the register */ + if (delay == 50) { + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be read\n", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + + status = -EIO; + goto out; + } + + /* If we hit here we were able to read the register and we need to + * return the value to the caller + */ + *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; + +out: + /* Stop the read operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) +{ + struct phy_device *phydev = adapter->phydev; + + if (!phydev) + return -EIO; + + return et131x_phy_mii_read(adapter, phydev->addr, reg, value); +} + +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, + u16 value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to write to on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + /* Add the value to write to 
the registers to the mac */ + writel(value, &mac->mii_mgmt_ctrl); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); + + /* If we hit the max delay, we could not write the register */ + if (delay == 100) { + u16 tmp; + + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be written", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", + readl(&mac->mii_mgmt_cmd)); + + et131x_mii_read(adapter, reg, &tmp); + + status = -EIO; + } + /* Stop the write operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, + u16 regnum, + u16 bitnum, + u8 *value) +{ + u16 reg; + u16 mask = 1 << bitnum; + + et131x_mii_read(adapter, regnum, ®); + + *value = (reg & mask) >> bitnum; +} + +static void et1310_config_flow_control(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + if (phydev->duplex == DUPLEX_HALF) { + adapter->flow = FLOW_NONE; + } else { + char remote_pause, remote_async_pause; + + et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); + et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); + + if (remote_pause && remote_async_pause) { + adapter->flow = adapter->wanted_flow; + } else if (remote_pause && !remote_async_pause) { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_BOTH; + else + adapter->flow = FLOW_NONE; + } else if (!remote_pause && !remote_async_pause) { + adapter->flow = FLOW_NONE; + } else { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_RXONLY; + else + adapter->flow = FLOW_NONE; + } + } +} + +/* et1310_update_macstat_host_counters - Update local copy of the statistics */ +static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) +{ + struct ce_stats *stats = &adapter->stats; + struct macstat_regs __iomem *macstat = + &adapter->regs->macstat; + + stats->tx_collisions += readl(&macstat->tx_total_collisions); + stats->tx_first_collisions += readl(&macstat->tx_single_collisions); + stats->tx_deferred += readl(&macstat->tx_deferred); + stats->tx_excessive_collisions += + readl(&macstat->tx_multiple_collisions); + stats->tx_late_collisions += readl(&macstat->tx_late_collisions); + stats->tx_underflows += readl(&macstat->tx_undersize_frames); + stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); + + stats->rx_align_errs += readl(&macstat->rx_align_errs); + stats->rx_crc_errs += readl(&macstat->rx_code_errs); + stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); + stats->rx_overflows += readl(&macstat->rx_oversize_packets); + stats->rx_code_violations += readl(&macstat->rx_fcs_errs); + stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); + stats->rx_other_errs += readl(&macstat->rx_fragment_packets); +} + +/* et1310_handle_macstat_interrupt + * + * One of the MACSTAT counters has wrapped. Update the local copy of + * the statistics held in the adapter structure, checking the "wrap" + * bit for each counter. + */ +static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) +{ + u32 carry_reg1; + u32 carry_reg2; + + /* Read the interrupt bits from the register(s). These are Clear On + * Write. 
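+ *
+ * Writing the value we just read straight back acknowledges exactly
+ * the carry bits that were set (the usual write-one-to-clear pattern),
+ * so any wrap event that happens between the read and the write-back
+ * is left pending rather than lost.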
+ */ + carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); + carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); + + writel(carry_reg1, &adapter->regs->macstat.carry_reg1); + writel(carry_reg2, &adapter->regs->macstat.carry_reg2); + + /* We need to do update the host copy of all the MAC_STAT counters. + * For each counter, check it's overflow bit. If the overflow bit is + * set, then increment the host version of the count by one complete + * revolution of the counter. This routine is called when the counter + * block indicates that one of the counters has wrapped. + */ + if (carry_reg1 & (1 << 14)) + adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 8)) + adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; + if (carry_reg1 & (1 << 7)) + adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 2)) + adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 6)) + adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 3)) + adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 0)) + adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; + if (carry_reg2 & (1 << 16)) + adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 15)) + adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 6)) + adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 8)) + adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 5)) + adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 4)) + adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 2)) + adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; +} + +static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + u16 value; + int ret; + + ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); + + if (ret < 0) + return ret; + + return value; +} + +static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, + int reg, u16 value) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + + return et131x_mii_write(adapter, phy_addr, reg, value); +} + +/* et1310_phy_power_switch - PHY power control + * @adapter: device to control + * @down: true for off/false for back on + * + * one hundred, ten, one thousand megs + * How would you like to have your LAN accessed + * Can't you see that this code processed + * Phy power, phy power.. + */ +static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) +{ + u16 data; + struct phy_device *phydev = adapter->phydev; + + et131x_mii_read(adapter, MII_BMCR, &data); + data &= ~BMCR_PDOWN; + if (down) + data |= BMCR_PDOWN; + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); +} + +/* et131x_xcvr_init - Init the phy if we are setting it into force mode */ +static void et131x_xcvr_init(struct et131x_adapter *adapter) +{ + u16 lcr2; + struct phy_device *phydev = adapter->phydev; + + /* Set the LED behavior such that LED 1 indicates speed (off = + * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates + * link and activity (on for link, blink off for activity). + * + * NOTE: Some customizations have been added here for specific + * vendors; The LED behavior is now determined by vendor data in the + * EEPROM. However, the above description is the default. 
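+ *
+ * eeprom_data[0] and eeprom_data[1] are the bytes read from EEPROM
+ * offsets 0x70 and 0x71 in et131x_init_eeprom(); when bit 2 (0x4) of
+ * eeprom_data[1] is clear the override below is applied, and bit 3
+ * (0x8) selects which of the two TX/RX LED modes is used.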
+ */ + if ((adapter->eeprom_data[1] & 0x4) == 0) { + et131x_mii_read(adapter, PHY_LED_2, &lcr2); + + lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); + lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); + + if ((adapter->eeprom_data[1] & 0x8) == 0) + lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); + else + lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); + + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); + } +} + +/* et131x_configure_global_regs - configure JAGCore global regs */ +static void et131x_configure_global_regs(struct et131x_adapter *adapter) +{ + struct global_regs __iomem *regs = &adapter->regs->global; + + writel(0, ®s->rxq_start_addr); + writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); + + if (adapter->registry_jumbo_packet < 2048) { + /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word + * block of RAM that the driver can split between Tx + * and Rx as it desires. Our default is to split it + * 50/50: + */ + writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); + writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); + } else if (adapter->registry_jumbo_packet < 8192) { + /* For jumbo packets > 2k but < 8k, split 50-50. */ + writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); + writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); + } else { + /* 9216 is the only packet size greater than 8k that + * is available. The Tx buffer has to be big enough + * for one whole packet on the Tx side. We'll make + * the Tx 9408, and give the rest to Rx + */ + writel(0x01b3, ®s->rxq_end_addr); + writel(0x01b4, ®s->txq_start_addr); + } + + /* Initialize the loopback register. Disable all loopbacks. */ + writel(0, ®s->loopback); + + writel(0, ®s->msi_config); + + /* By default, disable the watchdog timer. It will be enabled when + * a packet is queued. 
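+ *
+ * (On the queue split above: the 9408 figure only works out if the
+ * packet RAM is addressed in 16-byte blocks, giving 0x400 blocks =
+ * 16 KB in total; the jumbo case then leaves 0x400 - 0x1b4 = 0x24c =
+ * 588 blocks = 9408 bytes for the Tx queue, with the remaining
+ * 436 blocks (6976 bytes) going to Rx.)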
+	 */
+	writel(0, &regs->watchdog_timer);
+}
+
+/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
+static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
+{
+	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
+	struct rx_ring *rx_local = &adapter->rx_ring;
+	struct fbr_desc *fbr_entry;
+	u32 entry;
+	u32 psr_num_des;
+	unsigned long flags;
+	u8 id;
+
+	et131x_rx_dma_disable(adapter);
+
+	/* Load the completion writeback physical address */
+	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
+	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
+
+	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
+
+	/* Set the address and parameters of the packet status ring */
+	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
+	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
+	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
+	writel(0, &rx_dma->psr_full_offset);
+
+	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
+	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
+	       &rx_dma->psr_min_des);
+
+	spin_lock_irqsave(&adapter->rcv_lock, flags);
+
+	/* These local variables track the PSR in the adapter structure */
+	rx_local->local_psr_full = 0;
+
+	for (id = 0; id < NUM_FBRS; id++) {
+		u32 __iomem *num_des;
+		u32 __iomem *full_offset;
+		u32 __iomem *min_des;
+		u32 __iomem *base_hi;
+		u32 __iomem *base_lo;
+		struct fbr_lookup *fbr = rx_local->fbr[id];
+
+		if (id == 0) {
+			num_des = &rx_dma->fbr0_num_des;
+			full_offset = &rx_dma->fbr0_full_offset;
+			min_des = &rx_dma->fbr0_min_des;
+			base_hi = &rx_dma->fbr0_base_hi;
+			base_lo = &rx_dma->fbr0_base_lo;
+		} else {
+			num_des = &rx_dma->fbr1_num_des;
+			full_offset = &rx_dma->fbr1_full_offset;
+			min_des = &rx_dma->fbr1_min_des;
+			base_hi = &rx_dma->fbr1_base_hi;
+			base_lo = &rx_dma->fbr1_base_lo;
+		}
+
+		/* Now's the best time to initialize FBR contents */
+		fbr_entry = fbr->ring_virtaddr;
+		for (entry = 0; entry < fbr->num_entries; entry++) {
+			fbr_entry->addr_hi = fbr->bus_high[entry];
+			fbr_entry->addr_lo = fbr->bus_low[entry];
+			fbr_entry->word2 = entry;
+			fbr_entry++;
+		}
+
+		/* Set the address and parameters of Free buffer ring 1 and 0 */
+		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
+		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
+		writel(fbr->num_entries - 1, num_des);
+		writel(ET_DMA10_WRAP, full_offset);
+
+		/* This variable tracks the free buffer ring 1 full position,
+		 * so it has to match the above.
+		 */
+		fbr->local_full = ET_DMA10_WRAP;
+		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
+		       min_des);
+	}
+
+	/* Program the number of packets we will receive before generating an
+	 * interrupt.
+	 * For version B silicon, this value gets updated once autoneg is
+	 * complete.
+	 */
+	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
+
+	/* The "time_done" is not working correctly to coalesce interrupts
+	 * after a given time period, but rather is giving us an interrupt
+	 * regardless of whether we have received packets.
+	 * This value gets updated once autoneg is complete.
+	 */
+	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
+
+	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
+}
+
+/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
+ *
+ * Configure the transmit engine with the ring buffers we have created
+ * and prepare it for use.
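+ *
+ * The descriptor ring base, ring length and status writeback address
+ * programmed here must match the buffers set up in
+ * et131x_tx_dma_memory_alloc().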
+ */ +static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) +{ + struct txdma_regs __iomem *txdma = &adapter->regs->txdma; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Load the hardware with the start of the transmit descriptor ring. */ + writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); + writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); + + /* Initialise the transmit DMA engine */ + writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); + + /* Load the completion writeback physical address */ + writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); + writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); + + *tx_ring->tx_status = 0; + + writel(0, &txdma->service_request); + tx_ring->send_idx = 0; +} + +/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ +static void et131x_adapter_setup(struct et131x_adapter *adapter) +{ + et131x_configure_global_regs(adapter); + et1310_config_mac_regs1(adapter); + + /* Configure the MMC registers */ + /* All we need to do is initialize the Memory Control Register */ + writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); + + et1310_config_rxmac_regs(adapter); + et1310_config_txmac_regs(adapter); + + et131x_config_rx_dma_regs(adapter); + et131x_config_tx_dma_regs(adapter); + + et1310_config_macstat_regs(adapter); + + et1310_phy_power_switch(adapter, 0); + et131x_xcvr_init(adapter); +} + +/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ +static void et131x_soft_reset(struct et131x_adapter *adapter) +{ + u32 reg; + + /* Disable MAC Core */ + reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | + ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + + reg = ET_RESET_ALL; + writel(reg, &adapter->regs->global.sw_reset); + + reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + writel(0, &adapter->regs->mac.cfg1); +} + +static void et131x_enable_interrupts(struct et131x_adapter *adapter) +{ + u32 mask; + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + mask = INT_MASK_ENABLE; + else + mask = INT_MASK_ENABLE_NO_FLOW; + + writel(mask, &adapter->regs->global.int_mask); +} + +static void et131x_disable_interrupts(struct et131x_adapter *adapter) +{ + writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); +} + +static void et131x_tx_dma_disable(struct et131x_adapter *adapter) +{ + /* Setup the transmit dma configuration register */ + writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, + &adapter->regs->txdma.csr); +} + +static void et131x_enable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_rx_dma_enable(adapter); + et131x_tx_dma_enable(adapter); + + if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) + et131x_enable_interrupts(adapter); + + netif_start_queue(netdev); +} + +static void et131x_disable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + + et131x_rx_dma_disable(adapter); + et131x_tx_dma_disable(adapter); + + et131x_disable_interrupts(adapter); +} + +static void et131x_init_send(struct et131x_adapter *adapter) +{ + int i; + struct tx_ring *tx_ring = &adapter->tx_ring; + struct tcb *tcb = tx_ring->tcb_ring; + + tx_ring->tcb_qhead = tcb; + + memset(tcb, 0, sizeof(struct tcb) * 
NUM_TCB); + + for (i = 0; i < NUM_TCB; i++) { + tcb->next = tcb + 1; + tcb++; + } + + tcb--; + tx_ring->tcb_qtail = tcb; + tcb->next = NULL; + /* Curr send queue should now be empty */ + tx_ring->send_head = NULL; + tx_ring->send_tail = NULL; +} + +/* et1310_enable_phy_coma + * + * driver receive an phy status change interrupt while in D0 and check that + * phy_status is down. + * + * -- gate off JAGCore; + * -- set gigE PHY in Coma mode + * -- wake on phy_interrupt; Perform software reset JAGCore, + * re-initialize jagcore and gigE PHY + */ +static void et1310_enable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Stop sending packets. */ + adapter->flags |= FMP_ADAPTER_LOWER_POWER; + + /* Wait for outstanding Receive packets */ + et131x_disable_txrx(adapter->netdev); + + /* Gate off JAGCore 3 clock domains */ + pmcsr &= ~ET_PMCSR_INIT; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Program gigE PHY in to Coma mode */ + pmcsr |= ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); +} + +static void et1310_disable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr; + + pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Disable phy_sw_coma register and re-enable JAGCore clocks */ + pmcsr |= ET_PMCSR_INIT; + pmcsr &= ~ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Restore the GbE PHY speed and duplex modes; + * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY + */ + + /* Re-initialize the send structures */ + et131x_init_send(adapter); + + /* Bring the device back to the state it was during init prior to + * autonegotiation being complete. This way, when we get the auto-neg + * complete interrupt, we can complete init by calling ConfigMacREGS2. + */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + /* Allow Tx to restart */ + adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; + + et131x_enable_txrx(adapter->netdev); +} + +static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) +{ + u32 tmp_free_buff_ring = *free_buff_ring; + + tmp_free_buff_ring++; + /* This works for all cases where limit < 1024. The 1023 case + * works because 1023++ is 1024 which means the if condition is not + * taken but the carry of the bit into the wrap bit toggles the wrap + * value correctly + */ + if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { + tmp_free_buff_ring &= ~ET_DMA10_MASK; + tmp_free_buff_ring ^= ET_DMA10_WRAP; + } + /* For the 1023 case */ + tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); + *free_buff_ring = tmp_free_buff_ring; + return tmp_free_buff_ring; +} + +/* et131x_rx_dma_memory_alloc + * + * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, + * and the Packet Status Ring. + */ +static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + u8 id; + u32 i, j; + u32 bufsize; + u32 psr_size; + u32 fbr_chunksize; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Alloc memory for the lookup table */ + rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[0] == NULL) + return -ENOMEM; + rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[1] == NULL) + return -ENOMEM; + + /* The first thing we will do is configure the sizes of the buffer + * rings. These will change based on jumbo packet support. 
Larger
+	 * jumbo packets increase the size of each entry in FBR0, and the
+	 * number of entries in FBR0, while at the same time decreasing the
+	 * number of entries in FBR1.
+	 *
+	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
+	 * entries are huge in order to accommodate a "jumbo" frame, then it
+	 * will have fewer entries. Conversely, FBR1 will now be relied upon
+	 * to carry more "normal" frames, thus its entry size also increases
+	 * and the number of entries goes up too (since it now carries
+	 * "small" + "regular" packets).
+	 *
+	 * In this scheme, we try to maintain 512 entries between the two
+	 * rings. Also, FBR1 remains a constant size - when its size doubles
+	 * the number of entries halves. FBR0 increases in size, however.
+	 */
+	if (adapter->registry_jumbo_packet < 2048) {
+		rx_ring->fbr[0]->buffsize = 256;
+		rx_ring->fbr[0]->num_entries = 512;
+		rx_ring->fbr[1]->buffsize = 2048;
+		rx_ring->fbr[1]->num_entries = 512;
+	} else if (adapter->registry_jumbo_packet < 4096) {
+		rx_ring->fbr[0]->buffsize = 512;
+		rx_ring->fbr[0]->num_entries = 1024;
+		rx_ring->fbr[1]->buffsize = 4096;
+		rx_ring->fbr[1]->num_entries = 512;
+	} else {
+		rx_ring->fbr[0]->buffsize = 1024;
+		rx_ring->fbr[0]->num_entries = 768;
+		rx_ring->fbr[1]->buffsize = 16384;
+		rx_ring->fbr[1]->num_entries = 128;
+	}
+
+	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
+			       rx_ring->fbr[1]->num_entries;
+
+	for (id = 0; id < NUM_FBRS; id++) {
+		fbr = rx_ring->fbr[id];
+		/* Allocate an area of memory for Free Buffer Ring */
+		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
+		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+							bufsize,
+							&fbr->ring_physaddr,
+							GFP_KERNEL);
+		if (!fbr->ring_virtaddr) {
+			dev_err(&adapter->pdev->dev,
+				"Cannot alloc memory for Free Buffer Ring %d\n",
+				id);
+			return -ENOMEM;
+		}
+	}
+
+	for (id = 0; id < NUM_FBRS; id++) {
+		fbr = rx_ring->fbr[id];
+		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
+
+		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
+			dma_addr_t fbr_physaddr;
+
+			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
+					&adapter->pdev->dev, fbr_chunksize,
+					&fbr->mem_physaddrs[i],
+					GFP_KERNEL);
+
+			if (!fbr->mem_virtaddrs[i]) {
+				dev_err(&adapter->pdev->dev,
+					"Could not alloc memory\n");
+				return -ENOMEM;
+			}
+
+			/* See NOTE in "Save Physical Address" comment above */
+			fbr_physaddr = fbr->mem_physaddrs[i];
+
+			for (j = 0; j < FBR_CHUNKS; j++) {
+				u32 k = (i * FBR_CHUNKS) + j;
+
+				/* Save the Virtual address of this index for
+				 * quick access later
+				 */
+				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
+					       (j * fbr->buffsize);
+
+				/* now store the physical address in the
+				 * descriptor so the device can access it
+				 */
+				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
+				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
+				fbr_physaddr += fbr->buffsize;
+			}
+		}
+	}
+
+	/* Allocate an area of memory for FIFO of Packet Status ring entries */
+	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
+
+	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
+						       psr_size,
+						       &rx_ring->ps_ring_physaddr,
+						       GFP_KERNEL);
+
+	if (!rx_ring->ps_ring_virtaddr) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot alloc memory for Packet Status Ring\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate an area of memory for writeback of status information */
+	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
+						      sizeof(struct rx_status_block),
+						      &rx_ring->rx_status_bus,
+						      GFP_KERNEL);
+	if (!rx_ring->rx_status_block) {
+		dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Status Block\n"); + return -ENOMEM; + } + rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; + + /* The RFDs are going to be put on lists later on, so initialize the + * lists now. + */ + INIT_LIST_HEAD(&rx_ring->recv_list); + return 0; +} + +static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) +{ + u8 id; + u32 ii; + u32 bufsize; + u32 psr_size; + struct rfd *rfd; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Free RFDs and associated packet descriptors */ + WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); + + while (!list_empty(&rx_ring->recv_list)) { + rfd = list_entry(rx_ring->recv_list.next, + struct rfd, list_node); + + list_del(&rfd->list_node); + rfd->skb = NULL; + kfree(rfd); + } + + /* Free Free Buffer Rings */ + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + + if (!fbr || !fbr->ring_virtaddr) + continue; + + /* First the packet memory */ + for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) { + if (fbr->mem_virtaddrs[ii]) { + bufsize = fbr->buffsize * FBR_CHUNKS; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->mem_virtaddrs[ii], + fbr->mem_physaddrs[ii]); + + fbr->mem_virtaddrs[ii] = NULL; + } + } + + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->ring_virtaddr, + fbr->ring_physaddr); + + fbr->ring_virtaddr = NULL; + } + + /* Free Packet Status Ring */ + if (rx_ring->ps_ring_virtaddr) { + psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; + + dma_free_coherent(&adapter->pdev->dev, psr_size, + rx_ring->ps_ring_virtaddr, + rx_ring->ps_ring_physaddr); + + rx_ring->ps_ring_virtaddr = NULL; + } + + /* Free area of memory for the writeback of status information */ + if (rx_ring->rx_status_block) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct rx_status_block), + rx_ring->rx_status_block, + rx_ring->rx_status_bus); + rx_ring->rx_status_block = NULL; + } + + /* Free the FBR Lookup Table */ + kfree(rx_ring->fbr[0]); + kfree(rx_ring->fbr[1]); + + /* Reset Counters */ + rx_ring->num_ready_recv = 0; +} + +/* et131x_init_recv - Initialize receive data structures */ +static int et131x_init_recv(struct et131x_adapter *adapter) +{ + struct rfd *rfd; + u32 rfdct; + struct rx_ring *rx_ring = &adapter->rx_ring; + + /* Setup each RFD */ + for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { + rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA); + if (!rfd) + return -ENOMEM; + + rfd->skb = NULL; + + /* Add this RFD to the recv_list */ + list_add_tail(&rfd->list_node, &rx_ring->recv_list); + + /* Increment the available RFD's */ + rx_ring->num_ready_recv++; + } + + return 0; +} + +/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ +static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + /* For version B silicon, we do not use the RxDMA timer for 10 and 100 + * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
+ */ + if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { + writel(0, &adapter->regs->rxdma.max_pkt_time); + writel(1, &adapter->regs->rxdma.num_pkt_done); + } +} + +/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */ +static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; + u16 buff_index = rfd->bufferindex; + u8 ring_index = rfd->ringindex; + unsigned long flags; + struct fbr_lookup *fbr = rx_local->fbr[ring_index]; + + /* We don't use any of the OOB data besides status. Otherwise, we + * need to clean up OOB data + */ + if (buff_index < fbr->num_entries) { + u32 free_buff_ring; + u32 __iomem *offset; + struct fbr_desc *next; + + if (ring_index == 0) + offset = &rx_dma->fbr0_full_offset; + else + offset = &rx_dma->fbr1_full_offset; + + next = (struct fbr_desc *)(fbr->ring_virtaddr) + + INDEX10(fbr->local_full); + + /* Handle the Free Buffer Ring advancement here. Write + * the PA / Buffer Index for the returned buffer into + * the oldest (next to be freed)FBR entry + */ + next->addr_hi = fbr->bus_high[buff_index]; + next->addr_lo = fbr->bus_low[buff_index]; + next->word2 = buff_index; + + free_buff_ring = bump_free_buff_ring(&fbr->local_full, + fbr->num_entries - 1); + writel(free_buff_ring, offset); + } else { + dev_err(&adapter->pdev->dev, + "%s illegal Buffer Index returned\n", __func__); + } + + /* The processing on this RFD is done, so put it back on the tail of + * our list + */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + list_add_tail(&rfd->list_node, &rx_local->recv_list); + rx_local->num_ready_recv++; + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); +} + +/* nic_rx_pkts - Checks the hardware for available packets + * + * Checks the hardware for available packets, using completion ring + * If packets are available, it gets an RFD from the recv_list, attaches + * the packet to it, puts the RFD in the RecvPendList, and also returns + * the pointer to the RFD. + */ +static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rx_status_block *status; + struct pkt_stat_desc *psr; + struct rfd *rfd; + unsigned long flags; + struct list_head *element; + u8 ring_index; + u16 buff_index; + u32 len; + u32 word0; + u32 word1; + struct sk_buff *skb; + struct fbr_lookup *fbr; + + /* RX Status block is written by the DMA engine prior to every + * interrupt. It contains the next to be used entry in the Packet + * Status Ring, and also the two Free Buffer rings. + */ + status = rx_local->rx_status_block; + word1 = status->word1 >> 16; + + /* Check the PSR and wrap bits do not match */ + if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) + return NULL; /* Looks like this ring is not updated yet */ + + /* The packet status ring indicates that data is available. */ + psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) + + (rx_local->local_psr_full & 0xFFF); + + /* Grab any information that is required once the PSR is advanced, + * since we can no longer rely on the memory being accurate + */ + len = psr->word1 & 0xFFFF; + ring_index = (psr->word1 >> 26) & 0x03; + fbr = rx_local->fbr[ring_index]; + buff_index = (psr->word1 >> 16) & 0x3FF; + word0 = psr->word0; + + /* Indicate that we have used this PSR entry. 
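+	 * The local copy is a 12-bit index plus a wrap bit in bit 12: when the
+	 * index moves past the last entry it is cleared and the wrap bit is
+	 * toggled, matching the hardware's psr_full_offset encoding.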
*/ + /* FIXME wrap 12 */ + add_12bit(&rx_local->local_psr_full, 1); + if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) { + /* Clear psr full and toggle the wrap bit */ + rx_local->local_psr_full &= ~0xFFF; + rx_local->local_psr_full ^= 0x1000; + } + + writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); + + if (ring_index > 1 || buff_index > fbr->num_entries - 1) { + /* Illegal buffer or ring index cannot be used by S/W*/ + dev_err(&adapter->pdev->dev, + "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", + rx_local->local_psr_full & 0xFFF, len, buff_index); + return NULL; + } + + /* Get and fill the RFD. */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + + element = rx_local->recv_list.next; + rfd = list_entry(element, struct rfd, list_node); + + if (!rfd) { + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + return NULL; + } + + list_del(&rfd->list_node); + rx_local->num_ready_recv--; + + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + rfd->bufferindex = buff_index; + rfd->ringindex = ring_index; + + /* In V1 silicon, there is a bug which screws up filtering of runt + * packets. Therefore runt packet filtering is disabled in the MAC and + * the packets are dropped here. They are also counted here. + */ + if (len < (NIC_MIN_PACKET_SIZE + 4)) { + adapter->stats.rx_other_errs++; + rfd->len = 0; + goto out; + } + + if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) + adapter->stats.multicast_pkts_rcvd++; + + rfd->len = len; + + skb = dev_alloc_skb(rfd->len + 2); + if (!skb) + return NULL; + + adapter->netdev->stats.rx_bytes += rfd->len; + + memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); + + skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->ip_summed = CHECKSUM_NONE; + netif_receive_skb(skb); + +out: + nic_return_rfd(adapter, rfd); + return rfd; +} + +static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget) +{ + struct rfd *rfd = NULL; + int count = 0; + int limit = budget; + bool done = true; + struct rx_ring *rx_ring = &adapter->rx_ring; + + if (budget > MAX_PACKETS_HANDLED) + limit = MAX_PACKETS_HANDLED; + + /* Process up to available RFD's */ + while (count < limit) { + if (list_empty(&rx_ring->recv_list)) { + WARN_ON(rx_ring->num_ready_recv != 0); + done = false; + break; + } + + rfd = nic_rx_pkts(adapter); + + if (rfd == NULL) + break; + + /* Do not receive any packets until a filter has been set. + * Do not receive any packets until we have link. + * If length is zero, return the RFD in order to advance the + * Free buffer ring. + */ + if (!adapter->packet_filter || + !netif_carrier_ok(adapter->netdev) || + rfd->len == 0) + continue; + + adapter->netdev->stats.rx_packets++; + + if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) + dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); + + count++; + } + + if (count == limit || !done) { + rx_ring->unfinished_receives = true; + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } else { + /* Watchdog timer will disable itself if appropriate. */ + rx_ring->unfinished_receives = false; + } + + return count; +} + +/* et131x_tx_dma_memory_alloc + * + * Allocates memory that will be visible both to the device and to the CPU. + * The OS will pass us packets, pointers to which we will insert in the Tx + * Descriptor queue. The device will read this queue to find the packets in + * memory. The device will update the "status" in memory each time it xmits a + * packet. 
+ */ +static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Allocate memory for the TCB's (Transmit Control Block) */ + tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), + GFP_ATOMIC | GFP_DMA); + if (!tx_ring->tcb_ring) + return -ENOMEM; + + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, + desc_size, + &tx_ring->tx_desc_ring_pa, + GFP_KERNEL); + if (!tx_ring->tx_desc_ring) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx Ring\n"); + return -ENOMEM; + } + + tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(u32), + &tx_ring->tx_status_pa, + GFP_KERNEL); + if (!tx_ring->tx_status_pa) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx status block\n"); + return -ENOMEM; + } + return 0; +} + +static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + if (tx_ring->tx_desc_ring) { + /* Free memory relating to Tx rings here */ + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + dma_free_coherent(&adapter->pdev->dev, + desc_size, + tx_ring->tx_desc_ring, + tx_ring->tx_desc_ring_pa); + tx_ring->tx_desc_ring = NULL; + } + + /* Free memory for the Tx status block */ + if (tx_ring->tx_status) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(u32), + tx_ring->tx_status, + tx_ring->tx_status_pa); + + tx_ring->tx_status = NULL; + } + /* Free the memory for the tcb structures */ + kfree(tx_ring->tcb_ring); +} + +/* nic_send_packet - NIC specific send handler for version B silicon. */ +static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) +{ + u32 i; + struct tx_desc desc[24]; + u32 frag = 0; + u32 thiscopy, remainder; + struct sk_buff *skb = tcb->skb; + u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; + struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; + struct phy_device *phydev = adapter->phydev; + dma_addr_t dma_addr; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Part of the optimizations of this send routine restrict us to + * sending 24 fragments at a pass. In practice we should never see + * more than 5 fragments. + */ + + /* nr_frags should be no more than 18. */ + BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); + + memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); + + for (i = 0; i < nr_frags; i++) { + /* If there is something in this element, lets get a + * descriptor from the ring and get the necessary data + */ + if (i == 0) { + /* If the fragments are smaller than a standard MTU, + * then map them to a single descriptor in the Tx + * Desc ring. However, if they're larger, as is + * possible with support for jumbo packets, then + * split them each across 2 descriptors. + * + * This will work until we determine why the hardware + * doesn't seem to like large fragments. 
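+			 * In the split case each half of the linear data gets
+			 * its own descriptor and its own DMA mapping.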
+ */ + if (skb_headlen(skb) <= 1514) { + /* Low 16bits are length, high is vlan and + * unused currently so zero + */ + desc[frag].len_vlan = skb_headlen(skb); + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb), + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } else { + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data + + skb_headlen(skb) / 2, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } else { + desc[frag].len_vlan = frags[i - 1].size; + dma_addr = skb_frag_dma_map(&adapter->pdev->dev, + &frags[i - 1], + 0, + frags[i - 1].size, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } + + if (phydev && phydev->speed == SPEED_1000) { + if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { + /* Last element & Interrupt flag */ + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + tx_ring->since_irq = 0; + } else { /* Last element */ + desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; + } + } else { + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + } + + desc[0].flags |= TXDESC_FLAG_FIRSTPKT; + + tcb->index_start = tx_ring->send_idx; + tcb->stale = 0; + + thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); + + if (thiscopy >= frag) { + remainder = 0; + thiscopy = frag; + } else { + remainder = frag - thiscopy; + } + + memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), + desc, + sizeof(struct tx_desc) * thiscopy); + + add_10bit(&tx_ring->send_idx, thiscopy); + + if (INDEX10(tx_ring->send_idx) == 0 || + INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { + tx_ring->send_idx &= ~ET_DMA10_MASK; + tx_ring->send_idx ^= ET_DMA10_WRAP; + } + + if (remainder) { + memcpy(tx_ring->tx_desc_ring, + desc + thiscopy, + sizeof(struct tx_desc) * remainder); + + add_10bit(&tx_ring->send_idx, remainder); + } + + if (INDEX10(tx_ring->send_idx) == 0) { + if (tx_ring->send_idx) + tcb->index = NUM_DESC_PER_RING_TX - 1; + else + tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); + } else { + tcb->index = tx_ring->send_idx - 1; + } + + spin_lock(&adapter->tcb_send_qlock); + + if (tx_ring->send_tail) + tx_ring->send_tail->next = tcb; + else + tx_ring->send_head = tcb; + + tx_ring->send_tail = tcb; + + WARN_ON(tcb->next != NULL); + + tx_ring->used++; + + spin_unlock(&adapter->tcb_send_qlock); + + /* Write the new write pointer back to the device. */ + writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); + + /* For Gig only, we use Tx Interrupt coalescing. Enable the software + * timer to wake us up if this packet isn't followed by N more. 
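+	 * "N" is PARM_TX_NUM_BUFS_DEF: only every Nth packet sets
+	 * TXDESC_FLAG_INTPROC on its final descriptor above, so the watchdog
+	 * guarantees a completion interrupt for the tail of a burst.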
+ */ + if (phydev && phydev->speed == SPEED_1000) { + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } + return 0; +} + +static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) +{ + int status; + struct tcb *tcb; + unsigned long flags; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* All packets must have at least a MAC address and a protocol type */ + if (skb->len < ETH_HLEN) + return -EIO; + + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + tcb = tx_ring->tcb_qhead; + + if (tcb == NULL) { + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return -ENOMEM; + } + + tx_ring->tcb_qhead = tcb->next; + + if (tx_ring->tcb_qhead == NULL) + tx_ring->tcb_qtail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + + tcb->skb = skb; + tcb->next = NULL; + + status = nic_send_packet(adapter, tcb); + + if (status != 0) { + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else + /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return status; + } + WARN_ON(tx_ring->used > NUM_TCB); + return 0; +} + +/* free_send_packet - Recycle a struct tcb */ +static inline void free_send_packet(struct et131x_adapter *adapter, + struct tcb *tcb) +{ + unsigned long flags; + struct tx_desc *desc = NULL; + struct net_device_stats *stats = &adapter->netdev->stats; + struct tx_ring *tx_ring = &adapter->tx_ring; + u64 dma_addr; + + if (tcb->skb) { + stats->tx_bytes += tcb->skb->len; + + /* Iterate through the TX descriptors on the ring + * corresponding to this packet and umap the fragments + * they point to + */ + do { + desc = tx_ring->tx_desc_ring + + INDEX10(tcb->index_start); + + dma_addr = desc->addr_lo; + dma_addr |= (u64)desc->addr_hi << 32; + + dma_unmap_single(&adapter->pdev->dev, + dma_addr, + desc->len_vlan, DMA_TO_DEVICE); + + add_10bit(&tcb->index_start, 1); + if (INDEX10(tcb->index_start) >= + NUM_DESC_PER_RING_TX) { + tcb->index_start &= ~ET_DMA10_MASK; + tcb->index_start ^= ET_DMA10_WRAP; + } + } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); + + dev_kfree_skb_any(tcb->skb); + } + + memset(tcb, 0, sizeof(struct tcb)); + + /* Add the TCB to the Ready Q */ + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + stats->tx_packets++; + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + WARN_ON(tx_ring->used < 0); +} + +/* et131x_free_busy_send_packets - Free and complete the stopped active sends */ +static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) +{ + struct tcb *tcb; + unsigned long flags; + u32 freed = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Any packets being sent? 
Check the first TCB on the send list */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb != NULL && freed < NUM_TCB) { + struct tcb *next = tcb->next; + + tx_ring->send_head = next; + + if (next == NULL) + tx_ring->send_tail = NULL; + + tx_ring->used--; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + freed++; + free_send_packet(adapter, tcb); + + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + } + + WARN_ON(freed == NUM_TCB); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + tx_ring->used = 0; +} + +/* et131x_handle_send_pkts + * + * Re-claim the send resources, complete sends and get more to send from + * the send wait queue. + */ +static void et131x_handle_send_pkts(struct et131x_adapter *adapter) +{ + unsigned long flags; + u32 serviced; + struct tcb *tcb; + u32 index; + struct tx_ring *tx_ring = &adapter->tx_ring; + + serviced = readl(&adapter->regs->txdma.new_service_complete); + index = INDEX10(serviced); + + /* Has the ring wrapped? Process any descriptors that do not have + * the same "wrap" indicator as the current completion indicator + */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb && + ((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index < INDEX10(tcb->index)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + while (tcb && + !((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index > (tcb->index & ET_DMA10_MASK)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + + /* Wake up the queue when we hit a low-water mark */ + if (tx_ring->used <= NUM_TCB / 3) + netif_wake_queue(adapter->netdev); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); +} + +static int et131x_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_gset(adapter->phydev, cmd); +} + +static int et131x_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_sset(adapter->phydev, cmd); +} + +static int et131x_get_regs_len(struct net_device *netdev) +{ +#define ET131X_REGS_LEN 256 + return ET131X_REGS_LEN * sizeof(u32); +} + +static void et131x_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *regs_data) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *aregs = adapter->regs; + u32 *regs_buff = regs_data; + u32 num = 0; + u16 tmp; + + memset(regs_data, 0, et131x_get_regs_len(netdev)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + /* PHY regs */ + et131x_mii_read(adapter, MII_BMCR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_BMSR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID2, &tmp); + 
regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ADVERTISE, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_LPA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_EXPANSION, &tmp); + regs_buff[num++] = tmp; + /* Autoneg next page transmit reg */ + et131x_mii_read(adapter, 0x07, &tmp); + regs_buff[num++] = tmp; + /* Link partner next page reg */ + et131x_mii_read(adapter, 0x08, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_CTRL1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_STAT1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0b, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0c, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_CTRL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_DATA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ESTATUS, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_INDEX_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_DATA_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_CONFIG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_2, &tmp); + regs_buff[num++] = tmp; + + /* Global regs */ + regs_buff[num++] = readl(&aregs->global.txq_start_addr); + regs_buff[num++] = readl(&aregs->global.txq_end_addr); + regs_buff[num++] = readl(&aregs->global.rxq_start_addr); + regs_buff[num++] = readl(&aregs->global.rxq_end_addr); + regs_buff[num++] = readl(&aregs->global.pm_csr); + regs_buff[num++] = adapter->stats.interrupt_status; + regs_buff[num++] = readl(&aregs->global.int_mask); + regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); + regs_buff[num++] = readl(&aregs->global.int_status_alias); + regs_buff[num++] = readl(&aregs->global.sw_reset); + regs_buff[num++] = readl(&aregs->global.slv_timer); + regs_buff[num++] = readl(&aregs->global.msi_config); + regs_buff[num++] = readl(&aregs->global.loopback); + regs_buff[num++] = readl(&aregs->global.watchdog_timer); + + /* TXDMA regs */ + regs_buff[num++] = readl(&aregs->txdma.csr); + regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); + regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); + regs_buff[num++] = readl(&aregs->txdma.pr_num_des); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); + regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->txdma.service_request); + regs_buff[num++] = readl(&aregs->txdma.service_complete); + regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); + regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); + regs_buff[num++] = 
readl(&aregs->txdma.tx_dma_error); + regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); + regs_buff[num++] = readl(&aregs->txdma.new_service_complete); + regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); + + /* RXDMA regs */ + regs_buff[num++] = readl(&aregs->rxdma.csr); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); + regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); + regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); + regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); + regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); +} + +static void et131x_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static struct ethtool_ops et131x_ethtool_ops = { + .get_settings = et131x_get_settings, + .set_settings = et131x_set_settings, + .get_drvinfo = et131x_get_drvinfo, + .get_regs_len = et131x_get_regs_len, + .get_regs = et131x_get_regs, + .get_link = ethtool_op_get_link, +}; + +/* et131x_hwaddr_init - set up the MAC Address */ +static void et131x_hwaddr_init(struct et131x_adapter *adapter) +{ + /* If have our default mac from init and no mac address from + * EEPROM then we need to generate the last octet and set it on the + * device + */ + if (is_zero_ether_addr(adapter->rom_addr)) { + /* We need to randomly generate the last octet so we + * decrease our chances of setting the mac address to + * same as another one of our cards in the 
system + */ + get_random_bytes(&adapter->addr[5], 1); + /* We have the default value in the register we are + * working with so we need to copy the current + * address into the permanent address + */ + ether_addr_copy(adapter->rom_addr, adapter->addr); + } else { + /* We do not have an override address, so set the + * current address to the permanent address and add + * it to the device + */ + ether_addr_copy(adapter->addr, adapter->rom_addr); + } +} + +static int et131x_pci_init(struct et131x_adapter *adapter, + struct pci_dev *pdev) +{ + u16 max_payload; + int i, rc; + + rc = et131x_init_eeprom(adapter); + if (rc < 0) + goto out; + + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Missing PCIe capabilities\n"); + goto err_out; + } + + /* Program the Ack/Nak latency and replay timers */ + max_payload = pdev->pcie_mpss; + + if (max_payload < 2) { + static const u16 acknak[2] = { 0x76, 0xD0 }; + static const u16 replay[2] = { 0x1E0, 0x2ED }; + + if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, + acknak[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for ACK/NAK\n"); + goto err_out; + } + if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, + replay[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for Replay Timer\n"); + goto err_out; + } + } + + /* l0s and l1 latency timers. We are using default values. + * Representing 001 for L0s and 010 for L1 + */ + if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { + dev_err(&pdev->dev, + "Could not write PCI config space for Latency Timers\n"); + goto err_out; + } + + /* Change the max read size to 2k */ + if (pcie_set_readrq(pdev, 2048)) { + dev_err(&pdev->dev, + "Couldn't change PCI config space for Max read size\n"); + goto err_out; + } + + /* Get MAC address from config space if an eeprom exists, otherwise + * the MAC address there will not be valid + */ + if (!adapter->has_eeprom) { + et131x_hwaddr_init(adapter); + return 0; + } + + for (i = 0; i < ETH_ALEN; i++) { + if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, + adapter->rom_addr + i)) { + dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); + goto err_out; + } + } + ether_addr_copy(adapter->addr, adapter->rom_addr); +out: + return rc; +err_out: + rc = -EIO; + goto out; +} + +/* et131x_error_timer_handler + * @data: timer-specific variable; here a pointer to our adapter structure + * + * The routine called when the error timer expires, to track the number of + * recurring errors. + */ +static void et131x_error_timer_handler(unsigned long data) +{ + struct et131x_adapter *adapter = (struct et131x_adapter *)data; + struct phy_device *phydev = adapter->phydev; + + if (et1310_in_phy_coma(adapter)) { + /* Bring the device immediately out of coma, to + * prevent it from sleeping indefinitely, this + * mechanism could be improved! + */ + et1310_disable_phy_coma(adapter); + adapter->boot_coma = 20; + } else { + et1310_update_macstat_host_counters(adapter); + } + + if (!phydev->link && adapter->boot_coma < 11) + adapter->boot_coma++; + + if (adapter->boot_coma == 10) { + if (!phydev->link) { + if (!et1310_in_phy_coma(adapter)) { + /* NOTE - This was originally a 'sync with + * interrupt'. How to do that under Linux? 
+	 */
+				et131x_enable_interrupts(adapter);
+				et1310_enable_phy_coma(adapter);
+			}
+		}
+	}
+
+	/* This is a periodic timer, so reschedule */
+	mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
+}
+
+static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+{
+	et131x_tx_dma_memory_free(adapter);
+	et131x_rx_dma_memory_free(adapter);
+}
+
+static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
+{
+	int status;
+
+	status = et131x_tx_dma_memory_alloc(adapter);
+	if (status) {
+		dev_err(&adapter->pdev->dev,
+			"et131x_tx_dma_memory_alloc FAILED\n");
+		et131x_tx_dma_memory_free(adapter);
+		return status;
+	}
+
+	status = et131x_rx_dma_memory_alloc(adapter);
+	if (status) {
+		dev_err(&adapter->pdev->dev,
+			"et131x_rx_dma_memory_alloc FAILED\n");
+		et131x_adapter_memory_free(adapter);
+		return status;
+	}
+
+	status = et131x_init_recv(adapter);
+	if (status) {
+		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
+		et131x_adapter_memory_free(adapter);
+	}
+	return status;
+}
+
+static void et131x_adjust_link(struct net_device *netdev)
+{
+	struct et131x_adapter *adapter = netdev_priv(netdev);
+	struct phy_device *phydev = adapter->phydev;
+
+	if (!phydev)
+		return;
+	if (phydev->link == adapter->link)
+		return;
+
+	/* Check to see if we are in coma mode and if
+	 * so, disable it because we will not be able
+	 * to read PHY values until we are out.
+	 */
+	if (et1310_in_phy_coma(adapter))
+		et1310_disable_phy_coma(adapter);
+
+	adapter->link = phydev->link;
+	phy_print_status(phydev);
+
+	if (phydev->link) {
+		adapter->boot_coma = 20;
+		if (phydev->speed == SPEED_10) {
+			u16 register18;
+
+			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+					&register18);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_MPHY_CONTROL_REG,
+					 register18 | 0x4);
+			et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
+					 register18 | 0x8402);
+			et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
+					 register18 | 511);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_MPHY_CONTROL_REG, register18);
+		}
+
+		et1310_config_flow_control(adapter);
+
+		if (phydev->speed == SPEED_1000 &&
+		    adapter->registry_jumbo_packet > 2048) {
+			u16 reg;
+
+			et131x_mii_read(adapter, PHY_CONFIG, &reg);
+			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+			et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
+					 reg);
+		}
+
+		et131x_set_rx_dma_timer(adapter);
+		et1310_config_mac_regs2(adapter);
+	} else {
+		adapter->boot_coma = 0;
+
+		if (phydev->speed == SPEED_10) {
+			u16 register18;
+
+			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+					&register18);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_MPHY_CONTROL_REG,
+					 register18 | 0x4);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_INDEX_REG, register18 | 0x8402);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_DATA_REG, register18 | 511);
+			et131x_mii_write(adapter, phydev->addr,
+					 PHY_MPHY_CONTROL_REG, register18);
+		}
+
+		et131x_free_busy_send_packets(adapter);
+		et131x_init_send(adapter);
+
+		/* Bring the device back to the state it was during
+		 * init prior to autonegotiation being complete. This
+		 * way, when we get the auto-neg complete interrupt,
+		 * we can complete init by calling config_mac_regs2.
+ */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } +} + +static int et131x_mii_probe(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + + phydev = phy_find_first(adapter->mii_bus); + if (!phydev) { + dev_err(&adapter->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(netdev, dev_name(&phydev->dev), + &et131x_adjust_link, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_TP); + + if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) + phydev->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + phydev->advertising = phydev->supported; + phydev->autoneg = AUTONEG_ENABLE; + adapter->phydev = phydev; + + dev_info(&adapter->pdev->dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + + return 0; +} + +static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, + struct pci_dev *pdev) +{ + static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; + + struct et131x_adapter *adapter; + + adapter = netdev_priv(netdev); + adapter->pdev = pci_dev_get(pdev); + adapter->netdev = netdev; + + spin_lock_init(&adapter->tcb_send_qlock); + spin_lock_init(&adapter->tcb_ready_qlock); + spin_lock_init(&adapter->rcv_lock); + + adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ + + ether_addr_copy(adapter->addr, default_mac); + + return adapter; +} + +static void et131x_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct et131x_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + netif_napi_del(&adapter->napi); + phy_disconnect(adapter->phydev); + mdiobus_unregister(adapter->mii_bus); + kfree(adapter->mii_bus->irq); + mdiobus_free(adapter->mii_bus); + + et131x_adapter_memory_free(adapter); + iounmap(adapter->regs); + pci_dev_put(pdev); + + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static void et131x_up(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_enable_txrx(netdev); + phy_start(adapter->phydev); +} + +static void et131x_down(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + /* Save the timestamp for the TX watchdog, prevent a timeout */ + netdev->trans_start = jiffies; + + phy_stop(adapter->phydev); + et131x_disable_txrx(netdev); +} + +#ifdef CONFIG_PM_SLEEP +static int et131x_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + et131x_down(netdev); + pci_save_state(pdev); + } + + return 0; +} + +static int et131x_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + pci_restore_state(pdev); + et131x_up(netdev); + netif_device_attach(netdev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); + +static irqreturn_t et131x_isr(int irq, void *dev_id) +{ + bool 
handled = true; + bool enable_interrupts = true; + struct net_device *netdev = dev_id; + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *iomem = adapter->regs; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct tx_ring *tx_ring = &adapter->tx_ring; + u32 status; + + if (!netif_device_present(netdev)) { + handled = false; + enable_interrupts = false; + goto out; + } + + et131x_disable_interrupts(adapter); + + status = readl(&adapter->regs->global.int_status); + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + status &= ~INT_MASK_ENABLE; + else + status &= ~INT_MASK_ENABLE_NO_FLOW; + + /* Make sure this is our interrupt */ + if (!status) { + handled = false; + et131x_enable_interrupts(adapter); + goto out; + } + + /* This is our interrupt, so process accordingly */ + if (status & ET_INTR_WATCHDOG) { + struct tcb *tcb = tx_ring->send_head; + + if (tcb) + if (++tcb->stale > 1) + status |= ET_INTR_TXDMA_ISR; + + if (rx_ring->unfinished_receives) + status |= ET_INTR_RXDMA_XFR_DONE; + else if (tcb == NULL) + writel(0, &adapter->regs->global.watchdog_timer); + + status &= ~ET_INTR_WATCHDOG; + } + + if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) { + enable_interrupts = false; + napi_schedule(&adapter->napi); + } + + status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE); + + if (!status) + goto out; + + if (status & ET_INTR_TXDMA_ERR) { + /* Following read also clears the register (COR) */ + u32 txdma_err = readl(&iomem->txdma.tx_dma_error); + + dev_warn(&adapter->pdev->dev, + "TXDMA_ERR interrupt, error = %d\n", + txdma_err); + } + + if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { + /* This indicates the number of unused buffers in RXDMA free + * buffer ring 0 is <= the limit you programmed. Free buffer + * resources need to be returned. Free buffers are consumed as + * packets are passed from the network to the host. The host + * becomes aware of the packets from the contents of the packet + * status ring. This ring is queried when the packet done + * interrupt occurs. Packets are then passed to the OS. When + * the OS is done with the packets the resources can be + * returned to the ET1310 for re-use. This interrupt is one + * method of returning resources. + */ + + /* If the user has flow control on, then we will + * send a pause packet, otherwise just exit + */ + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) { + u32 pm_csr; + + /* Tell the device to send a pause packet via the back + * pressure register (bp req and bp xon/xoff) + */ + pm_csr = readl(&iomem->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) + writel(3, &iomem->txmac.bp_ctrl); + } + } + + /* Handle Packet Status Ring Low Interrupt */ + if (status & ET_INTR_RXDMA_STAT_LOW) { + /* Same idea as with the two Free Buffer Rings. Packets going + * from the network to the host each consume a free buffer + * resource and a packet status resource. These resources are + * passed to the OS. When the OS is done with the resources, + * they need to be returned to the ET1310. This is one method + * of returning the resources. + */ + } + + if (status & ET_INTR_RXDMA_ERR) { + /* The rxdma_error interrupt is sent when a time-out on a + * request issued by the JAGCore has occurred or a completion is + * returned with an un-successful status. In both cases the + * request is considered complete. The JAGCore will + * automatically re-try the request in question. 
Normally + * information on events like these are sent to the host using + * the "Advanced Error Reporting" capability. This interrupt is + * another way of getting similar information. The only thing + * required is to clear the interrupt by reading the ISR in the + * global resources. The JAGCore will do a re-try on the + * request. Normally you should never see this interrupt. If + * you start to see this interrupt occurring frequently then + * something bad has occurred. A reset might be the thing to do. + */ + /* TRAP();*/ + + dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n", + readl(&iomem->txmac.tx_test)); + } + + /* Handle the Wake on LAN Event */ + if (status & ET_INTR_WOL) { + /* This is a secondary interrupt for wake on LAN. The driver + * should never see this, if it does, something serious is + * wrong. + */ + dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); + } + + if (status & ET_INTR_TXMAC) { + u32 err = readl(&iomem->txmac.err); + + /* When any of the errors occur and TXMAC generates an + * interrupt to report these errors, it usually means that + * TXMAC has detected an error in the data stream retrieved + * from the on-chip Tx Q. All of these errors are catastrophic + * and TXMAC won't be able to recover data when these errors + * occur. In a nutshell, the whole Tx path will have to be reset + * and re-configured afterwards. + */ + dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n", + err); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_RXMAC) { + /* These interrupts are catastrophic to the device, what we need + * to do is disable the interrupts and set the flag to cause us + * to reset so we can solve this issue. + */ + dev_warn(&adapter->pdev->dev, + "RXMAC interrupt, error 0x%08x. Requesting reset\n", + readl(&iomem->rxmac.err_reg)); + + dev_warn(&adapter->pdev->dev, + "Enable 0x%08x, Diag 0x%08x\n", + readl(&iomem->rxmac.ctrl), + readl(&iomem->rxmac.rxq_diag)); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_MAC_STAT) { + /* This means at least one of the un-masked counters in the + * MAC_STAT block has rolled over. Use this to maintain the top, + * software managed bits of the counter(s). + */ + et1310_handle_macstat_interrupt(adapter); + } + + if (status & ET_INTR_SLV_TIMEOUT) { + /* This means a timeout has occurred on a read or write request + * to one of the JAGCore registers. The Global Resources block + * has terminated the request and on a read request, returned a + * "fake" value. The most likely reasons are: Bad Address or the + * addressed module is in a power-down state and can't respond. 
+ */ + } + +out: + if (enable_interrupts) + et131x_enable_interrupts(adapter); + + return IRQ_RETVAL(handled); +} + +static int et131x_poll(struct napi_struct *napi, int budget) +{ + struct et131x_adapter *adapter = + container_of(napi, struct et131x_adapter, napi); + int work_done = et131x_handle_recv_pkts(adapter, budget); + + et131x_handle_send_pkts(adapter); + + if (work_done < budget) { + napi_complete(&adapter->napi); + et131x_enable_interrupts(adapter); + } + + return work_done; +} + +/* et131x_stats - Return the current device statistics */ +static struct net_device_stats *et131x_stats(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &adapter->netdev->stats; + struct ce_stats *devstat = &adapter->stats; + + stats->rx_errors = devstat->rx_length_errs + + devstat->rx_align_errs + + devstat->rx_crc_errs + + devstat->rx_code_violations + + devstat->rx_other_errs; + stats->tx_errors = devstat->tx_max_pkt_errs; + stats->multicast = devstat->multicast_pkts_rcvd; + stats->collisions = devstat->tx_collisions; + + stats->rx_length_errors = devstat->rx_length_errs; + stats->rx_over_errors = devstat->rx_overflows; + stats->rx_crc_errors = devstat->rx_crc_errs; + stats->rx_dropped = devstat->rcvd_pkts_dropped; + + /* NOTE: Not used, can't find analogous statistics */ + /* stats->rx_frame_errors = devstat->; */ + /* stats->rx_fifo_errors = devstat->; */ + /* stats->rx_missed_errors = devstat->; */ + + /* stats->tx_aborted_errors = devstat->; */ + /* stats->tx_carrier_errors = devstat->; */ + /* stats->tx_fifo_errors = devstat->; */ + /* stats->tx_heartbeat_errors = devstat->; */ + /* stats->tx_window_errors = devstat->; */ + return stats; +} + +static int et131x_open(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + unsigned int irq = pdev->irq; + int result; + + /* Start the timer to track NIC errors */ + init_timer(&adapter->error_timer); + adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; + adapter->error_timer.function = et131x_error_timer_handler; + adapter->error_timer.data = (unsigned long)adapter; + add_timer(&adapter->error_timer); + + result = request_irq(irq, et131x_isr, + IRQF_SHARED, netdev->name, netdev); + if (result) { + dev_err(&pdev->dev, "could not register IRQ %d\n", irq); + return result; + } + + adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; + + napi_enable(&adapter->napi); + + et131x_up(netdev); + + return result; +} + +static int et131x_close(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_down(netdev); + napi_disable(&adapter->napi); + + adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; + free_irq(adapter->pdev->irq, netdev); + + /* Stop the error timer */ + return del_timer_sync(&adapter->error_timer); +} + +static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, + int cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (!adapter->phydev) + return -EINVAL; + + return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); +} + +/* et131x_set_packet_filter - Configures the Rx Packet filtering */ +static int et131x_set_packet_filter(struct et131x_adapter *adapter) +{ + int filter = adapter->packet_filter; + u32 ctrl; + u32 pf_ctrl; + + ctrl = readl(&adapter->regs->rxmac.ctrl); + pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); + + /* Default to disabled packet filtering */ + ctrl |= 0x04; + + /* Set us to be in promiscuous mode so we 
receive everything, this + * is also true when we get a packet filter of 0 + */ + if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) + pf_ctrl &= ~7; /* Clear filter bits */ + else { + /* Set us up with Multicast packet filtering. Three cases are + * possible - (1) we have a multi-cast list, (2) we receive ALL + * multicast entries or (3) we receive none. + */ + if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) + pf_ctrl &= ~2; /* Multicast filter bit */ + else { + et1310_setup_device_for_multicast(adapter); + pf_ctrl |= 2; + ctrl &= ~0x04; + } + + /* Set us up with Unicast packet filtering */ + if (filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= 4; + ctrl &= ~0x04; + } + + /* Set us up with Broadcast packet filtering */ + if (filter & ET131X_PACKET_TYPE_BROADCAST) { + pf_ctrl |= 1; /* Broadcast filter bit */ + ctrl &= ~0x04; + } else { + pf_ctrl &= ~1; + } + + /* Setup the receive mac configuration registers - Packet + * Filter control + the enable / disable for packet filter + * in the control reg. + */ + writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); + writel(ctrl, &adapter->regs->rxmac.ctrl); + } + return 0; +} + +static void et131x_multicast(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + int packet_filter; + struct netdev_hw_addr *ha; + int i; + + /* Before we modify the platform-independent filter flags, store them + * locally. This allows us to determine if anything's changed and if + * we even need to bother the hardware + */ + packet_filter = adapter->packet_filter; + + /* Clear the 'multicast' flag locally; because we only have a single + * flag to check multicast, and multiple multicast addresses can be + * set, this is the easiest way to determine if more than one + * multicast address is being set. + */ + packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + + /* Check the net_device flags and set the device independent flags + * accordingly + */ + if (netdev->flags & IFF_PROMISC) + adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; + else + adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)) + adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; + + if (netdev_mc_count(netdev) < 1) { + adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; + adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + } else { + adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; + } + + /* Set values in the private adapter struct */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == NIC_MAX_MCAST_LIST) + break; + ether_addr_copy(adapter->multicast_list[i++], ha->addr); + } + adapter->multicast_addr_count = i; + + /* Are the new flags different from the previous ones? If not, then no + * action is required + * + * NOTE - This block will always update the multicast_list with the + * hardware, even if the addresses aren't the same. 
+ */
+ if (packet_filter != adapter->packet_filter)
+ et131x_set_packet_filter(adapter);
+}
+
+static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+
+ /* stop the queue if it's getting full */
+ if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
+ netif_stop_queue(netdev);
+
+ /* Save the timestamp for the TX timeout watchdog */
+ netdev->trans_start = jiffies;
+
+ /* TCB is not available */
+ if (tx_ring->used >= NUM_TCB)
+ goto drop_err;
+
+ if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
+ !netif_carrier_ok(netdev))
+ goto drop_err;
+
+ if (send_packet(skb, adapter))
+ goto drop_err;
+
+ return NETDEV_TX_OK;
+
+drop_err:
+ dev_kfree_skb_any(skb);
+ adapter->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* et131x_tx_timeout - Timeout handler
+ *
+ * The handler called when a Tx request times out. The timeout period is
+ * specified by the 'watchdog_timeo' element of the net_device structure
+ * (see et131x_pci_setup() to see how this value is set).
+ */
+static void et131x_tx_timeout(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+ struct tcb *tcb;
+ unsigned long flags;
+
+ /* If the device is closed, ignore the timeout */
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ return;
+
+ /* Any nonrecoverable hardware error?
+ * Checks adapter->flags for any failure in phy reading
+ */
+ if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
+ return;
+
+ /* Hardware failure? */
+ if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
+ dev_err(&adapter->pdev->dev, "hardware error - reset\n");
+ return;
+ }
+
+ /* Is send stuck? */
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+ tcb = tx_ring->send_head;
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+
+ if (tcb) {
+ tcb->count++;
+
+ if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
+ dev_warn(&adapter->pdev->dev,
+ "Send stuck - reset. 
tcb->WrIndex %x\n", + tcb->index); + + adapter->netdev->stats.tx_errors++; + + /* perform reset of tx/rx */ + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } + } +} + +static int et131x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int result = 0; + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (new_mtu < 64 || new_mtu > 9216) + return -EINVAL; + + et131x_disable_txrx(netdev); + + netdev->mtu = new_mtu; + + et131x_adapter_memory_free(adapter); + + /* Set the config parameter for Jumbo Packet support */ + adapter->registry_jumbo_packet = new_mtu + 14; + et131x_soft_reset(adapter); + + result = et131x_adapter_memory_alloc(adapter); + if (result != 0) { + dev_warn(&adapter->pdev->dev, + "Change MTU failed; couldn't re-alloc DMA memory\n"); + return result; + } + + et131x_init_send(adapter); + et131x_hwaddr_init(adapter); + ether_addr_copy(netdev->dev_addr, adapter->addr); + + /* Init the device with the new settings */ + et131x_adapter_setup(adapter); + et131x_enable_txrx(netdev); + + return result; +} + +static const struct net_device_ops et131x_netdev_ops = { + .ndo_open = et131x_open, + .ndo_stop = et131x_close, + .ndo_start_xmit = et131x_tx, + .ndo_set_rx_mode = et131x_multicast, + .ndo_tx_timeout = et131x_tx_timeout, + .ndo_change_mtu = et131x_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats = et131x_stats, + .ndo_do_ioctl = et131x_ioctl, +}; + +static int et131x_pci_setup(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct et131x_adapter *adapter; + int rc; + int ii; + + rc = pci_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "pci_enable_device() failed\n"); + goto out; + } + + /* Perform some basic PCI checks */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Can't find PCI device's base address\n"); + rc = -ENODEV; + goto err_disable; + } + + rc = pci_request_regions(pdev, DRIVER_NAME); + if (rc < 0) { + dev_err(&pdev->dev, "Can't get PCI resources\n"); + goto err_disable; + } + + pci_set_master(pdev); + + /* Check the DMA addressing support of this device */ + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "No usable DMA addressing method\n"); + rc = -EIO; + goto err_release_res; + } + + netdev = alloc_etherdev(sizeof(struct et131x_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); + rc = -ENOMEM; + goto err_release_res; + } + + netdev->watchdog_timeo = ET131X_TX_TIMEOUT; + netdev->netdev_ops = &et131x_netdev_ops; + + SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->ethtool_ops = &et131x_ethtool_ops; + + adapter = et131x_adapter_init(netdev, pdev); + + rc = et131x_pci_init(adapter, pdev); + if (rc < 0) + goto err_free_dev; + + /* Map the bus-relative registers to system virtual memory */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "Cannot map device registers\n"); + rc = -ENOMEM; + goto err_free_dev; + } + + /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ + writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); + + et131x_soft_reset(adapter); + et131x_disable_interrupts(adapter); + + rc = et131x_adapter_memory_alloc(adapter); + if (rc < 0) { + dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); + goto err_iounmap; + } + + et131x_init_send(adapter); + + netif_napi_add(netdev, &adapter->napi, et131x_poll, 64); + + ether_addr_copy(netdev->dev_addr, adapter->addr); + + rc = -ENOMEM; + + adapter->mii_bus = mdiobus_alloc(); + if (!adapter->mii_bus) { + dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); + goto err_mem_free; + } + + adapter->mii_bus->name = "et131x_eth_mii"; + snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); + adapter->mii_bus->priv = netdev; + adapter->mii_bus->read = et131x_mdio_read; + adapter->mii_bus->write = et131x_mdio_write; + adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), + GFP_KERNEL); + if (!adapter->mii_bus->irq) + goto err_mdio_free; + + for (ii = 0; ii < PHY_MAX_ADDR; ii++) + adapter->mii_bus->irq[ii] = PHY_POLL; + + rc = mdiobus_register(adapter->mii_bus); + if (rc < 0) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_mdio_free_irq; + } + + rc = et131x_mii_probe(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto err_mdio_unregister; + } + + et131x_adapter_setup(adapter); + + /* Init variable for counting how long we do not have link status */ + adapter->boot_coma = 0; + et1310_disable_phy_coma(adapter); + + /* We can enable interrupts now + * + * NOTE - Because registration of interrupt handler is done in the + * device's open(), defer enabling device interrupts to that + * point + */ + + rc = register_netdev(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "register_netdev() failed\n"); + goto err_phy_disconnect; + } + + /* Register the net_device struct with the PCI subsystem. Save a copy + * of the PCI config space for this device now that the device has + * been initialized, just in case it needs to be quickly restored. + */ + pci_set_drvdata(pdev, netdev); +out: + return rc; + +err_phy_disconnect: + phy_disconnect(adapter->phydev); +err_mdio_unregister: + mdiobus_unregister(adapter->mii_bus); +err_mdio_free_irq: + kfree(adapter->mii_bus->irq); +err_mdio_free: + mdiobus_free(adapter->mii_bus); +err_mem_free: + et131x_adapter_memory_free(adapter); +err_iounmap: + iounmap(adapter->regs); +err_free_dev: + pci_dev_put(pdev); + free_netdev(netdev); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); + goto out; +} + +static const struct pci_device_id et131x_pci_table[] = { + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, et131x_pci_table); + +static struct pci_driver et131x_driver = { + .name = DRIVER_NAME, + .id_table = et131x_pci_table, + .probe = et131x_pci_setup, + .remove = et131x_pci_remove, + .driver.pm = &et131x_pm_ops, +}; + +module_pci_driver(et131x_driver); diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h new file mode 100644 index 0000000000000000000000000000000000000000..be9a11c025265528501908eaca4a16d8006ab6ce --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.h @@ -0,0 +1,1433 @@ +/* Copyright © 2005 Agere Systems Inc. + * All rights reserved. 
+ * http://www.agere.com + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + */ + +#define DRIVER_NAME "et131x" +#define DRIVER_VERSION "v2.0" + +/* EEPROM registers */ + +/* LBCIF Register Groups (addressed via 32-bit offsets) */ +#define LBCIF_DWORD0_GROUP 0xAC +#define LBCIF_DWORD1_GROUP 0xB0 + +/* LBCIF Registers (addressed via 8-bit offsets) */ +#define LBCIF_ADDRESS_REGISTER 0xAC +#define LBCIF_DATA_REGISTER 0xB0 +#define LBCIF_CONTROL_REGISTER 0xB1 +#define LBCIF_STATUS_REGISTER 0xB2 + +/* LBCIF Control Register Bits */ +#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 +#define LBCIF_CONTROL_PAGE_WRITE 0x02 +#define LBCIF_CONTROL_EEPROM_RELOAD 0x08 +#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 +#define LBCIF_CONTROL_I2C_WRITE 0x40 +#define LBCIF_CONTROL_LBCIF_ENABLE 0x80 + +/* LBCIF Status Register Bits */ +#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 +#define LBCIF_STATUS_I2C_IDLE 0x02 +#define LBCIF_STATUS_ACK_ERROR 0x04 +#define LBCIF_STATUS_GENERAL_ERROR 0x08 +#define LBCIF_STATUS_CHECKSUM_ERROR 0x40 +#define LBCIF_STATUS_EEPROM_PRESENT 0x80 + +/* START OF GLOBAL REGISTER ADDRESS MAP */ +/* 10bit registers + * + * Tx queue start address reg in global address map at address 0x0000 + * tx queue end address reg in global address map at address 0x0004 + * rx queue start address reg in global address map at address 0x0008 + * rx queue end address reg in global address map at address 0x000C + */ + +/* structure for power management control status reg in global address map + * located at address 0x0010 + * jagcore_rx_rdy bit 9 + * jagcore_tx_rdy bit 8 + * phy_lped_en bit 7 + * phy_sw_coma bit 6 + * rxclk_gate bit 5 + * txclk_gate bit 4 + * sysclk_gate bit 3 + * jagcore_rx_en bit 2 + * jagcore_tx_en bit 1 + * gigephy_en bit 0 + */ +#define ET_PM_PHY_SW_COMA 0x40 +#define ET_PMCSR_INIT 0x38 + +/* Interrupt status reg at address 0x0018 + */ +#define ET_INTR_TXDMA_ISR 0x00000008 +#define ET_INTR_TXDMA_ERR 0x00000010 +#define ET_INTR_RXDMA_XFR_DONE 0x00000020 +#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040 +#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080 +#define ET_INTR_RXDMA_STAT_LOW 0x00000100 +#define ET_INTR_RXDMA_ERR 0x00000200 +#define ET_INTR_WATCHDOG 0x00004000 +#define ET_INTR_WOL 0x00008000 +#define ET_INTR_PHY 0x00010000 +#define ET_INTR_TXMAC 0x00020000 +#define ET_INTR_RXMAC 0x00040000 +#define ET_INTR_MAC_STAT 0x00080000 +#define ET_INTR_SLV_TIMEOUT 0x00100000 + +/* Interrupt mask register at address 0x001C + * Interrupt alias clear mask reg at address 0x0020 + * Interrupt status alias reg at address 0x0024 + * + * Same masks as above + */ + +/* Software reset reg at address 0x0028 + * 0: txdma_sw_reset + * 1: rxdma_sw_reset + * 2: txmac_sw_reset + * 3: rxmac_sw_reset + * 4: mac_sw_reset + * 5: mac_stat_sw_reset + * 6: mmc_sw_reset + *31: selfclr_disable + */ +#define ET_RESET_ALL 0x007F + +/* SLV Timer reg at address 0x002C (low 24 bits) + */ + +/* MSI Configuration reg at address 0x0030 + */ +#define ET_MSI_VECTOR 0x0000001F +#define ET_MSI_TC 0x00070000 + +/* Loopback reg located at address 0x0034 + */ +#define ET_LOOP_MAC 0x00000001 +#define ET_LOOP_DMA 0x00000002 + +/* GLOBAL Module of JAGCore Address Mapping + * Located at address 0x0000 + */ +struct global_regs { /* Location: */ + u32 txq_start_addr; /* 0x0000 */ + u32 txq_end_addr; /* 0x0004 */ + u32 rxq_start_addr; /* 0x0008 */ + u32 rxq_end_addr; /* 0x000C */ + u32 pm_csr; /* 0x0010 */ + u32 unused; /* 0x0014 */ + u32 int_status; /* 0x0018 */ + u32 int_mask; /* 0x001C */ + u32 int_alias_clr_en; /* 0x0020 */ + u32 int_status_alias; /* 0x0024 */ + u32 sw_reset; /* 0x0028 */ + u32 slv_timer; /* 0x002C */ + u32 
msi_config; /* 0x0030 */ + u32 loopback; /* 0x0034 */ + u32 watchdog_timer; /* 0x0038 */ +}; + +/* START OF TXDMA REGISTER ADDRESS MAP */ +/* txdma control status reg at address 0x1000 + */ +#define ET_TXDMA_CSR_HALT 0x00000001 +#define ET_TXDMA_DROP_TLP 0x00000002 +#define ET_TXDMA_CACHE_THRS 0x000000F0 +#define ET_TXDMA_CACHE_SHIFT 4 +#define ET_TXDMA_SNGL_EPKT 0x00000100 +#define ET_TXDMA_CLASS 0x00001E00 + +/* structure for txdma packet ring base address hi reg in txdma address map + * located at address 0x1004 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring base address low reg in txdma address map + * located at address 0x1008 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring number of descriptor reg in txdma address + * map. Located at address 0x100C + * + * 31-10: unused + * 9-0: pr ndes + */ +#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */ +#define ET_DMA12_WRAP 0x1000 +#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */ +#define ET_DMA10_WRAP 0x0400 +#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */ +#define ET_DMA4_WRAP 0x0010 + +#define INDEX12(x) ((x) & ET_DMA12_MASK) +#define INDEX10(x) ((x) & ET_DMA10_MASK) +#define INDEX4(x) ((x) & ET_DMA4_MASK) + +/* 10bit DMA with wrap + * txdma tx queue write address reg in txdma address map at 0x1010 + * txdma tx queue write address external reg in txdma address map at 0x1014 + * txdma tx queue read address reg in txdma address map at 0x1018 + * + * u32 + * txdma status writeback address hi reg in txdma address map at0x101C + * txdma status writeback address lo reg in txdma address map at 0x1020 + * + * 10bit DMA with wrap + * txdma service request reg in txdma address map at 0x1024 + * structure for txdma service complete reg in txdma address map at 0x1028 + * + * 4bit DMA with wrap + * txdma tx descriptor cache read index reg in txdma address map at 0x102C + * txdma tx descriptor cache write index reg in txdma address map at 0x1030 + * + * txdma error reg in txdma address map at address 0x1034 + * 0: PyldResend + * 1: PyldRewind + * 4: DescrResend + * 5: DescrRewind + * 8: WrbkResend + * 9: WrbkRewind + */ + +/* Tx DMA Module of JAGCore Address Mapping + * Located at address 0x1000 + */ +struct txdma_regs { /* Location: */ + u32 csr; /* 0x1000 */ + u32 pr_base_hi; /* 0x1004 */ + u32 pr_base_lo; /* 0x1008 */ + u32 pr_num_des; /* 0x100C */ + u32 txq_wr_addr; /* 0x1010 */ + u32 txq_wr_addr_ext; /* 0x1014 */ + u32 txq_rd_addr; /* 0x1018 */ + u32 dma_wb_base_hi; /* 0x101C */ + u32 dma_wb_base_lo; /* 0x1020 */ + u32 service_request; /* 0x1024 */ + u32 service_complete; /* 0x1028 */ + u32 cache_rd_index; /* 0x102C */ + u32 cache_wr_index; /* 0x1030 */ + u32 tx_dma_error; /* 0x1034 */ + u32 desc_abort_cnt; /* 0x1038 */ + u32 payload_abort_cnt; /* 0x103c */ + u32 writeback_abort_cnt; /* 0x1040 */ + u32 desc_timeout_cnt; /* 0x1044 */ + u32 payload_timeout_cnt; /* 0x1048 */ + u32 writeback_timeout_cnt; /* 0x104c */ + u32 desc_error_cnt; /* 0x1050 */ + u32 payload_error_cnt; /* 0x1054 */ + u32 writeback_error_cnt; /* 0x1058 */ + u32 dropped_tlp_cnt; /* 0x105c */ + u32 new_service_complete; /* 0x1060 */ + u32 ethernet_packet_cnt; /* 0x1064 */ +}; + +/* END OF TXDMA REGISTER ADDRESS MAP */ + +/* START OF RXDMA REGISTER ADDRESS MAP */ +/* structure for control status reg in rxdma address map + * Located at address 0x2000 + * + * CSR + * 0: halt + * 1-3: tc + * 4: fbr_big_endian + * 5: psr_big_endian + * 6: pkt_big_endian + * 7: dma_big_endian + * 8-9: fbr0_size + * 10: 
fbr0_enable + * 11-12: fbr1_size + * 13: fbr1_enable + * 14: unused + * 15: pkt_drop_disable + * 16: pkt_done_flush + * 17: halt_status + * 18-31: unused + */ +#define ET_RXDMA_CSR_HALT 0x0001 +#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100 +#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200 +#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400 +#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800 +#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000 +#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000 +#define ET_RXDMA_CSR_HALT_STATUS 0x00020000 + +/* structure for dma writeback lo reg in rxdma address map + * located at address 0x2004 + * Defined earlier (u32) + */ + +/* structure for dma writeback hi reg in rxdma address map + * located at address 0x2008 + * Defined earlier (u32) + */ + +/* structure for number of packets done reg in rxdma address map + * located at address 0x200C + * + * 31-8: unused + * 7-0: num done + */ + +/* structure for max packet time reg in rxdma address map + * located at address 0x2010 + * + * 31-18: unused + * 17-0: time done + */ + +/* structure for rx queue read address reg in rxdma address map + * located at address 0x2014 + * Defined earlier (u32) + */ + +/* structure for rx queue read address external reg in rxdma address map + * located at address 0x2018 + * Defined earlier (u32) + */ + +/* structure for rx queue write address reg in rxdma address map + * located at address 0x201C + * Defined earlier (u32) + */ + +/* structure for packet status ring base address lo reg in rxdma address map + * located at address 0x2020 + * Defined earlier (u32) + */ + +/* structure for packet status ring base address hi reg in rxdma address map + * located at address 0x2024 + * Defined earlier (u32) + */ + +/* structure for packet status ring number of descriptors reg in rxdma address + * map. Located at address 0x2028 + * + * 31-12: unused + * 11-0: psr ndes + */ +#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF + +/* structure for packet status ring available offset reg in rxdma address map + * located at address 0x202C + * + * 31-13: unused + * 12: psr avail wrap + * 11-0: psr avail + */ + +/* structure for packet status ring full offset reg in rxdma address map + * located at address 0x2030 + * + * 31-13: unused + * 12: psr full wrap + * 11-0: psr full + */ + +/* structure for packet status ring access index reg in rxdma address map + * located at address 0x2034 + * + * 31-5: unused + * 4-0: psr_ai + */ + +/* structure for packet status ring minimum descriptors reg in rxdma address + * map. Located at address 0x2038 + * + * 31-12: unused + * 11-0: psr_min + */ + +/* structure for free buffer ring base lo address reg in rxdma address map + * located at address 0x203C + * Defined earlier (u32) + */ + +/* structure for free buffer ring base hi address reg in rxdma address map + * located at address 0x2040 + * Defined earlier (u32) + */ + +/* structure for free buffer ring number of descriptors reg in rxdma address + * map. 
Located at address 0x2044 + * + * 31-10: unused + * 9-0: fbr ndesc + */ + +/* structure for free buffer ring 0 available offset reg in rxdma address map + * located at address 0x2048 + * Defined earlier (u32) + */ + +/* structure for free buffer ring 0 full offset reg in rxdma address map + * located at address 0x204C + * Defined earlier (u32) + */ + +/* structure for free buffer cache 0 full offset reg in rxdma address map + * located at address 0x2050 + * + * 31-5: unused + * 4-0: fbc rdi + */ + +/* structure for free buffer ring 0 minimum descriptor reg in rxdma address map + * located at address 0x2054 + * + * 31-10: unused + * 9-0: fbr min + */ + +/* structure for free buffer ring 1 base address lo reg in rxdma address map + * located at address 0x2058 - 0x205C + * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t) + */ + +/* structure for free buffer ring 1 number of descriptors reg in rxdma address + * map. Located at address 0x2060 + * Defined earlier (RXDMA_FBR_NUM_DES_t) + */ + +/* structure for free buffer ring 1 available offset reg in rxdma address map + * located at address 0x2064 + * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t) + */ + +/* structure for free buffer ring 1 full offset reg in rxdma address map + * located at address 0x2068 + * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t) + */ + +/* structure for free buffer cache 1 read index reg in rxdma address map + * located at address 0x206C + * Defined Earlier (RXDMA_FBC_RD_INDEX_t) + */ + +/* structure for free buffer ring 1 minimum descriptor reg in rxdma address map + * located at address 0x2070 + * Defined Earlier (RXDMA_FBR_MIN_DES_t) + */ + +/* Rx DMA Module of JAGCore Address Mapping + * Located at address 0x2000 + */ +struct rxdma_regs { /* Location: */ + u32 csr; /* 0x2000 */ + u32 dma_wb_base_lo; /* 0x2004 */ + u32 dma_wb_base_hi; /* 0x2008 */ + u32 num_pkt_done; /* 0x200C */ + u32 max_pkt_time; /* 0x2010 */ + u32 rxq_rd_addr; /* 0x2014 */ + u32 rxq_rd_addr_ext; /* 0x2018 */ + u32 rxq_wr_addr; /* 0x201C */ + u32 psr_base_lo; /* 0x2020 */ + u32 psr_base_hi; /* 0x2024 */ + u32 psr_num_des; /* 0x2028 */ + u32 psr_avail_offset; /* 0x202C */ + u32 psr_full_offset; /* 0x2030 */ + u32 psr_access_index; /* 0x2034 */ + u32 psr_min_des; /* 0x2038 */ + u32 fbr0_base_lo; /* 0x203C */ + u32 fbr0_base_hi; /* 0x2040 */ + u32 fbr0_num_des; /* 0x2044 */ + u32 fbr0_avail_offset; /* 0x2048 */ + u32 fbr0_full_offset; /* 0x204C */ + u32 fbr0_rd_index; /* 0x2050 */ + u32 fbr0_min_des; /* 0x2054 */ + u32 fbr1_base_lo; /* 0x2058 */ + u32 fbr1_base_hi; /* 0x205C */ + u32 fbr1_num_des; /* 0x2060 */ + u32 fbr1_avail_offset; /* 0x2064 */ + u32 fbr1_full_offset; /* 0x2068 */ + u32 fbr1_rd_index; /* 0x206C */ + u32 fbr1_min_des; /* 0x2070 */ +}; + +/* END OF RXDMA REGISTER ADDRESS MAP */ + +/* START OF TXMAC REGISTER ADDRESS MAP */ +/* structure for control reg in txmac address map + * located at address 0x3000 + * + * bits + * 31-8: unused + * 7: cklseg_disable + * 6: ckbcnt_disable + * 5: cksegnum + * 4: async_disable + * 3: fc_disable + * 2: mcif_disable + * 1: mif_disable + * 0: txmac_en + */ +#define ET_TX_CTRL_FC_DISABLE 0x0008 +#define ET_TX_CTRL_TXMAC_ENABLE 0x0001 + +/* structure for shadow pointer reg in txmac address map + * located at address 0x3004 + * 31-27: reserved + * 26-16: txq rd ptr + * 15-11: reserved + * 10-0: txq wr ptr + */ + +/* structure for error count reg in txmac address map + * located at address 0x3008 + * + * 31-12: unused + * 11-8: reserved + * 7-4: txq_underrun + * 3-0: fifo_underrun + */ + +/* 
structure for max fill reg in txmac address map + * located at address 0x300C + * 31-12: unused + * 11-0: max fill + */ + +/* structure for cf parameter reg in txmac address map + * located at address 0x3010 + * 31-16: cfep + * 15-0: cfpt + */ + +/* structure for tx test reg in txmac address map + * located at address 0x3014 + * 31-17: unused + * 16: reserved + * 15: txtest_en + * 14-11: unused + * 10-0: txq test pointer + */ + +/* structure for error reg in txmac address map + * located at address 0x3018 + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x301C + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x3020 + * + * 31-2: unused + * 1: bp_req + * 0: bp_xonxoff + */ + +/* Tx MAC Module of JAGCore Address Mapping + */ +struct txmac_regs { /* Location: */ + u32 ctl; /* 0x3000 */ + u32 shadow_ptr; /* 0x3004 */ + u32 err_cnt; /* 0x3008 */ + u32 max_fill; /* 0x300C */ + u32 cf_param; /* 0x3010 */ + u32 tx_test; /* 0x3014 */ + u32 err; /* 0x3018 */ + u32 err_int; /* 0x301C */ + u32 bp_ctrl; /* 0x3020 */ +}; + +/* END OF TXMAC REGISTER ADDRESS MAP */ + +/* START OF RXMAC REGISTER ADDRESS MAP */ + +/* structure for rxmac control reg in rxmac address map + * located at address 0x4000 + * + * 31-7: reserved + * 6: rxmac_int_disable + * 5: async_disable + * 4: mif_disable + * 3: wol_disable + * 2: pkt_filter_disable + * 1: mcif_disable + * 0: rxmac_en + */ +#define ET_RX_CTRL_WOL_DISABLE 0x0008 +#define ET_RX_CTRL_RXMAC_ENABLE 0x0001 + +/* structure for Wake On Lan Control and CRC 0 reg in rxmac address map + * located at address 0x4004 + * 31-16: crc + * 15-12: reserved + * 11: ignore_pp + * 10: ignore_mp + * 9: clr_intr + * 8: ignore_link_chg + * 7: ignore_uni + * 6: ignore_multi + * 5: ignore_broad + * 4-0: valid_crc 4-0 + */ + +/* structure for CRC 1 and CRC 2 reg in rxmac address map + * located at address 0x4008 + * + * 31-16: crc2 + * 15-0: crc1 + */ + +/* structure for CRC 3 and CRC 4 reg in rxmac address map + * located at address 0x400C + * + * 31-16: crc4 + * 15-0: crc3 + */ + +/* structure for Wake On Lan Source Address Lo reg in rxmac address map + * located at address 0x4010 + * + * 31-24: sa3 + * 23-16: sa4 + * 15-8: sa5 + * 7-0: sa6 + */ +#define ET_RX_WOL_LO_SA3_SHIFT 24 +#define ET_RX_WOL_LO_SA4_SHIFT 16 +#define ET_RX_WOL_LO_SA5_SHIFT 8 + +/* structure for Wake On Lan Source Address Hi reg in rxmac address map + * located at address 0x4014 + * + * 31-16: reserved + * 15-8: sa1 + * 7-0: sa2 + */ +#define ET_RX_WOL_HI_SA1_SHIFT 8 + +/* structure for Wake On Lan mask reg in rxmac address map + * located at address 0x4018 - 0x4064 + * Defined earlier (u32) + */ + +/* structure for Unicast Packet Filter Address 1 reg in rxmac address map + * located at address 0x4068 + * + * 31-24: addr1_3 + * 23-16: addr1_4 + * 15-8: addr1_5 + * 7-0: addr1_6 + */ +#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24 +#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 2 reg in rxmac address map + * located at address 0x406C + * + * 31-24: addr2_3 + * 23-16: addr2_4 + * 15-8: addr2_5 + * 7-0: addr2_6 + */ +#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24 
+#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map + * located at address 0x4070 + * + * 31-24: addr2_1 + * 23-16: addr2_2 + * 15-8: addr1_1 + * 7-0: addr1_2 + */ +#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24 +#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8 + +/* structure for Multicast Hash reg in rxmac address map + * located at address 0x4074 - 0x4080 + * Defined earlier (u32) + */ + +/* structure for Packet Filter Control reg in rxmac address map + * located at address 0x4084 + * + * 31-23: unused + * 22-16: min_pkt_size + * 15-4: unused + * 3: filter_frag_en + * 2: filter_uni_en + * 1: filter_multi_en + * 0: filter_broad_en + */ +#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16 +#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008 +#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004 +#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002 +#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001 + +/* structure for Memory Controller Interface Control Max Segment reg in rxmac + * address map. Located at address 0x4088 + * + * 31-10: reserved + * 9-2: max_size + * 1: fc_en + * 0: seg_en + */ +#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2 +#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002 +#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001 + +/* structure for Memory Controller Interface Water Mark reg in rxmac address + * map. Located at address 0x408C + * + * 31-26: unused + * 25-16: mark_hi + * 15-10: unused + * 9-0: mark_lo + */ + +/* structure for Rx Queue Dialog reg in rxmac address map. + * located at address 0x4090 + * + * 31-26: reserved + * 25-16: rd_ptr + * 15-10: reserved + * 9-0: wr_ptr + */ + +/* structure for space available reg in rxmac address map. + * located at address 0x4094 + * + * 31-17: reserved + * 16: space_avail_en + * 15-10: reserved + * 9-0: space_avail + */ + +/* structure for management interface reg in rxmac address map. + * located at address 0x4098 + * + * 31-18: reserved + * 17: drop_pkt_en + * 16-0: drop_pkt_mask + */ + +/* structure for Error reg in rxmac address map. 
+ * located at address 0x409C + * + * 31-4: unused + * 3: mif + * 2: async + * 1: pkt_filter + * 0: mcif + */ + +/* Rx MAC Module of JAGCore Address Mapping + */ +struct rxmac_regs { /* Location: */ + u32 ctrl; /* 0x4000 */ + u32 crc0; /* 0x4004 */ + u32 crc12; /* 0x4008 */ + u32 crc34; /* 0x400C */ + u32 sa_lo; /* 0x4010 */ + u32 sa_hi; /* 0x4014 */ + u32 mask0_word0; /* 0x4018 */ + u32 mask0_word1; /* 0x401C */ + u32 mask0_word2; /* 0x4020 */ + u32 mask0_word3; /* 0x4024 */ + u32 mask1_word0; /* 0x4028 */ + u32 mask1_word1; /* 0x402C */ + u32 mask1_word2; /* 0x4030 */ + u32 mask1_word3; /* 0x4034 */ + u32 mask2_word0; /* 0x4038 */ + u32 mask2_word1; /* 0x403C */ + u32 mask2_word2; /* 0x4040 */ + u32 mask2_word3; /* 0x4044 */ + u32 mask3_word0; /* 0x4048 */ + u32 mask3_word1; /* 0x404C */ + u32 mask3_word2; /* 0x4050 */ + u32 mask3_word3; /* 0x4054 */ + u32 mask4_word0; /* 0x4058 */ + u32 mask4_word1; /* 0x405C */ + u32 mask4_word2; /* 0x4060 */ + u32 mask4_word3; /* 0x4064 */ + u32 uni_pf_addr1; /* 0x4068 */ + u32 uni_pf_addr2; /* 0x406C */ + u32 uni_pf_addr3; /* 0x4070 */ + u32 multi_hash1; /* 0x4074 */ + u32 multi_hash2; /* 0x4078 */ + u32 multi_hash3; /* 0x407C */ + u32 multi_hash4; /* 0x4080 */ + u32 pf_ctrl; /* 0x4084 */ + u32 mcif_ctrl_max_seg; /* 0x4088 */ + u32 mcif_water_mark; /* 0x408C */ + u32 rxq_diag; /* 0x4090 */ + u32 space_avail; /* 0x4094 */ + + u32 mif_ctrl; /* 0x4098 */ + u32 err_reg; /* 0x409C */ +}; + +/* END OF RXMAC REGISTER ADDRESS MAP */ + +/* START OF MAC REGISTER ADDRESS MAP */ +/* structure for configuration #1 reg in mac address map. + * located at address 0x5000 + * + * 31: soft reset + * 30: sim reset + * 29-20: reserved + * 19: reset rx mc + * 18: reset tx mc + * 17: reset rx func + * 16: reset tx fnc + * 15-9: reserved + * 8: loopback + * 7-6: reserved + * 5: rx flow + * 4: tx flow + * 3: syncd rx en + * 2: rx enable + * 1: syncd tx en + * 0: tx enable + */ +#define ET_MAC_CFG1_SOFT_RESET 0x80000000 +#define ET_MAC_CFG1_SIM_RESET 0x40000000 +#define ET_MAC_CFG1_RESET_RXMC 0x00080000 +#define ET_MAC_CFG1_RESET_TXMC 0x00040000 +#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000 +#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000 +#define ET_MAC_CFG1_LOOPBACK 0x00000100 +#define ET_MAC_CFG1_RX_FLOW 0x00000020 +#define ET_MAC_CFG1_TX_FLOW 0x00000010 +#define ET_MAC_CFG1_RX_ENABLE 0x00000004 +#define ET_MAC_CFG1_TX_ENABLE 0x00000001 +#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */ + +/* structure for configuration #2 reg in mac address map. + * located at address 0x5004 + * 31-16: reserved + * 15-12: preamble + * 11-10: reserved + * 9-8: if mode + * 7-6: reserved + * 5: huge frame + * 4: length check + * 3: undefined + * 2: pad crc + * 1: crc enable + * 0: full duplex + */ +#define ET_MAC_CFG2_PREAMBLE_SHIFT 12 +#define ET_MAC_CFG2_IFMODE_MASK 0x0300 +#define ET_MAC_CFG2_IFMODE_1000 0x0200 +#define ET_MAC_CFG2_IFMODE_100 0x0100 +#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020 +#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010 +#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004 +#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002 +#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001 + +/* structure for Interpacket gap reg in mac address map. + * located at address 0x5008 + * + * 31: reserved + * 30-24: non B2B ipg 1 + * 23: undefined + * 22-16: non B2B ipg 2 + * 15-8: Min ifg enforce + * 7-0: B2B ipg + * + * structure for half duplex reg in mac address map. 
+ * located at address 0x500C + * 31-24: reserved + * 23-20: Alt BEB trunc + * 19: Alt BEB enable + * 18: BP no backoff + * 17: no backoff + * 16: excess defer + * 15-12: re-xmit max + * 11-10: reserved + * 9-0: collision window + */ + +/* structure for Maximum Frame Length reg in mac address map. + * located at address 0x5010: bits 0-15 hold the length. + */ + +/* structure for Reserve 1 reg in mac address map. + * located at address 0x5014 - 0x5018 + * Defined earlier (u32) + */ + +/* structure for Test reg in mac address map. + * located at address 0x501C + * test: bits 0-2, rest unused + */ + +/* structure for MII Management Configuration reg in mac address map. + * located at address 0x5020 + * + * 31: reset MII mgmt + * 30-6: unused + * 5: scan auto increment + * 4: preamble suppress + * 3: undefined + * 2-0: mgmt clock reset + */ +#define ET_MAC_MIIMGMT_CLK_RST 0x0007 + +/* structure for MII Management Command reg in mac address map. + * located at address 0x5024 + * bit 1: scan cycle + * bit 0: read cycle + */ + +/* structure for MII Management Address reg in mac address map. + * located at address 0x5028 + * 31-13: reserved + * 12-8: phy addr + * 7-5: reserved + * 4-0: register + */ +#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg)) + +/* structure for MII Management Control reg in mac address map. + * located at address 0x502C + * 31-16: reserved + * 15-0: phy control + */ + +/* structure for MII Management Status reg in mac address map. + * located at address 0x5030 + * 31-16: reserved + * 15-0: phy control + */ +#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK 0xFFFF + +/* structure for MII Management Indicators reg in mac address map. + * located at address 0x5034 + * 31-3: reserved + * 2: not valid + * 1: scanning + * 0: busy + */ +#define ET_MAC_MGMT_BUSY 0x00000001 /* busy */ +#define ET_MAC_MGMT_WAIT 0x00000005 /* busy | not valid */ + +/* structure for Interface Control reg in mac address map. + * located at address 0x5038 + * + * 31: reset if module + * 30-28: reserved + * 27: tbi mode + * 26: ghd mode + * 25: lhd mode + * 24: phy mode + * 23: reset per mii + * 22-17: reserved + * 16: speed + * 15: reset pe100x + * 14-11: reserved + * 10: force quiet + * 9: no cipher + * 8: disable link fail + * 7: reset gpsi + * 6-1: reserved + * 0: enable jabber protection + */ +#define ET_MAC_IFCTRL_GHDMODE (1 << 26) +#define ET_MAC_IFCTRL_PHYMODE (1 << 24) + +/* structure for Interface Status reg in mac address map. + * located at address 0x503C + * + * 31-10: reserved + * 9: excess_defer + * 8: clash + * 7: phy_jabber + * 6: phy_link_ok + * 5: phy_full_duplex + * 4: phy_speed + * 3: pe100x_link_fail + * 2: pe10t_loss_carrier + * 1: pe10t_sqe_error + * 0: pe10t_jabber + */ + +/* structure for Mac Station Address, Part 1 reg in mac address map. + * located at address 0x5040 + * + * 31-24: Octet6 + * 23-16: Octet5 + * 15-8: Octet4 + * 7-0: Octet3 + */ +#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24 +#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16 +#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8 + +/* structure for Mac Station Address, Part 2 reg in mac address map. 
+ * located at address 0x5044 + * + * 31-24: Octet2 + * 23-16: Octet1 + * 15-0: reserved + */ +#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24 +#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16 + +/* MAC Module of JAGCore Address Mapping + */ +struct mac_regs { /* Location: */ + u32 cfg1; /* 0x5000 */ + u32 cfg2; /* 0x5004 */ + u32 ipg; /* 0x5008 */ + u32 hfdp; /* 0x500C */ + u32 max_fm_len; /* 0x5010 */ + u32 rsv1; /* 0x5014 */ + u32 rsv2; /* 0x5018 */ + u32 mac_test; /* 0x501C */ + u32 mii_mgmt_cfg; /* 0x5020 */ + u32 mii_mgmt_cmd; /* 0x5024 */ + u32 mii_mgmt_addr; /* 0x5028 */ + u32 mii_mgmt_ctrl; /* 0x502C */ + u32 mii_mgmt_stat; /* 0x5030 */ + u32 mii_mgmt_indicator; /* 0x5034 */ + u32 if_ctrl; /* 0x5038 */ + u32 if_stat; /* 0x503C */ + u32 station_addr_1; /* 0x5040 */ + u32 station_addr_2; /* 0x5044 */ +}; + +/* END OF MAC REGISTER ADDRESS MAP */ + +/* START OF MAC STAT REGISTER ADDRESS MAP */ +/* structure for Carry Register One and it's Mask Register reg located in mac + * stat address map address 0x6130 and 0x6138. + * + * 31: tr64 + * 30: tr127 + * 29: tr255 + * 28: tr511 + * 27: tr1k + * 26: trmax + * 25: trmgv + * 24-17: unused + * 16: rbyt + * 15: rpkt + * 14: rfcs + * 13: rmca + * 12: rbca + * 11: rxcf + * 10: rxpf + * 9: rxuo + * 8: raln + * 7: rflr + * 6: rcde + * 5: rcse + * 4: rund + * 3: rovr + * 2: rfrg + * 1: rjbr + * 0: rdrp + */ + +/* structure for Carry Register Two Mask Register reg in mac stat address map. + * located at address 0x613C + * + * 31-20: unused + * 19: tjbr + * 18: tfcs + * 17: txcf + * 16: tovr + * 15: tund + * 14: trfg + * 13: tbyt + * 12: tpkt + * 11: tmca + * 10: tbca + * 9: txpf + * 8: tdfr + * 7: tedf + * 6: tscl + * 5: tmcl + * 4: tlcl + * 3: txcl + * 2: tncl + * 1: tpfh + * 0: tdrp + */ + +/* MAC STATS Module of JAGCore Address Mapping + */ +struct macstat_regs { /* Location: */ + u32 pad[32]; /* 0x6000 - 607C */ + + /* counters */ + u32 txrx_0_64_byte_frames; /* 0x6080 */ + u32 txrx_65_127_byte_frames; /* 0x6084 */ + u32 txrx_128_255_byte_frames; /* 0x6088 */ + u32 txrx_256_511_byte_frames; /* 0x608C */ + u32 txrx_512_1023_byte_frames; /* 0x6090 */ + u32 txrx_1024_1518_byte_frames; /* 0x6094 */ + u32 txrx_1519_1522_gvln_frames; /* 0x6098 */ + u32 rx_bytes; /* 0x609C */ + u32 rx_packets; /* 0x60A0 */ + u32 rx_fcs_errs; /* 0x60A4 */ + u32 rx_multicast_packets; /* 0x60A8 */ + u32 rx_broadcast_packets; /* 0x60AC */ + u32 rx_control_frames; /* 0x60B0 */ + u32 rx_pause_frames; /* 0x60B4 */ + u32 rx_unknown_opcodes; /* 0x60B8 */ + u32 rx_align_errs; /* 0x60BC */ + u32 rx_frame_len_errs; /* 0x60C0 */ + u32 rx_code_errs; /* 0x60C4 */ + u32 rx_carrier_sense_errs; /* 0x60C8 */ + u32 rx_undersize_packets; /* 0x60CC */ + u32 rx_oversize_packets; /* 0x60D0 */ + u32 rx_fragment_packets; /* 0x60D4 */ + u32 rx_jabbers; /* 0x60D8 */ + u32 rx_drops; /* 0x60DC */ + u32 tx_bytes; /* 0x60E0 */ + u32 tx_packets; /* 0x60E4 */ + u32 tx_multicast_packets; /* 0x60E8 */ + u32 tx_broadcast_packets; /* 0x60EC */ + u32 tx_pause_frames; /* 0x60F0 */ + u32 tx_deferred; /* 0x60F4 */ + u32 tx_excessive_deferred; /* 0x60F8 */ + u32 tx_single_collisions; /* 0x60FC */ + u32 tx_multiple_collisions; /* 0x6100 */ + u32 tx_late_collisions; /* 0x6104 */ + u32 tx_excessive_collisions; /* 0x6108 */ + u32 tx_total_collisions; /* 0x610C */ + u32 tx_pause_honored_frames; /* 0x6110 */ + u32 tx_drops; /* 0x6114 */ + u32 tx_jabbers; /* 0x6118 */ + u32 tx_fcs_errs; /* 0x611C */ + u32 tx_control_frames; /* 0x6120 */ + u32 tx_oversize_frames; /* 0x6124 */ + u32 tx_undersize_frames; /* 0x6128 */ + u32 
tx_fragments; /* 0x612C */ + u32 carry_reg1; /* 0x6130 */ + u32 carry_reg2; /* 0x6134 */ + u32 carry_reg1_mask; /* 0x6138 */ + u32 carry_reg2_mask; /* 0x613C */ +}; + +/* END OF MAC STAT REGISTER ADDRESS MAP */ + +/* START OF MMC REGISTER ADDRESS MAP */ +/* Main Memory Controller Control reg in mmc address map. + * located at address 0x7000 + */ +#define ET_MMC_ENABLE 1 +#define ET_MMC_ARB_DISABLE 2 +#define ET_MMC_RXMAC_DISABLE 4 +#define ET_MMC_TXMAC_DISABLE 8 +#define ET_MMC_TXDMA_DISABLE 16 +#define ET_MMC_RXDMA_DISABLE 32 +#define ET_MMC_FORCE_CE 64 + +/* Main Memory Controller Host Memory Access Address reg in mmc + * address map. Located at address 0x7004. Top 16 bits hold the address bits + */ +#define ET_SRAM_REQ_ACCESS 1 +#define ET_SRAM_WR_ACCESS 2 +#define ET_SRAM_IS_CTRL 4 + +/* structure for Main Memory Controller Host Memory Access Data reg in mmc + * address map. Located at address 0x7008 - 0x7014 + * Defined earlier (u32) + */ + +/* Memory Control Module of JAGCore Address Mapping + */ +struct mmc_regs { /* Location: */ + u32 mmc_ctrl; /* 0x7000 */ + u32 sram_access; /* 0x7004 */ + u32 sram_word1; /* 0x7008 */ + u32 sram_word2; /* 0x700C */ + u32 sram_word3; /* 0x7010 */ + u32 sram_word4; /* 0x7014 */ +}; + +/* END OF MMC REGISTER ADDRESS MAP */ + +/* JAGCore Address Mapping + */ +struct address_map { + struct global_regs global; + /* unused section of global address map */ + u8 unused_global[4096 - sizeof(struct global_regs)]; + struct txdma_regs txdma; + /* unused section of txdma address map */ + u8 unused_txdma[4096 - sizeof(struct txdma_regs)]; + struct rxdma_regs rxdma; + /* unused section of rxdma address map */ + u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)]; + struct txmac_regs txmac; + /* unused section of txmac address map */ + u8 unused_txmac[4096 - sizeof(struct txmac_regs)]; + struct rxmac_regs rxmac; + /* unused section of rxmac address map */ + u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)]; + struct mac_regs mac; + /* unused section of mac address map */ + u8 unused_mac[4096 - sizeof(struct mac_regs)]; + struct macstat_regs macstat; + /* unused section of mac stat address map */ + u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)]; + struct mmc_regs mmc; + /* unused section of mmc address map */ + u8 unused_mmc[4096 - sizeof(struct mmc_regs)]; + /* unused section of address map */ + u8 unused_[1015808]; + u8 unused_exp_rom[4096]; /* MGS-size TBD */ + u8 unused__[524288]; /* unused section of address map */ +}; + +/* Defines for generic MII registers 0x00 -> 0x0F can be found in + * include/linux/mii.h + */ +/* some defines for modem registers that seem to be 'reserved' */ +#define PHY_INDEX_REG 0x10 +#define PHY_DATA_REG 0x11 +#define PHY_MPHY_CONTROL_REG 0x12 + +/* defines for specified registers */ +#define PHY_LOOPBACK_CONTROL 0x13 /* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */ + /* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */ +#define PHY_REGISTER_MGMT_CONTROL 0x15 /* TRU_VMI_MI_SEQ_CONTROL_REG 21 */ +#define PHY_CONFIG 0x16 /* TRU_VMI_CONFIGURATION_REG 22 */ +#define PHY_PHY_CONTROL 0x17 /* TRU_VMI_PHY_CONTROL_REG 23 */ +#define PHY_INTERRUPT_MASK 0x18 /* TRU_VMI_INTERRUPT_MASK_REG 24 */ +#define PHY_INTERRUPT_STATUS 0x19 /* TRU_VMI_INTERRUPT_STATUS_REG 25 */ +#define PHY_PHY_STATUS 0x1A /* TRU_VMI_PHY_STATUS_REG 26 */ +#define PHY_LED_1 0x1B /* TRU_VMI_LED_CONTROL_1_REG 27 */ +#define PHY_LED_2 0x1C /* TRU_VMI_LED_CONTROL_2_REG 28 */ + /* TRU_VMI_LINK_CONTROL_REG 29 */ + /* TRU_VMI_TIMING_CONTROL_REG */ + +/* MI Register 10: Gigabit basic mode status 
reg(Reg 0x0A) */ +#define ET_1000BT_MSTR_SLV 0x4000 + +/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */ + +/* MI Register 19: Loopback Control Reg(0x13) + * 15: mii_en + * 14: pcs_en + * 13: pmd_en + * 12: all_digital_en + * 11: replica_en + * 10: line_driver_en + * 9-0: reserved + */ + +/* MI Register 20: Reserved Reg(0x14) */ + +/* MI Register 21: Management Interface Control Reg(0x15) + * 15-11: reserved + * 10-4: mi_error_count + * 3: reserved + * 2: ignore_10g_fr + * 1: reserved + * 0: preamble_suppress_en + */ + +/* MI Register 22: PHY Configuration Reg(0x16) + * 15: crs_tx_en + * 14: reserved + * 13-12: tx_fifo_depth + * 11-10: speed_downshift + * 9: pbi_detect + * 8: tbi_rate + * 7: alternate_np + * 6: group_mdio_en + * 5: tx_clock_en + * 4: sys_clock_en + * 3: reserved + * 2-0: mac_if_mode + */ +#define ET_PHY_CONFIG_TX_FIFO_DEPTH 0x3000 + +#define ET_PHY_CONFIG_FIFO_DEPTH_8 0x0000 +#define ET_PHY_CONFIG_FIFO_DEPTH_16 0x1000 +#define ET_PHY_CONFIG_FIFO_DEPTH_32 0x2000 +#define ET_PHY_CONFIG_FIFO_DEPTH_64 0x3000 + +/* MI Register 23: PHY CONTROL Reg(0x17) + * 15: reserved + * 14: tdr_en + * 13: reserved + * 12-11: downshift_attempts + * 10-6: reserved + * 5: jabber_10baseT + * 4: sqe_10baseT + * 3: tp_loopback_10baseT + * 2: preamble_gen_en + * 1: reserved + * 0: force_int + */ + +/* MI Register 24: Interrupt Mask Reg(0x18) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 25: Interrupt Status Reg(0x19) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 26: PHY Status Reg(0x1A) + * 15: reserved + * 14-13: autoneg_fault + * 12: autoneg_status + * 11: mdi_x_status + * 10: polarity_status + * 9-8: speed_status + * 7: duplex_status + * 6: link_status + * 5: tx_status + * 4: rx_status + * 3: collision_status + * 2: autoneg_en + * 1: pause_en + * 0: asymmetric_dir + */ +#define ET_PHY_AUTONEG_STATUS 0x1000 +#define ET_PHY_POLARITY_STATUS 0x0400 +#define ET_PHY_SPEED_STATUS 0x0300 +#define ET_PHY_DUPLEX_STATUS 0x0080 +#define ET_PHY_LSTATUS 0x0040 +#define ET_PHY_AUTONEG_ENABLE 0x0020 + +/* MI Register 27: LED Control Reg 1(0x1B) + * 15-14: reserved + * 13-12: led_dup_indicate + * 11-10: led_10baseT + * 9-8: led_collision + * 7-4: reserved + * 3-2: pulse_dur + * 1: pulse_stretch1 + * 0: pulse_stretch0 + */ + +/* MI Register 28: LED Control Reg 2(0x1C) + * 15-12: led_link + * 11-8: led_tx_rx + * 7-4: led_100BaseTX + * 3-0: led_1000BaseT + */ +#define ET_LED2_LED_LINK 0xF000 +#define ET_LED2_LED_TXRX 0x0F00 +#define ET_LED2_LED_100TX 0x00F0 +#define ET_LED2_LED_1000T 0x000F + +/* defines for LED control reg 2 values */ +#define LED_VAL_1000BT 0x0 +#define LED_VAL_100BTX 0x1 +#define LED_VAL_10BT 0x2 +#define LED_VAL_1000BT_100BTX 0x3 /* 1000BT on, 100BTX blink */ +#define LED_VAL_LINKON 0x4 +#define LED_VAL_TX 0x5 +#define LED_VAL_RX 0x6 +#define LED_VAL_TXRX 0x7 /* TX or RX */ +#define LED_VAL_DUPLEXFULL 0x8 +#define LED_VAL_COLLISION 0x9 +#define LED_VAL_LINKON_ACTIVE 0xA /* Link on, activity blink */ +#define LED_VAL_LINKON_RECV 0xB /* Link on, receive blink */ +#define LED_VAL_DUPLEXFULL_COLLISION 0xC /* Duplex on, collision blink */ +#define LED_VAL_BLINK 0xD +#define LED_VAL_ON 0xE +#define LED_VAL_OFF 0xF 
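/* Illustrative sketch, not part of the patch above: one plausible way to
 * compose a value for LED Control Reg 2 (PHY_LED_2, MI register 0x1C) from
 * the 4-bit LED_VAL_* codes. The helper name below is hypothetical; the
 * field positions follow the ET_LED2_LED_* masks (link 15-12, tx/rx 11-8,
 * 100BaseTX 7-4, 1000BaseT 3-0), i.e. the LED_*_SHIFT values defined just
 * after this point.
 */
static inline u16 led2_compose(u8 link, u8 txrx, u8 led_100tx, u8 led_1000t)
{
        /* Each argument is one of the LED_VAL_* codes (0x0 - 0xF). */
        return (link << 12) | (txrx << 8) | (led_100tx << 4) | led_1000t;
}

/* Example: led2_compose(LED_VAL_LINKON_ACTIVE, LED_VAL_TXRX,
 *                       LED_VAL_100BTX, LED_VAL_1000BT) == 0xA710,
 * which could then be written to PHY_LED_2 over MDIO.
 */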
+ +#define LED_LINK_SHIFT 12 +#define LED_TXRX_SHIFT 8 +#define LED_100TX_SHIFT 4 + +/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */ diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 29b9f082475de10b290f579d72ef5240a133be78..1fcd5568a3520981fd6ac03b57a3a599c2876a6c 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -878,8 +878,6 @@ static int emac_probe(struct platform_device *pdev) emac_powerup(ndev); emac_reset(db); - ether_setup(ndev); - ndev->netdev_ops = &emac_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &emac_ethtool_ops; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 7330681574d20ccd02855beabedc9b01c8ae2b48..4efc4355d34537e79632844e3e493fa25240a215 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -728,6 +728,44 @@ static struct phy_device *connect_local_phy(struct net_device *dev) return phydev; } +static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct device_node *np = priv->device->of_node; + int ret = 0; + + priv->phy_iface = of_get_phy_mode(np); + + /* Avoid get phy addr and create mdio if no phy is present */ + if (!priv->phy_iface) + return 0; + + /* try to get PHY address from device tree, use PHY autodetection if + * no valid address is given + */ + + if (of_property_read_u32(priv->device->of_node, "phy-addr", + &priv->phy_addr)) { + priv->phy_addr = POLL_PHY; + } + + if (!((priv->phy_addr == POLL_PHY) || + ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { + netdev_err(dev, "invalid phy-addr specified %d\n", + priv->phy_addr); + return -ENODEV; + } + + /* Create/attach to MDIO bus */ + ret = altera_tse_mdio_create(dev, + atomic_add_return(1, &instance_count)); + + if (ret) + return -ENODEV; + + return 0; +} + /* Initialize driver's PHY state, and attach to the PHY */ static int init_phy(struct net_device *dev) @@ -736,6 +774,10 @@ static int init_phy(struct net_device *dev) struct phy_device *phydev; struct device_node *phynode; + /* Avoid init phy in case of no phy present */ + if (!priv->phy_iface) + return 0; + priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; @@ -1231,7 +1273,6 @@ static int altera_tse_probe(struct platform_device *pdev) struct resource *dma_res; struct altera_tse_private *priv; const unsigned char *macaddr; - struct device_node *np = pdev->dev.of_node; void __iomem *descmap; const struct of_device_id *of_id = NULL; @@ -1408,32 +1449,13 @@ static int altera_tse_probe(struct platform_device *pdev) else eth_hw_addr_random(ndev); - priv->phy_iface = of_get_phy_mode(np); - - /* try to get PHY address from device tree, use PHY autodetection if - * no valid address is given - */ - if (of_property_read_u32(pdev->dev.of_node, "phy-addr", - &priv->phy_addr)) { - priv->phy_addr = POLL_PHY; - } - - if (!((priv->phy_addr == POLL_PHY) || - ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { - dev_err(&pdev->dev, "invalid phy-addr specified %d\n", - priv->phy_addr); - goto err_free_netdev; - } - - /* Create/attach to MDIO bus */ - ret = altera_tse_mdio_create(ndev, - atomic_add_return(1, &instance_count)); + /* get phy addr and create mdio */ + ret = altera_tse_phy_get_addr_mdio_create(ndev); if (ret) goto err_free_netdev; /* initialize netdev */ - ether_setup(ndev); 
ndev->mem_start = control_port->start; ndev->mem_end = control_port->end; ndev->netdev_ops = &altera_tse_netdev_ops; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 31c48a7ac2b60d0b46d7710e9b238194648d9cdd..6c323f4f457bcc4289e584bc6b13afd507973cd8 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1140,7 +1140,6 @@ static const struct net_device_ops au1000_netdev_ops = { static int au1000_probe(struct platform_device *pdev) { - static unsigned version_printed; struct au1000_private *aup = NULL; struct au1000_eth_platform_data *pd; struct net_device *dev = NULL; @@ -1371,9 +1370,8 @@ static int au1000_probe(struct platform_device *pdev) netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", (unsigned long)base->start, irq); - if (version_printed++ == 0) - pr_info("%s version %s %s\n", - DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); return 0; diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index abf3b1581c82a2eacbf7074b3c9db9ca12fbef19..5b22764ba88d2304c3502cd987420e7f955fd793 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -621,7 +621,7 @@ static int nmclan_config(struct pcmcia_device *link) ret = pcmcia_request_io(link); if (ret) goto failed; - ret = pcmcia_request_exclusive_irq(link, mace_interrupt); + ret = pcmcia_request_irq(link, mace_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index cc25a3a9e7cf388d6da509c7bfb7277d0f70549a..caade30820d50b9851228589a192f17bb8c9a915 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -271,7 +271,6 @@ #define DMA_PBL_X8_DISABLE 0x00 #define DMA_PBL_X8_ENABLE 0x01 - /* MAC register offsets */ #define MAC_TCR 0x0000 #define MAC_RCR 0x0004 @@ -792,7 +791,6 @@ #define MTL_Q_DISABLED 0x00 #define MTL_Q_ENABLED 0x02 - /* MTL traffic class register offsets * Multiple traffic classes can be active. The first class has registers * that begin at 0x1100. Each subsequent queue has registers that @@ -815,7 +813,6 @@ #define MTL_TSA_SP 0x00 #define MTL_TSA_ETS 0x02 - /* PCS MMD select register offset * The MMD select register is used for accessing PCS registers * when the underlying APB3 interface is using indirect addressing. 
@@ -825,7 +822,6 @@ */ #define PCS_MMD_SELECT 0xff - /* Descriptor/Packet entry bit positions and sizes */ #define RX_PACKET_ERRORS_CRC_INDEX 2 #define RX_PACKET_ERRORS_CRC_WIDTH 1 @@ -929,7 +925,6 @@ #define MDIO_AN_COMP_STAT 0x0030 #endif - /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable @@ -957,7 +952,6 @@ do { \ ((0x1 << (_width)) - 1)) << (_index))); \ } while (0) - /* Bit setting and getting macros based on register fields * The get macro uses the bit field definitions formed using the input * names to extract the current bit field value from within the @@ -986,7 +980,6 @@ do { \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) - /* Macros for reading or writing registers * The ioread macros will get bit fields or full values using the * register definitions formed using the input names @@ -1014,7 +1007,6 @@ do { \ XGMAC_IOWRITE((_pdata), _reg, reg_val); \ } while (0) - /* Macros for reading or writing MTL queue or traffic class registers * Similar to the standard read and write macros except that the * base register value is calculated by the queue or traffic class number @@ -1041,7 +1033,6 @@ do { \ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ } while (0) - /* Macros for reading or writing DMA channel registers * Similar to the standard read and write macros except that the * base register value is obtained from the ring @@ -1066,7 +1057,6 @@ do { \ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ } while (0) - /* Macros for building, reading or writing register values or bits * within the register values of XPCS registers. */ @@ -1076,7 +1066,6 @@ do { \ #define XPCS_IOREAD(_pdata, _off) \ ioread32((_pdata)->xpcs_regs + (_off)) - /* Macros for building, reading or writing register values or bits * using MDIO. Different from above because of the use of standardized * Linux include values. 
No shifting is performed with the bit diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index 7d6a49b2432188665412ee2bdf44d58156cf51e9..8a50b01c2686292b06e97dae11c21c57095f6844 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -120,7 +120,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_dcb_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index a3c11355a34dd199a40252dab901ab23cb908ffb..76479d04b9037370ebb91cd161574c793eaf881b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -121,7 +121,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static ssize_t xgbe_common_read(char __user *buffer, size_t count, loff_t *ppos, unsigned int value) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 1c5d62e8dab655013ea3883d983543ea7993a4b6..6fc5da01437d8ba75a3b8cf0cd16244b61e4f7d8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -117,7 +117,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *); static void xgbe_free_ring(struct xgbe_prv_data *pdata, @@ -524,11 +523,8 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel) /* Allocate skb & assign to each rdesc */ skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) { - netdev_alert(pdata->netdev, - "failed to allocate skb\n"); + if (skb == NULL) break; - } skb_dma = dma_map_single(pdata->dev, skb->data, pdata->rx_buf_size, DMA_FROM_DEVICE); if (dma_mapping_error(pdata->dev, skb_dma)) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index ea273836d999c854d89d45789e33d1ff81afc92b..9da3a03e8c0777595a7f99774be62c9528e55c6a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -122,7 +122,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b26d75856553bf62c975e533dc815051f7968de5..29554992215aa18301f26b7e431b1fc19dfff9cb 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -126,7 +126,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_poll(struct napi_struct *, int); static void xgbe_set_rx_mode(struct net_device *); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 46f613028e9c00ba9b659a4b8ccc7a8f5f47c644..49508ec98b72c1c01fdd189fc71e8c7465c77d2c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -121,7 +121,6 @@ #include "xgbe.h" #include "xgbe-common.h" - struct xgbe_stats { char stat_string[ETH_GSTRING_LEN]; int stat_size; @@ -173,6 +172,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), }; + #define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats) static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index bdf9cfa70e88c4dac3c7f3c439fa2b3eef29aaa6..f5a8fa03921aafdff9dd8c8274b4bd055ad05e8c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -128,7 +128,6 @@ #include "xgbe.h" #include "xgbe-common.h" - MODULE_AUTHOR("Tom Lendacky "); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(XGBE_DRV_VERSION); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 6d2221e023f45a70ebdcb32f668f32b0e59b7671..363b210560f332e08837e9bfb9426dcea62707a4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -123,7 +123,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg) { struct xgbe_prv_data *pdata = mii->priv; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c index 37e64cfa5718e6e45dc71d580296101327b7f49a..a1bf9d1cdae1e9d5dc666cf7b37f6320d1b2adff 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -122,7 +122,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static cycle_t xgbe_cc_read(const struct cyclecounter *cc) { struct xgbe_prv_data *pdata = container_of(cc, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e9fe6e6ddcc34acd7469aef6b523855e535f5a99..789957d43a1379939e2aa9b8d15f267473a271a6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -128,7 +128,6 @@ #include #include - #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.0-a" #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver" @@ -199,7 +198,6 @@ ((_ring)->rdata + \ ((_idx) & ((_ring)->rdesc_count - 1))) - /* Default coalescing parameters */ #define XGMAC_INIT_DMA_TX_USECS 50 #define XGMAC_INIT_DMA_TX_FRAMES 25 diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 514c57fd26f1bbf6eda45da6c6e1aebdb735e37c..8e262e2b39b63fc5b1e26cca09c66c08b76169c9 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -17,10 +17,14 @@ config NET_VENDOR_ARC if NET_VENDOR_ARC -config ARC_EMAC - tristate "ARC EMAC support" +config ARC_EMAC_CORE + tristate select MII select PHYLIB + +config ARC_EMAC + tristate "ARC EMAC support" + select ARC_EMAC_CORE depends on OF_IRQ depends on OF_NET ---help--- @@ -28,4 +32,14 @@ config ARC_EMAC non-standard on-chip ethernet device ARC EMAC 10/100 is used. Say Y here if you have such a board. If unsure, say N. +config EMAC_ROCKCHIP + tristate "Rockchip EMAC support" + select ARC_EMAC_CORE + depends on OF_IRQ && OF_NET && REGULATOR + ---help--- + Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + This selects Rockchip SoC glue layer support for the + emac device driver. This driver is used for RK3066/RK3188 + EMAC ethernet controller. 
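The Kconfig split above turns the ARC EMAC code into a small library: ARC_EMAC_CORE builds the shared core, and each platform entry only adds glue. A compressed sketch of what such a glue layer supplies, assuming the arc_emac_priv fields and the exported arc_emac_probe()/arc_emac_remove() interface introduced by this series; the example_* names are hypothetical, and the real implementations follow in emac_arc.c and emac_rockchip.c below:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/etherdevice.h>
	#include <linux/phy.h>
	#include <linux/platform_device.h>

	#include "emac.h"

	/* Invoked by the core's adjust_link handler whenever the PHY reports a
	 * new speed, so the SoC layer can reprogram its own registers without
	 * the core knowing about them.
	 */
	static void example_set_mac_speed(void *priv, unsigned int speed)
	{
		/* SoC-specific speed programming would go here. */
	}

	static int example_emac_probe(struct platform_device *pdev)
	{
		struct net_device *ndev;
		struct arc_emac_priv *priv;
		int err;

		ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
		if (!ndev)
			return -ENOMEM;
		platform_set_drvdata(pdev, ndev);
		SET_NETDEV_DEV(ndev, &pdev->dev);

		priv = netdev_priv(ndev);
		priv->drv_name = "example_emac";
		priv->drv_version = "1.0";
		priv->set_mac_speed = example_set_mac_speed;

		priv->clk = devm_clk_get(&pdev->dev, "hclk");
		if (IS_ERR(priv->clk)) {
			err = PTR_ERR(priv->clk);
			goto out_free;
		}

		/* The core attaches the PHY, sets up the MDIO bus and
		 * registers the netdev.
		 */
		err = arc_emac_probe(ndev, PHY_INTERFACE_MODE_MII);
		if (!err)
			return 0;

	out_free:
		free_netdev(ndev);
		return err;
	}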
+ endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile index 00c8657637d56d8396d10d3edf3b298cd4efd298..79108af553fb8d9d6f4097a20b09b0372a1af505 100644 --- a/drivers/net/ethernet/arc/Makefile +++ b/drivers/net/ethernet/arc/Makefile @@ -3,4 +3,6 @@ # arc_emac-objs := emac_main.o emac_mdio.o -obj-$(CONFIG_ARC_EMAC) += arc_emac.o +obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o +obj-$(CONFIG_ARC_EMAC) += emac_arc.o +obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index 36cc9bd07c478e7e776f7be199af0787eeb04da5..dae1ac300a49e692475a587381db629de3b4babc 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -123,6 +123,10 @@ struct buffer_state { * @speed: PHY's last set speed. */ struct arc_emac_priv { + const char *drv_name; + const char *drv_version; + void (*set_mac_speed)(void *priv, unsigned int speed); + /* Devices */ struct device *dev; struct phy_device *phy_dev; @@ -204,7 +208,9 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) arc_reg_set(priv, reg, value & ~mask); } -int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv); +int arc_mdio_probe(struct arc_emac_priv *priv); int arc_mdio_remove(struct arc_emac_priv *priv); +int arc_emac_probe(struct net_device *ndev, int interface); +int arc_emac_remove(struct net_device *ndev); #endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c new file mode 100644 index 0000000000000000000000000000000000000000..f9cb99bfb511896c0e6b9cb9559a714a72c30e1d --- /dev/null +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -0,0 +1,95 @@ +/** + * emac_arc.c - ARC EMAC specific glue layer + * + * Copyright (C) 2014 Romain Perier + * + * Romain Perier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "emac.h" + +#define DRV_NAME "emac_arc" +#define DRV_VERSION "1.0" + +static int emac_arc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct arc_emac_priv *priv; + int interface, err; + + if (!dev->of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->drv_name = DRV_NAME; + priv->drv_version = DRV_VERSION; + + interface = of_get_phy_mode(dev->of_node); + if (interface < 0) + interface = PHY_INTERFACE_MODE_MII; + + priv->clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to retrieve host clock from device tree\n"); + err = -EINVAL; + goto out_netdev; + } + + err = arc_emac_probe(ndev, interface); +out_netdev: + if (err) + free_netdev(ndev); + return err; +} + +static int emac_arc_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + int err; + + err = arc_emac_remove(ndev); + free_netdev(ndev); + return err; +} + +static const struct of_device_id emac_arc_dt_ids[] = { + { .compatible = "snps,arc-emac" }, + { /* Sentinel */ } +}; + +static struct platform_driver emac_arc_driver = { + .probe = emac_arc_probe, + .remove = emac_arc_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_arc_dt_ids, + }, +}; + +module_platform_driver(emac_arc_driver); + +MODULE_AUTHOR("Romain Perier "); +MODULE_DESCRIPTION("ARC EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 5919394d9f585f4c635b27dc985919b25d250646..abe1eabc017177ede3d1bdc7df6bec5a8ded86da 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -26,8 +26,6 @@ #include "emac.h" -#define DRV_NAME "arc_emac" -#define DRV_VERSION "1.0" /** * arc_emac_tx_avail - Return the number of available slots in the tx ring. 
@@ -61,6 +59,8 @@ static void arc_emac_adjust_link(struct net_device *ndev) if (priv->speed != phy_dev->speed) { priv->speed = phy_dev->speed; state_changed = 1; + if (priv->set_mac_speed) + priv->set_mac_speed(priv, priv->speed); } if (priv->duplex != phy_dev->duplex) { @@ -131,8 +131,10 @@ static int arc_emac_set_settings(struct net_device *ndev, static void arc_emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + struct arc_emac_priv *priv = netdev_priv(ndev); + + strlcpy(info->driver, priv->drv_name, sizeof(info->driver)); + strlcpy(info->version, priv->drv_version, sizeof(info->version)); } static const struct ethtool_ops arc_emac_ethtool_ops = { @@ -692,46 +694,38 @@ static const struct net_device_ops arc_emac_netdev_ops = { #endif }; -static int arc_emac_probe(struct platform_device *pdev) +int arc_emac_probe(struct net_device *ndev, int interface) { + struct device *dev = ndev->dev.parent; struct resource res_regs; struct device_node *phy_node; struct arc_emac_priv *priv; - struct net_device *ndev; const char *mac_addr; unsigned int id, clock_frequency, irq; int err; - if (!pdev->dev.of_node) - return -ENODEV; /* Get PHY from device tree */ - phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0); + phy_node = of_parse_phandle(dev->of_node, "phy", 0); if (!phy_node) { - dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n"); + dev_err(dev, "failed to retrieve phy description from device tree\n"); return -ENODEV; } /* Get EMAC registers base address from device tree */ - err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs); + err = of_address_to_resource(dev->of_node, 0, &res_regs); if (err) { - dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n"); + dev_err(dev, "failed to retrieve registers base from device tree\n"); return -ENODEV; } /* Get IRQ from device tree */ - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { - dev_err(&pdev->dev, "failed to retrieve value from device tree\n"); + dev_err(dev, "failed to retrieve value from device tree\n"); return -ENODEV; } - ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); - if (!ndev) - return -ENOMEM; - - platform_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; @@ -740,60 +734,57 @@ static int arc_emac_probe(struct platform_device *pdev) ndev->flags &= ~IFF_MULTICAST; priv = netdev_priv(ndev); - priv->dev = &pdev->dev; + priv->dev = dev; - priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); + priv->regs = devm_ioremap_resource(dev, &res_regs); if (IS_ERR(priv->regs)) { - err = PTR_ERR(priv->regs); - goto out_netdev; + return PTR_ERR(priv->regs); } - dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); - priv->clk = of_clk_get(pdev->dev.of_node, 0); - if (IS_ERR(priv->clk)) { - /* Get CPU clock frequency from device tree */ - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &clock_frequency)) { - dev_err(&pdev->dev, "failed to retrieve from device tree\n"); - err = -EINVAL; - goto out_netdev; - } - } else { + if (priv->clk) { err = clk_prepare_enable(priv->clk); if (err) { - dev_err(&pdev->dev, "failed to enable clock\n"); - goto out_clkget; + dev_err(dev, "failed to enable clock\n"); + 
return err; } clock_frequency = clk_get_rate(priv->clk); + } else { + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(dev->of_node, "clock-frequency", + &clock_frequency)) { + dev_err(dev, "failed to retrieve from device tree\n"); + return -EINVAL; + } } id = arc_reg_get(priv, R_ID); /* Check for EMAC revision 5 or 7, magic number */ if (!(id == 0x0005fd02 || id == 0x0007fd02)) { - dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); + dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id); err = -ENODEV; goto out_clken; } - dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); + dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id); /* Set poll rate so that it polls every 1 ms */ arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); ndev->irq = irq; - dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq); + dev_info(dev, "IRQ is %d\n", ndev->irq); /* Register interrupt handler for device */ - err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0, + err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0, ndev->name, ndev); if (err) { - dev_err(&pdev->dev, "could not allocate IRQ\n"); + dev_err(dev, "could not allocate IRQ\n"); goto out_clken; } /* Get MAC address from device tree */ - mac_addr = of_get_mac_address(pdev->dev.of_node); + mac_addr = of_get_mac_address(dev->of_node); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); @@ -801,14 +792,14 @@ static int arc_emac_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); arc_emac_set_address_internal(ndev); - dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); + dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ - priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ, + priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ, &priv->rxbd_dma, GFP_KERNEL); if (!priv->rxbd) { - dev_err(&pdev->dev, "failed to allocate data buffers\n"); + dev_err(dev, "failed to allocate data buffers\n"); err = -ENOMEM; goto out_clken; } @@ -816,31 +807,31 @@ static int arc_emac_probe(struct platform_device *pdev) priv->txbd = priv->rxbd + RX_BD_NUM; priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; - dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", + dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); - err = arc_mdio_probe(pdev, priv); + err = arc_mdio_probe(priv); if (err) { - dev_err(&pdev->dev, "failed to probe MII bus\n"); + dev_err(dev, "failed to probe MII bus\n"); goto out_clken; } priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + interface); if (!priv->phy_dev) { - dev_err(&pdev->dev, "of_phy_connect() failed\n"); + dev_err(dev, "of_phy_connect() failed\n"); err = -ENODEV; goto out_mdio; } - dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", + dev_info(dev, "connected to %s phy with id 0x%x\n", priv->phy_dev->drv->name, priv->phy_dev->phy_id); netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); err = register_netdev(ndev); if (err) { - dev_err(&pdev->dev, "failed to register network device\n"); + dev_err(dev, "failed to register network device\n"); goto out_netif_api; } @@ -853,19 +844,14 @@ static int arc_emac_probe(struct platform_device *pdev) out_mdio: arc_mdio_remove(priv); out_clken: - if (!IS_ERR(priv->clk)) + if (priv->clk) clk_disable_unprepare(priv->clk); -out_clkget: - if (!IS_ERR(priv->clk)) - 
clk_put(priv->clk); -out_netdev: - free_netdev(ndev); return err; } +EXPORT_SYMBOL_GPL(arc_emac_probe); -static int arc_emac_remove(struct platform_device *pdev) +int arc_emac_remove(struct net_device *ndev) { - struct net_device *ndev = platform_get_drvdata(pdev); struct arc_emac_priv *priv = netdev_priv(ndev); phy_disconnect(priv->phy_dev); @@ -876,31 +862,12 @@ static int arc_emac_remove(struct platform_device *pdev) if (!IS_ERR(priv->clk)) { clk_disable_unprepare(priv->clk); - clk_put(priv->clk); } - free_netdev(ndev); return 0; } - -static const struct of_device_id arc_emac_dt_ids[] = { - { .compatible = "snps,arc-emac" }, - { /* Sentinel */ } -}; -MODULE_DEVICE_TABLE(of, arc_emac_dt_ids); - -static struct platform_driver arc_emac_driver = { - .probe = arc_emac_probe, - .remove = arc_emac_remove, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .of_match_table = arc_emac_dt_ids, - }, -}; - -module_platform_driver(arc_emac_driver); +EXPORT_SYMBOL_GPL(arc_emac_remove); MODULE_AUTHOR("Alexey Brodkin "); MODULE_DESCRIPTION("ARC EMAC driver"); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 26ba2423f33a9a383b79c6a58d37e07b3d456cd3..d5ee986936dadd6764a0fe08ecc3b963c2e97d88 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -100,7 +100,6 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, /** * arc_mdio_probe - MDIO probe function. - * @pdev: Pointer to platform device. * @priv: Pointer to ARC EMAC private data structure. * * returns: 0 on success, -ENOMEM when mdiobus_alloc @@ -108,7 +107,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, * * Sets up and registers the MDIO interface. */ -int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv) +int arc_mdio_probe(struct arc_emac_priv *priv) { struct mii_bus *bus; int error; @@ -124,9 +123,9 @@ int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv) bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); - error = of_mdiobus_register(bus, pdev->dev.of_node); + error = of_mdiobus_register(bus, priv->dev->of_node); if (error) { dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); mdiobus_free(bus); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c new file mode 100644 index 0000000000000000000000000000000000000000..c31c7407b75353c1d2d4f855957b13098a65ab96 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -0,0 +1,229 @@ +/** + * emac-rockchip.c - Rockchip EMAC specific glue layer + * + * Copyright (C) 2014 Romain Perier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" + +#define DRV_NAME "rockchip_emac" +#define DRV_VERSION "1.0" + +#define GRF_MODE_MII (1UL << 0) +#define GRF_MODE_RMII (0UL << 0) +#define GRF_SPEED_10M (0UL << 1) +#define GRF_SPEED_100M (1UL << 1) +#define GRF_SPEED_ENABLE_BIT (1UL << 17) +#define GRF_MODE_ENABLE_BIT (1UL << 16) + +struct emac_rockchip_soc_data { + int grf_offset; +}; + +struct rockchip_priv_data { + struct arc_emac_priv emac; + struct regmap *grf; + const struct emac_rockchip_soc_data *soc_data; + struct regulator *regulator; + struct clk *refclk; +}; + +static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) +{ + struct rockchip_priv_data *emac = priv; + u32 data; + int err = 0; + + /* write-enable bits */ + data = GRF_SPEED_ENABLE_BIT; + + switch(speed) { + case 10: + data |= GRF_SPEED_10M; + break; + case 100: + data |= GRF_SPEED_100M; + break; + default: + pr_err("speed %u not supported\n", speed); + return; + } + + err = regmap_write(emac->grf, emac->soc_data->grf_offset, data); + if (err) + pr_err("unable to apply speed %u to grf (%d)\n", speed, err); +} + +static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { + { .grf_offset = 0x154 }, /* rk3066 */ + { .grf_offset = 0x0a4 }, /* rk3188 */ +}; + +static const struct of_device_id emac_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, emac_rockchip_dt_ids); + +static int emac_rockchip_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct rockchip_priv_data *priv; + const struct of_device_id *match; + u32 data; + int err, interface; + + if (!pdev->dev.of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct rockchip_priv_data)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->emac.drv_name = DRV_NAME; + priv->emac.drv_version = DRV_VERSION; + priv->emac.set_mac_speed = emac_rockchip_set_mac_speed; + + interface = of_get_phy_mode(dev->of_node); + + /* RK3066 and RK3188 SoCs only support RMII */ + if (interface != PHY_INTERFACE_MODE_RMII) { + dev_err(dev, "unsupported phy interface mode %d\n", interface); + err = -ENOTSUPP; + goto out_netdev; + } + + priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); + if (IS_ERR(priv->grf)) { + dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf)); + err = PTR_ERR(priv->grf); + goto out_netdev; + } + + match = of_match_node(emac_rockchip_dt_ids, dev->of_node); + priv->soc_data = match->data; + + priv->emac.clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->emac.clk)) { + dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk)); + err = PTR_ERR(priv->emac.clk); + goto out_netdev; + } + + priv->refclk = devm_clk_get(dev, "macref"); + if (IS_ERR(priv->refclk)) { + dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk)); + err = PTR_ERR(priv->refclk); + goto out_netdev; + } + + err = clk_prepare_enable(priv->refclk); + if (err) { + dev_err(dev, "failed to enable reference clock (%d)\n", err); + goto out_netdev; + } + + /* Optional regulator for PHY */ + priv->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(priv->regulator)) { + if (PTR_ERR(priv->regulator) == 
-EPROBE_DEFER) + return -EPROBE_DEFER; + dev_err(dev, "no regulator found\n"); + priv->regulator = NULL; + } + + if (priv->regulator) { + err = regulator_enable(priv->regulator); + if (err) { + dev_err(dev, "failed to enable phy-supply (%d)\n", err); + goto out_clk_disable; + } + } + + err = arc_emac_probe(ndev, interface); + if (err) + goto out_regulator_disable; + + /* write-enable bits */ + data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; + + data |= GRF_SPEED_100M; + data |= GRF_MODE_RMII; + + err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); + if (err) { + dev_err(dev, "unable to apply initial settings to grf (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII interface needs always a rate of 50MHz */ + err = clk_set_rate(priv->refclk, 50000000); + if (err) + dev_err(dev, "failed to change reference clock rate (%d)\n", err); + return 0; + +out_regulator_disable: + if (priv->regulator) + regulator_disable(priv->regulator); +out_clk_disable: + clk_disable_unprepare(priv->refclk); +out_netdev: + free_netdev(ndev); + return err; +} + +static int emac_rockchip_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct rockchip_priv_data *priv = netdev_priv(ndev); + int err; + + err = arc_emac_remove(ndev); + + clk_disable_unprepare(priv->refclk); + + if (priv->regulator) + regulator_disable(priv->regulator); + + free_netdev(ndev); + return err; +} + +static struct platform_driver emac_rockchip_driver = { + .probe = emac_rockchip_probe, + .remove = emac_rockchip_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_rockchip_dt_ids, + }, +}; + +module_platform_driver(emac_rockchip_driver); + +MODULE_AUTHOR("Romain Perier "); +MODULE_DESCRIPTION("Rockchip EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index d8d07a818b89bc694a77b2bdc3172f5bbb6d7010..c3e260c21734c37ca1641ffbf9441c867d18deb6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -122,6 +122,7 @@ config TIGON3 config BNX2X tristate "Broadcom NetXtremeII 10Gb support" depends on PCI + select PTP_1588_CLOCK select FW_LOADER select ZLIB_INFLATE select LIBCRC32C diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index d588136b23b93c6dad513384a3a0ef3e4a436a2d..416620fa8fac4655f247a3d5e50aad9ad38c7ece 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -427,7 +427,7 @@ static void b44_wap54g10_workaround(struct b44 *bp) } return; error: - pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); + pr_warn("PHY: cannot reset MII transceiver isolate bit\n"); } #else static inline void b44_wap54g10_workaround(struct b44 *bp) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d9b9170ed2fc329d5d5c8522fdc3803ec90cb49b..07568818864440190918e314425f697a26d93ff6 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -139,6 +139,15 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, else reg &= ~RXCHK_SKIP_FCS; + /* If Broadcom tags are enabled (e.g: using a switch), make + * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom + * tag after the Ethernet MAC Source Address. 
+ */ + if (netdev_uses_dsa(dev)) + reg |= RXCHK_BRCM_TAG_EN; + else + reg &= ~RXCHK_BRCM_TAG_EN; + rxchk_writel(priv, reg, RXCHK_CONTROL); return 0; @@ -848,7 +857,8 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) +static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, + struct net_device *dev) { struct sk_buff *nskb; struct bcm_tsb *tsb; @@ -864,7 +874,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) if (!nskb) { dev->stats.tx_errors++; dev->stats.tx_dropped++; - return -ENOMEM; + return NULL; } skb = nskb; } @@ -883,7 +893,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) ip_proto = ipv6_hdr(skb)->nexthdr; break; default: - return 0; + return skb; } /* Get the checksum offset and the L4 (transport) offset */ @@ -902,7 +912,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) tsb->l4_ptr_dest_map = csum_info; } - return 0; + return skb; } static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, @@ -936,8 +946,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, /* Insert TSB and checksum infos */ if (priv->tsb_en) { - ret = bcm_sysport_insert_tsb(skb, dev); - if (ret) { + skb = bcm_sysport_insert_tsb(skb, dev); + if (!skb) { ret = NETDEV_TX_OK; goto out; } @@ -1069,16 +1079,19 @@ static void bcm_sysport_adj_link(struct net_device *dev) if (!phydev->pause) cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - if (changed) { + if (!changed) + return; + + if (phydev->link) { reg = umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); reg |= cmd_bits; umac_writel(priv, reg, UMAC_CMD); - - phy_print_status(priv->phydev); } + + phy_print_status(priv->phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d777fae869883fedd4ecba03987f8b56e3db56cb..c3a6072134f58c0f2df719b9badf102400a92043 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -20,13 +20,17 @@ #include #include +#include +#include +#include + /* compilation time flags */ /* define this to make the driver freeze on error to allow getting debug info * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_VERSION "1.710.51-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -70,6 +74,7 @@ enum bnx2x_int_mode { #define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_IOV 0x0800000 +#define BNX2X_MSG_PTP 0x1000000 #define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ #define BNX2X_MSG_ETHTOOL 0x4000000 #define BNX2X_MSG_DCB 0x8000000 @@ -1443,6 +1448,12 @@ struct bnx2x_fp_stats { struct bnx2x_eth_q_stats_old eth_q_stats_old; }; +enum { + SUB_MF_MODE_UNKNOWN = 0, + SUB_MF_MODE_UFP, + SUB_MF_MODE_NPAR1_DOT_5, +}; + struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1587,10 +1598,11 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) -#define INTERRUPTS_ENABLED_FLAG (1 << 23) -#define 
BC_SUPPORTS_RMMOD_CMD (1 << 24) -#define HAS_PHYS_PORT_ID (1 << 25) -#define AER_ENABLED (1 << 26) +#define BC_SUPPORTS_RMMOD_CMD (1 << 23) +#define HAS_PHYS_PORT_ID (1 << 24) +#define AER_ENABLED (1 << 25) +#define PTP_SUPPORTED (1 << 26) +#define TX_TIMESTAMPING_EN (1 << 27) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1653,6 +1665,9 @@ struct bnx2x { #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) #define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) + u8 mf_sub_mode; +#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_UFP) u8 wol; @@ -1684,13 +1699,9 @@ struct bnx2x { #define BNX2X_STATE_ERROR 0xf000 #define BNX2X_MAX_PRIORITY 8 -#define BNX2X_MAX_ENTRIES_PER_PRI 16 -#define BNX2X_MAX_COS 3 -#define BNX2X_MAX_TX_COS 2 int num_queues; uint num_ethernet_queues; uint num_cnic_queues; - int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1933,6 +1944,19 @@ struct bnx2x { u8 phys_port_id[ETH_ALEN]; + /* PTP related context */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct work_struct ptp_task; + struct cyclecounter cyclecounter; + struct timecounter timecounter; + bool timecounter_init_done; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; + bool hwtstamp_ioctl_called; + u16 tx_type; + u16 rx_filter; + struct bnx2x_link_report_data vf_link_vars; }; @@ -2346,7 +2370,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 -#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \ IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ @@ -2522,14 +2546,44 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) +#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp)) + +#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp)) + +#define BNX2X_MF_EXT_PROTOCOL_MASK \ + (MACP_FUNC_CFG_FLAGS_ETHERNET | \ + MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \ + MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \ + BNX2X_MF_EXT_PROTOCOL_MASK) -#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \ - MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) +#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) + +#define IS_MF_FCOE_AFEX(bp) \ + (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)) + +#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD(bp) && \ + (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + +#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SI(bp) && \ + (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))) + +#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ + IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) -#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp)) -#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ - (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ - BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 
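The new PTP fields in struct bnx2x (ptp_clock, cyclecounter, timecounter, ptp_tx_skb and friends) follow the usual Linux PHC pattern: the adapter's free-running counter is wrapped in a cyclecounter, a timecounter converts raw counter reads into nanoseconds, and those nanoseconds are attached to the skb. A generic sketch of the receive-side conversion, not the driver's actual bnx2x_set_rx_ts() implementation; example_deliver_rx_ts() and its arguments are illustrative:

	#include <linux/clocksource.h>
	#include <linux/ktime.h>
	#include <linux/skbuff.h>

	static void example_deliver_rx_ts(struct timecounter *tc,
					  struct sk_buff *skb, u64 raw_hw_ts)
	{
		/* timecounter_cyc2time() extends the hardware counter value to
		 * a full 64-bit nanosecond timestamp based on the cyclecounter
		 * the timecounter was initialized with.
		 */
		u64 ns = timecounter_cyc2time(tc, raw_hw_ts);

		/* Stamping the skb is what makes SOF_TIMESTAMPING_RAW_HARDWARE,
		 * advertised in bnx2x_get_ts_info() later in this patch,
		 * visible to applications.
		 */
		skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	}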
#define SET_FLAG(value, mask, flag) \ do {\ @@ -2559,4 +2613,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp); #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) +void bnx2x_init_ptp(struct bnx2x *bp); +int bnx2x_configure_ptp_filters(struct bnx2x *bp); +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); + +#define BNX2X_MAX_PHC_DRIFT 31000000 +#define BNX2X_PTP_TX_TIMEOUT + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4ccc806b11501bb08b96614a0e79b020867648fe..40beef5bca88ade51dd5248550c45059b8774476 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -64,7 +65,7 @@ static int bnx2x_calc_num_queues(struct bnx2x *bp) int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); /* Reduce memory usage in kdump environment by using only one queue */ - if (reset_devices) + if (is_kdump_kernel()) nq = 1; nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); @@ -1063,6 +1064,11 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb_record_rx_queue(skb, fp->rx_queue); + /* Check if this packet was timestamped */ + if (unlikely(cqe->fast_path_cqe.type_error_flags & + (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT))) + bnx2x_set_rx_ts(bp, skb); + if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1932,7 +1938,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); /* override in STORAGE SD modes */ - if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) + if (IS_MF_STORAGE_ONLY(bp)) bp->num_ethernet_queues = 1; /* Add special queues */ @@ -2078,6 +2084,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + + if (!CHIP_IS_E1x(bp)) + /* valid only for TUNN_MODE_GRE tunnel mode */ + __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2800,7 +2810,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Initialize Rx filter. */ bnx2x_set_rx_mode_inner(bp); - /* Start the Tx */ + if (bp->flags & PTP_SUPPORTED) { + bnx2x_init_ptp(bp); + bnx2x_configure_ptp_filters(bp); + } + /* Start Tx */ switch (load_mode) { case LOAD_NORMAL: /* Tx queue should be only re-enabled */ @@ -3437,26 +3451,6 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, } #endif -static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) -{ - struct ipv6hdr *ipv6; - - *parsing_data |= (skb_shinfo(skb)->gso_size << - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & - ETH_TX_PARSE_BD_E2_LSO_MSS; - - if (xmit_type & XMIT_GSO_ENC_V6) - ipv6 = inner_ipv6_hdr(skb); - else if (xmit_type & XMIT_GSO_V6) - ipv6 = ipv6_hdr(skb); - else - ipv6 = NULL; - - if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) - *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; -} - /** * bnx2x_set_pbd_gso - update PBD in GSO case. 
* @@ -3466,7 +3460,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3479,9 +3472,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - /* GSO on 57710/57711 needs FW to calculate IP checksum */ - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } else { pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -3653,18 +3643,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, (__force u32)iph->tot_len - (__force u32)iph->frag_off; + outerip_len = iph->ihl << 1; + pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + pbd_e2->data.tunnel_data.flags |= + ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); - if (xmit_type & XMIT_GSO_V4) { + /* inner IP header info */ + if (xmit_type & XMIT_CSUM_ENC_V4) { pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); pbd_e2->data.tunnel_data.pseudo_csum = @@ -3672,8 +3667,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, inner_ip_hdr(skb)->saddr, inner_ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - outerip_len = ip_hdr(skb)->ihl << 1; } else { pbd_e2->data.tunnel_data.pseudo_csum = bswab16(~csum_ipv6_magic( @@ -3686,8 +3679,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, *global_data |= outerip_off | - (!!(xmit_type & XMIT_CSUM_V6) << - ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | (outerip_len << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << @@ -3699,6 +3690,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, } } +static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + struct ipv6hdr *ipv6; + + if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) + return; + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else /* XMIT_GSO_V6 */ + ipv6 = ipv6_hdr(skb); + + if (ipv6->nexthdr == NEXTHDR_IPV6) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@ -3831,6 +3839,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (!(bp->flags & TX_TIMESTAMPING_EN)) { + BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (bp->ptp_tx_skb) { + BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + bp->ptp_tx_skb = skb_get(skb); + bp->ptp_tx_start = jiffies; + schedule_work(&bp->ptp_task); + } + } + /* header nbd: indirectly zero other flags! 
*/ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; @@ -3852,12 +3874,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ +#ifndef BNX2X_STOP_ON_ERROR if (IS_VF(bp)) +#endif tx_start_bd->vlan_or_ethertype = cpu_to_le16(ntohs(eth->h_proto)); +#ifndef BNX2X_STOP_ON_ERROR else /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); +#endif } nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ @@ -3915,6 +3941,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) xmit_type); } + bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type); /* Add the macs to the parsing BD if this is a vf or if * Tx Switching is enabled. */ @@ -3929,11 +3956,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) &pbd_e2->data.mac_addr.dst_mid, &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); - } else if (bp->flags & TX_SWITCHING) { - bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, - &pbd_e2->data.mac_addr.dst_mid, - &pbd_e2->data.mac_addr.dst_lo, - eth->h_dest); + } else { + if (bp->flags & TX_SWITCHING) + bnx2x_set_fw_mac_addr( + &pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); +#ifdef BNX2X_STOP_ON_ERROR + /* Enforce security is always set in Stop on Error - + * source mac should be present in the parsing BD + */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, + eth->h_source); +#endif } SET_FLAG(pbd_e2_parsing_data, @@ -3980,10 +4018,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_prod); } if (!CHIP_IS_E1x(bp)) - bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, - xmit_type); + pbd_e2_parsing_data |= + (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; else - bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); } /* Set the PBD's parsing_data field if not zero @@ -4191,14 +4231,13 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) { + if (!is_valid_ether_addr(addr->sa_data)) { BNX2X_ERR("Requested MAC address is not valid\n"); return -EINVAL; } - if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && - !is_zero_ether_addr(addr->sa_data)) { - BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); + if (IS_MF_STORAGE_ONLY(bp)) { + BNX2X_ERR("Can't change address on STORAGE ONLY function\n"); return -EINVAL; } @@ -4377,8 +4416,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) u8 cos; int rx_ring_size = 0; - if (!bp->rx_ring_size && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { + if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; } else if (!bp->rx_ring_size) { @@ -4771,11 +4809,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); /* TPA requires Rx CSUM offloading */ - if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) { + if (!(features & NETIF_F_RXCSUM)) { features &= ~NETIF_F_LRO; features &= ~NETIF_F_GRO; } + /* Note: do not disable SW GRO in kernel when HW GRO is off */ + if (bp->disable_tpa) + features &= ~NETIF_F_LRO; + 
return features; } @@ -4814,6 +4856,10 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) changes &= ~GRO_ENABLE_FLAG; + /* if GRO is changed while HW TPA is off, don't force a reload */ + if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) + changes &= ~GRO_ENABLE_FLAG; + if (changes) bnx2x_reload = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 571427c7226b11f4c22669ce8d8f14c81950a10d..adcacda7af7b10e70b053821acc1e2dfe722732a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -932,8 +932,15 @@ static inline int bnx2x_func_start(struct bnx2x *bp) else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->gre_tunnel_mode = L2GRE_TUNNEL; - start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; + start_params->tunnel_mode = TUNN_MODE_GRE; + start_params->gre_tunnel_type = IPGRE_TUNNEL; + start_params->inner_gre_rss_en = 1; + + if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { + start_params->class_fail_ethtype = ETH_P_FIP; + start_params->class_fail = 1; + start_params->no_added_tags = 1; + } return bnx2x_func_state_change(bp, &func_params); } @@ -1297,15 +1304,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) } } -static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) -{ - if (is_valid_ether_addr(addr) || - (is_zero_ether_addr(addr) && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))) - return true; - return false; -} /** * bnx2x_fill_fw_str - Fill buffer with FW version string diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fb26bc4c42a1fd03d42ef885dc562bcf4d460ade..6e4294ed1fc997e5545fcc48a24c92124129867c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - int rc = 0; DP(BNX2X_MSG_DCB, "SET-ALL\n"); @@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) 1); bnx2x_dcbx_init(bp, true); } - DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); - if (rc) - return 1; + DP(BNX2X_MSG_DCB, "set_dcbx_params done\n"); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 12eb4baee9f642e444bbb162d038911776b4d852..741aa130c19f4efb4df2e2b64b166f24bd4b6b3e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -40,7 +40,7 @@ struct dump_header { u32 dump_meta_data; /* OR of CHIP and PATH. 
*/ }; -#define BNX2X_DUMP_VERSION 0x50acff01 +#define BNX2X_DUMP_VERSION 0x61111111 struct reg_addr { u32 addr; u32 size; @@ -1464,7 +1464,6 @@ static const struct reg_addr reg_addrs[] = { { 0x180398, 1, 0x1c, 0x924}, { 0x1803a0, 5, 0x1c, 0x924}, { 0x1803b4, 2, 0x18, 0x924}, - { 0x180400, 256, 0x3, 0xfff}, { 0x181000, 4, 0x1f, 0x93c}, { 0x181010, 1020, 0x1f, 0x38}, { 0x182000, 4, 0x18, 0x924}, @@ -1576,7 +1575,6 @@ static const struct reg_addr reg_addrs[] = { { 0x200398, 1, 0x1c, 0x924}, { 0x2003a0, 1, 0x1c, 0x924}, { 0x2003a8, 2, 0x1c, 0x924}, - { 0x200400, 256, 0x3, 0xfff}, { 0x202000, 4, 0x1f, 0x1927}, { 0x202010, 2044, 0x1f, 0x1007}, { 0x204000, 4, 0x18, 0x924}, @@ -1688,7 +1686,6 @@ static const struct reg_addr reg_addrs[] = { { 0x280398, 1, 0x1c, 0x924}, { 0x2803a0, 1, 0x1c, 0x924}, { 0x2803a8, 2, 0x1c, 0x924}, - { 0x280400, 256, 0x3, 0xfff}, { 0x282000, 4, 0x1f, 0x9e4}, { 0x282010, 2044, 0x1f, 0x1c0}, { 0x284000, 4, 0x18, 0x924}, @@ -1800,7 +1797,6 @@ static const struct reg_addr reg_addrs[] = { { 0x300398, 1, 0x1c, 0x924}, { 0x3003a0, 1, 0x1c, 0x924}, { 0x3003a8, 2, 0x1c, 0x924}, - { 0x300400, 256, 0x3, 0xfff}, { 0x302000, 4, 0x1f, 0xf24}, { 0x302010, 2044, 0x1f, 0xe00}, { 0x304000, 4, 0x18, 0x924}, @@ -2206,10 +2202,10 @@ static const struct wreg_addr wreg_addr_e3b0 = { 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff}; static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = { - {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812, - 26247, 35655, 19074}, - {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804, - 26977, 40957, 35895}, + {19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788, + 25223, 34631, 19074}, + {31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780, + 25953, 39933, 35895}, {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557, 25608, 41377, 43903}, {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 92fee842f954f8787b245c007deb5291ca8c8771..1edc931b145848b238d2450b5c2468d029644feb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1852,7 +1852,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, if ((ering->rx_pending > MAX_RX_AVAIL) || (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) || - (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) || + (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 
0 : MAX_TX_AVAIL)) || (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; @@ -3481,6 +3481,46 @@ static int bnx2x_set_channels(struct net_device *dev, return bnx2x_nic_load(bp, LOAD_NORMAL); } +static int bnx2x_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & PTP_SUPPORTED) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (bp->ptp_clock) + info->phc_index = ptp_clock_index(bp->ptp_clock); + else + info->phc_index = -1; + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); + + return 0; + } + + return ethtool_op_get_ts_info(dev, info); +} + static const struct ethtool_ops bnx2x_ethtool_ops = { .get_settings = bnx2x_get_settings, .set_settings = bnx2x_set_settings, @@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_module_eeprom = bnx2x_get_module_eeprom, .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = bnx2x_get_ts_info, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 95dc365435483ed9298389b88f965130d48eb7d0..7636e3c18771dced3a1d4bd3db0018cee4b4d7f3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -10,170 +10,170 @@ #ifndef BNX2X_FW_DEFS_H #define BNX2X_FW_DEFS_H -#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base) #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[147].base + ((assertListEntry) * IRO[147].m1)) + (IRO[151].base + ((assertListEntry) * IRO[151].m1)) #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ - (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ - IRO[153].m2)) + (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \ + IRO[157].m2)) #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ - (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ - IRO[154].m2)) + (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \ + IRO[158].m2)) #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ - (IRO[159].base + ((funcId) * IRO[159].m1)) + (IRO[163].base + ((funcId) * IRO[163].m1)) #define CSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[149].base + ((funcId) * IRO[149].m1)) + (IRO[153].base + ((funcId) * IRO[153].m1)) #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ - (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2)) + (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2)) #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ - (IRO[138].base + 
(((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \ - * IRO[138].m2) + ((sbId) * IRO[138].m3)) -#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) + (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \ + * IRO[142].m2) + ((sbId) * IRO[142].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[161].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[317].base + ((pfId) * IRO[317].m1)) + (IRO[323].base + ((pfId) * IRO[323].m1)) #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[318].base + ((pfId) * IRO[318].m1)) + (IRO[324].base + ((pfId) * IRO[324].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) -#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) + (IRO[322].base + ((pfId) * IRO[322].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[308].base + ((pfId) * IRO[308].m1)) + (IRO[314].base + ((pfId) * IRO[314].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[307].base + ((pfId) * IRO[307].m1)) + (IRO[313].base + ((pfId) * IRO[313].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) + (IRO[312].base + ((pfId) * IRO[312].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[151].base + ((funcId) * IRO[151].m1)) + (IRO[155].base + ((funcId) * IRO[155].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ - (IRO[142].base + ((pfId) * IRO[142].m1)) + (IRO[146].base + ((pfId) * IRO[146].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ - (IRO[143].base + ((pfId) * IRO[143].m1)) + (IRO[147].base + ((pfId) * IRO[147].m1)) #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ - (IRO[141].base + ((pfId) * IRO[141].m1)) -#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) + (IRO[145].base + ((pfId) * IRO[145].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size) #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ - (IRO[144].base + ((pfId) * IRO[144].m1)) -#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) + (IRO[148].base + ((pfId) * IRO[148].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size) #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ - (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) 
+ (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2)) #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ - (IRO[133].base + ((sbId) * IRO[133].m1)) + (IRO[137].base + ((sbId) * IRO[137].m1)) #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ - (IRO[134].base + ((sbId) * IRO[134].m1)) + (IRO[138].base + ((sbId) * IRO[138].m1)) #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ - (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) + (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2)) #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ - (IRO[132].base + ((sbId) * IRO[132].m1)) -#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) + (IRO[136].base + ((sbId) * IRO[136].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size) #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ - (IRO[137].base + ((sbId) * IRO[137].m1)) -#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) + (IRO[141].base + ((sbId) * IRO[141].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size) #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ - (IRO[155].base + ((vfId) * IRO[155].m1)) + (IRO[159].base + ((vfId) * IRO[159].m1)) #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ - (IRO[156].base + ((vfId) * IRO[156].m1)) + (IRO[160].base + ((vfId) * IRO[160].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[150].base + ((funcId) * IRO[150].m1)) + (IRO[154].base + ((funcId) * IRO[154].m1)) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ - (IRO[203].base + ((pfId) * IRO[203].m1)) + (IRO[207].base + ((pfId) * IRO[207].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[101].base + ((assertListEntry) * IRO[101].m1)) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ - (IRO[201].base + ((pfId) * IRO[201].m1)) + (IRO[205].base + ((pfId) * IRO[205].m1)) #define TSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[103].base + ((funcId) * IRO[103].m1)) + (IRO[107].base + ((funcId) * IRO[107].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[272].base + ((pfId) * IRO[272].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ - (IRO[273].base + ((pfId) * IRO[273].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) + (IRO[280].base + ((pfId) * IRO[280].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ - (IRO[275].base + ((pfId) * IRO[275].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[271].base + ((pfId) * IRO[271].m1)) + (IRO[277].base + ((pfId) * IRO[277].m1)) #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) + (IRO[276].base + ((pfId) * IRO[276].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) + (IRO[275].base + ((pfId) * IRO[275].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[268].base + ((pfId) * IRO[268].m1)) + (IRO[274].base + ((pfId) * IRO[274].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) + (IRO[284].base + ((pfId) * IRO[284].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[264].base + ((pfId) * IRO[264].m1)) + (IRO[270].base + ((pfId) * IRO[270].m1)) #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[265].base + ((pfId) * IRO[265].m1)) + (IRO[271].base + ((pfId) * IRO[271].m1)) #define 
TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[266].base + ((pfId) * IRO[266].m1)) + (IRO[272].base + ((pfId) * IRO[272].m1)) #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * IRO[267].m1)) + (IRO[273].base + ((pfId) * IRO[273].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ - (IRO[202].base + ((pfId) * IRO[202].m1)) + (IRO[206].base + ((pfId) * IRO[206].m1)) #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[105].base + ((funcId) * IRO[105].m1)) + (IRO[109].base + ((funcId) * IRO[109].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[217].base + ((pfId) * IRO[217].m1)) + (IRO[223].base + ((pfId) * IRO[223].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[104].base + ((funcId) * IRO[104].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[206].base) -#define USTORM_AGG_DATA_SIZE (IRO[206].size) -#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) + (IRO[108].base + ((funcId) * IRO[108].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[212].base) +#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[176].base + ((assertListEntry) * IRO[176].m1)) + (IRO[180].base + ((assertListEntry) * IRO[180].m1)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ - (IRO[183].base + ((portId) * IRO[183].m1)) + (IRO[187].base + ((portId) * IRO[187].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[319].base + ((pfId) * IRO[319].m1)) + (IRO[325].base + ((pfId) * IRO[325].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[178].base + ((funcId) * IRO[178].m1)) + (IRO[182].base + ((funcId) * IRO[182].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ - (IRO[182].base + ((pfId) * IRO[182].m1)) + (IRO[186].base + ((pfId) * IRO[186].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[180].base + ((funcId) * IRO[180].m1)) + (IRO[184].base + ((funcId) * IRO[184].m1)) #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ - IRO[209].m2)) + (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ + IRO[215].m2)) #define 
USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[210].base + ((qzoneId) * IRO[210].m1)) -#define USTORM_TPA_BTR_OFFSET (IRO[207].base) -#define USTORM_TPA_BTR_SIZE (IRO[207].size) + (IRO[216].base + ((qzoneId) * IRO[216].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[213].base) +#define USTORM_TPA_BTR_SIZE (IRO[213].size) #define USTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[179].base + ((funcId) * IRO[179].m1)) + (IRO[183].base + ((funcId) * IRO[183].m1)) #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) @@ -186,39 +186,39 @@ #define XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) + (IRO[306].base + ((pfId) * IRO[306].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[307].base + ((pfId) * IRO[307].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[308].base + ((pfId) * IRO[308].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) + (IRO[309].base + ((pfId) * IRO[309].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) + (IRO[310].base + ((pfId) * IRO[310].m1)) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) + (IRO[311].base + ((pfId) * IRO[311].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[304].base + ((pfId) * IRO[304].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -231,16 +231,19 @@ #define XSTORM_SPQ_PROD_OFFSET(funcId) \ (IRO[31].base + ((funcId) * IRO[31].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[211].base + ((portId) * IRO[211].m1)) + (IRO[217].base + ((portId) * IRO[217].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ - (IRO[212].base + ((portId) * IRO[212].m1)) + 
(IRO[218].base + ((portId) * IRO[218].m1)) #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ - IRO[214].m2)) + (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ + IRO[220].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 +/* eth hsi version */ +#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2) + /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 #define FIRST_BD_IN_PKT 0 @@ -356,6 +359,7 @@ #define XSEMI_CLK1_RESUL_CHIP (1e-3) #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) +#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6)) /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index c4daa068f1db5e37bdea6f160dde26b7c13d1659..583591d52497d22fb6684cadb161de167b74260e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -280,17 +280,11 @@ struct shared_hw_cfg { /* NVRAM Offset */ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 - - u32 power_dissipated; /* 0x11c */ - #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 - #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 - #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 - - #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 - #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 + u32 config_3; /* 0x11C */ + #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100 u32 ump_nc_si_config; /* 0x120 */ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 @@ -859,6 +853,8 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 /* The interval in seconds between sending LLDP packets. 
Set to zero to disable the feature */ @@ -1268,6 +1264,10 @@ struct drv_func_mb { #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + #define DRV_MSG_CODE_OEM_OK 0x00010000 + #define DRV_MSG_CODE_OEM_FAILURE 0x00020000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000 /* * The optic module verification command requires bootcode * v5.0.6 or later, te specific optic module verification command @@ -1423,6 +1423,12 @@ struct drv_func_mb { #define DRV_STATUS_SET_MF_BW 0x00000004 #define DRV_STATUS_LINK_EVENT 0x00000008 + #define DRV_STATUS_OEM_EVENT_MASK 0x00000070 + #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010 + #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020 + + #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080 + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 @@ -2881,8 +2887,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 19 +#define BCM_5710_FW_MINOR_VERSION 10 +#define BCM_5710_FW_REVISION_VERSION 51 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3451,6 +3457,7 @@ enum classify_rule { CLASSIFY_RULE_OPCODE_MAC, CLASSIFY_RULE_OPCODE_VLAN, CLASSIFY_RULE_OPCODE_PAIR, + CLASSIFY_RULE_OPCODE_VXLAN, MAX_CLASSIFY_RULE }; @@ -3480,7 +3487,8 @@ struct client_init_general_data { u8 func_id; u8 cos; u8 traffic_type; - u32 reserved0; + u8 fp_hsi_ver; + u8 reserved0[3]; }; @@ -3550,7 +3558,9 @@ struct client_init_rx_data { __le16 rx_cos_mask; __le16 silent_vlan_value; __le16 silent_vlan_mask; - __le32 reserved6[2]; + u8 handle_ptp_pkts_flg; + u8 reserved6[3]; + __le32 reserved7; }; /* @@ -3581,7 +3591,7 @@ struct client_init_tx_data { u8 tunnel_lso_inc_ip_id; u8 refuse_outband_vlan_flg; u8 tunnel_non_lso_pcsum_location; - u8 reserved1; + u8 tunnel_non_lso_outer_ip_csum_location; }; /* @@ -3619,7 +3629,9 @@ struct client_update_ramrod_data { u8 refuse_outband_vlan_change_flg; u8 tx_switching_flg; u8 tx_switching_change_flg; - __le32 reserved1; + u8 handle_ptp_pkts_flg; + u8 handle_ptp_pkts_change_flg; + __le16 reserved1; __le32 echo; }; @@ -3639,6 +3651,11 @@ struct double_regpair { u32 regpair1_hi; }; +/* 2nd parse bd type used in ethernet tx BDs */ +enum eth_2nd_parse_bd_type { + ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL, + MAX_ETH_2ND_PARSE_BD_TYPE +}; /* * Ethernet address typesm used in ethernet tx BDs @@ -3723,6 +3740,18 @@ struct eth_classify_vlan_cmd { __le16 vlan; }; +/* + * Command for adding/removing a VXLAN classification rule + */ +struct eth_classify_vxlan_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 inner_mac_lsb; + __le16 inner_mac_mid; + __le16 inner_mac_msb; + __le16 reserved1; +}; + /* * union for eth classification rule */ @@ -3730,6 +3759,7 @@ union eth_classify_rule_cmd { struct eth_classify_mac_cmd mac; struct eth_classify_vlan_cmd vlan; struct eth_classify_pair_cmd pair; + struct eth_classify_vxlan_cmd vxlan; }; /* @@ -3835,8 +3865,10 @@ struct eth_fast_path_rx_cqe { #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) -#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6) +#define 
ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7 u8 status_flags; #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 @@ -3907,6 +3939,13 @@ struct eth_filter_rules_ramrod_data { struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; }; +/* Hsi version */ +enum eth_fp_hsi_ver { + ETH_FP_HSI_VER_0, + ETH_FP_HSI_VER_1, + ETH_FP_HSI_VER_2, + MAX_ETH_FP_HSI_VER +}; /* * parameters for eth classification configuration ramrod @@ -3955,29 +3994,17 @@ struct eth_mac_addresses { /* tunneling related data */ struct eth_tunnel_data { -#if defined(__BIG_ENDIAN) - __le16 dst_mid; - __le16 dst_lo; -#elif defined(__LITTLE_ENDIAN) __le16 dst_lo; __le16 dst_mid; -#endif -#if defined(__BIG_ENDIAN) - __le16 reserved0; - __le16 dst_hi; -#elif defined(__LITTLE_ENDIAN) __le16 dst_hi; - __le16 reserved0; -#endif -#if defined(__BIG_ENDIAN) - u8 reserved1; - u8 ip_hdr_start_inner_w; - __le16 pseudo_csum; -#elif defined(__LITTLE_ENDIAN) + __le16 fw_ip_hdr_csum; __le16 pseudo_csum; u8 ip_hdr_start_inner_w; - u8 reserved1; -#endif + u8 flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 }; /* union for mac addresses and for tunneling data. @@ -4064,31 +4091,41 @@ enum eth_rss_mode { */ struct eth_rss_update_ramrod_data { u8 rss_engine_id; - u8 capabilities; + u8 rss_mode; + __le16 capabilities; #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 +#define 
ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 u8 rss_result_mask; - u8 rss_mode; - __le16 udp_4tuple_dst_port_mask; - __le16 udp_4tuple_dst_port_value; + u8 reserved3; + __le16 reserved4; u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; __le32 rss_key[T_ETH_RSS_KEY]; __le32 echo; - __le32 reserved3; + __le32 reserved5; }; @@ -4260,10 +4297,10 @@ enum eth_tunnel_lso_inc_ip_id { /* In case tunnel exist and L4 checksum offload, * the pseudo checksum location, on packet or on BD. */ -enum eth_tunnel_non_lso_pcsum_location { - PCSUM_ON_PKT, - PCSUM_ON_BD, - MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION +enum eth_tunnel_non_lso_csum_location { + CSUM_ON_PKT, + CSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; /* @@ -4310,8 +4347,10 @@ struct eth_tx_start_bd { __le16 vlan_or_ethertype; struct eth_tx_bd_flags bd_flags; u8 general_data; -#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) +#define ETH_TX_START_BD_HDR_NBDS (0x7<<0) #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3) +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) @@ -4387,8 +4426,8 @@ struct eth_tx_parse_2nd_bd { __le16 global_data; #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 #define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) @@ -4397,9 +4436,14 @@ struct eth_tx_parse_2nd_bd { #define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 -#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) -#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 - __le16 reserved1; +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + u8 bd_type; +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4 + u8 reserved3; u8 tcp_flags; #define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) #define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 @@ -4417,7 +4461,7 @@ struct eth_tx_parse_2nd_bd { #define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 #define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) #define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 - u8 reserved2; + u8 reserved4; u8 tunnel_udp_hdr_start_w; u8 fw_ip_hdr_to_payload_w; __le16 fw_ip_csum_wo_len_flags_frag; @@ -5205,10 +5249,18 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - __le16 reserved1[2]; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + u8 sd_accept_mf_clss_fail; + __le16 vxlan_dst_port; + __le16 sd_accept_mf_clss_fail_ethtype; + __le16 sd_vlan_eth_type; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + 
u8 sd_accept_mf_clss_fail_match_ethtype; + u8 no_added_tags; }; struct function_update_data { @@ -5225,12 +5277,20 @@ struct function_update_data { u8 tx_switch_suspend_change_flg; u8 tx_switch_suspend; u8 echo; + u8 update_tunn_cfg_flg; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + __le16 vxlan_dst_port; + u8 sd_vlan_force_pri_change_flg; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_vlan_tag_change_flg; + u8 sd_vlan_eth_type_change_flg; u8 reserved1; - u8 update_gre_cfg_flg; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - u32 reserved3; + __le16 sd_vlan_tag; + __le16 sd_vlan_eth_type; }; /* @@ -5259,17 +5319,9 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; -/* GRE RSS Mode */ -enum gre_rss_mode { - GRE_OUTER_HEADERS_RSS, - GRE_INNER_HEADERS_RSS, - NVGRE_KEY_ENTROPY_RSS, - MAX_GRE_RSS_MODE -}; /* GRE Tunnel Mode */ enum gre_tunnel_type { - NO_GRE_TUNNEL, NVGRE_TUNNEL, L2GRE_TUNNEL, IPGRE_TUNNEL, @@ -5442,6 +5494,7 @@ enum ip_ver { * Malicious VF error ID */ enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, VF_PF_CHANNEL_NOT_READY, ETH_ILLEGAL_BD_LENGTHS, ETH_PACKET_TOO_SHORT, @@ -5602,6 +5655,16 @@ struct protocol_common_spe { union protocol_common_specific_data data; }; +/* The data for the Set Timesync Ramrod */ +struct set_timesync_ramrod_data { + u8 drift_adjust_cmd; + u8 offset_cmd; + u8 add_sub_drift_adjust_value; + u8 drift_adjust_value; + u32 drift_adjust_period; + struct regpair offset_delta; +}; + /* * The send queue element */ @@ -5724,10 +5787,38 @@ struct tstorm_vf_zone_data { struct regpair reserved; }; +/* Add or Subtract Value for Set Timesync Ramrod */ +enum ts_add_sub_value { + TS_SUB_VALUE, + TS_ADD_VALUE, + MAX_TS_ADD_SUB_VALUE +}; -/* - * zone A per-queue data - */ +/* Drift-Adjust Commands for Set Timesync Ramrod */ +enum ts_drift_adjust_cmd { + TS_DRIFT_ADJUST_KEEP, + TS_DRIFT_ADJUST_SET, + TS_DRIFT_ADJUST_RESET, + MAX_TS_DRIFT_ADJUST_CMD +}; + +/* Offset Commands for Set Timesync Ramrod */ +enum ts_offset_cmd { + TS_OFFSET_KEEP, + TS_OFFSET_INC, + TS_OFFSET_DEC, + MAX_TS_OFFSET_CMD +}; + +/* Tunnel Mode */ +enum tunnel_mode { + TUNN_MODE_NONE, + TUNN_MODE_VXLAN, + TUNN_MODE_GRE, + MAX_TUNNEL_MODE +}; + + /* zone A per-queue data */ struct ustorm_queue_zone_data { struct ustorm_eth_rx_producers eth_rx_producers; struct regpair reserved[3]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d1c093dcb054aebb71b1ee701cf175e8ab804515..74fbf9ea7bd878e4ee3f7d1561f4b74afe46ca54 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,6 @@ #include "bnx2x_vfpf.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" - #include #include "bnx2x_fw_file_hdr.h" /* FW files */ @@ -290,6 +290,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); * General service functions ****************************************************************************/ +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); + static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { @@ -523,6 +525,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ + spin_lock_bh(&bp->dmae_lock); /* reset completion */ @@ -551,7 +554,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, } unlock: + spin_unlock_bh(&bp->dmae_lock); + return rc; } @@ -646,119 +651,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); } +enum storms { + XSTORM, + TSTORM, + CSTORM, + USTORM, + MAX_STORMS +}; + +#define STORMS_NUM 4 +#define REGS_IN_ENTRY 4 + +static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, + enum storms storm, + int entry) +{ + switch (storm) { + case XSTORM: + return XSTORM_ASSERT_LIST_OFFSET(entry); + case TSTORM: + return TSTORM_ASSERT_LIST_OFFSET(entry); + case CSTORM: + return CSTORM_ASSERT_LIST_OFFSET(entry); + case USTORM: + return USTORM_ASSERT_LIST_OFFSET(entry); + case MAX_STORMS: + default: + BNX2X_ERR("unknown storm\n"); + } + return -EINVAL; +} + static int bnx2x_mc_assert(struct bnx2x *bp) { char last_idx; - int i, rc = 0; - u32 row0, row1, row2, row3; - - /* XSTORM */ - last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* TSTORM */ - last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + int i, j, rc = 0; + enum storms storm; + u32 regs[REGS_IN_ENTRY]; + u32 bar_storm_intmem[STORMS_NUM] = { + BAR_XSTRORM_INTMEM, + BAR_TSTRORM_INTMEM, + BAR_CSTRORM_INTMEM, + BAR_USTRORM_INTMEM + }; + u32 storm_assert_list_index[STORMS_NUM] = { + XSTORM_ASSERT_LIST_INDEX_OFFSET, + TSTORM_ASSERT_LIST_INDEX_OFFSET, + CSTORM_ASSERT_LIST_INDEX_OFFSET, + USTORM_ASSERT_LIST_INDEX_OFFSET + }; + char *storms_string[STORMS_NUM] = { + "XSTORM", + "TSTORM", + "CSTORM", + "USTORM" + }; - /* CSTORM */ - last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_CSTRORM_INTMEM 
+ - CSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; + for (storm = XSTORM; storm < MAX_STORMS; storm++) { + last_idx = REG_RD8(bp, bar_storm_intmem[storm] + + storm_assert_list_index[storm]); + if (last_idx) + BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n", + storms_string[storm], last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + /* read a single assert entry */ + for (j = 0; j < REGS_IN_ENTRY; j++) + regs[j] = REG_RD(bp, bar_storm_intmem[storm] + + bnx2x_get_assert_list_entry(bp, + storm, + i) + + sizeof(u32) * j); + + /* log entry if it contains a valid assert */ + if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + storms_string[storm], i, regs[3], + regs[2], regs[1], regs[0]); + rc++; + } else { + break; + } } } - /* USTORM */ - last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n", + CHIP_IS_E1(bp) ? "everest1" : + CHIP_IS_E1H(bp) ? "everest1h" : + CHIP_IS_E2(bp) ? 
"everest2" : "everest3", + BCM_5710_FW_MAJOR_VERSION, + BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION); return rc; } @@ -983,6 +967,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) u32 *sb_data_p; struct bnx2x_fp_txdata txdata; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + /* Rx */ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", i, fp->rx_bd_prod, fp->rx_bd_cons, @@ -995,7 +985,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_cos_in_tx_queue(fp, cos) { + if (!fp->txdata_ptr[cos]) + break; + txdata = *fp->txdata_ptr[cos]; + + if (!txdata.tx_cons_sb) + continue; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, @@ -1097,6 +1094,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) for_each_valid_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); for (j = start; j != end; j = RX_BD(j + 1)) { @@ -1130,9 +1133,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_valid_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + + if (!bp->fp) + break; + for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + if (!fp->txdata_ptr[cos]) + break; + + if (!txdata->tx_cons_sb) + continue; + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); for (j = start; j != end; j = TX_BD(j + 1)) { @@ -2071,8 +2084,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) else value = 0; - DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); - return value; } @@ -2894,6 +2905,57 @@ static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) } } +static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) +{ + struct bnx2x_func_switch_update_params *switch_update_params; + struct bnx2x_func_state_params func_params; + + memset(&func_params, 0, sizeof(struct bnx2x_func_state_params)); + switch_update_params = &func_params.params.switch_update; + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + if (IS_MF_UFP(bp)) { + int func = BP_ABS_FUNC(bp); + u32 val; + + /* Re-learn the S-tag from shmem */ + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + } else { + BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n"); + goto fail; + } + + /* Configure new S-tag in LLH */ + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, + bp->mf_ov); + + /* Send Ramrod to update FW of change */ + __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes); + switch_update_params->vlan = bp->mf_ov; + + if (bnx2x_func_state_change(bp, &func_params) < 0) { + BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", + bp->mf_ov); + goto fail; + } + + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); + + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + + return; + } + + /* not supported by SW yet */ +fail: + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); +} + static void bnx2x_pmf_update(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -3286,7 +3348,8 @@ static void 
bnx2x_e1h_enable(struct bnx2x *bp) { int port = BP_PORT(bp); - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); @@ -3641,14 +3704,30 @@ void bnx2x_update_mng_version(struct bnx2x *bp) ethver, iscsiver, fcoever); } -static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) +static void bnx2x_oem_event(struct bnx2x *bp, u32 event) { - DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); + u32 cmd_ok, cmd_fail; - if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { + /* sanity */ + if (event & DRV_STATUS_DCC_EVENT_MASK && + event & DRV_STATUS_OEM_EVENT_MASK) { + BNX2X_ERR("Received simultaneous events %08x\n", event); + return; + } - /* - * This is the only place besides the function initialization + if (event & DRV_STATUS_DCC_EVENT_MASK) { + cmd_fail = DRV_MSG_CODE_DCC_FAILURE; + cmd_ok = DRV_MSG_CODE_DCC_OK; + } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ { + cmd_fail = DRV_MSG_CODE_OEM_FAILURE; + cmd_ok = DRV_MSG_CODE_OEM_OK; + } + + DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); + + if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF)) { + /* This is the only place besides the function initialization * where the bp->flags can change so it is done without any * locks */ @@ -3663,18 +3742,22 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) bnx2x_e1h_enable(bp); } - dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF); } - if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + + if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) { bnx2x_config_mf_bw(bp); - dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION); } /* Report results to MCP */ - if (dcc_event) - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); + if (event) + bnx2x_fw_command(bp, cmd_fail, 0); else - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); + bnx2x_fw_command(bp, cmd_ok, 0); } /* must be called under the spq lock */ @@ -4156,9 +4239,12 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) func_mf_config[BP_ABS_FUNC(bp)].config); val = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_status); - if (val & DRV_STATUS_DCC_EVENT_MASK) - bnx2x_dcc_event(bp, - (val & DRV_STATUS_DCC_EVENT_MASK)); + + if (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK)) + bnx2x_oem_event(bp, + (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK))); if (val & DRV_STATUS_SET_MF_BW) bnx2x_set_mf_bw(bp); @@ -4184,6 +4270,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) val & DRV_STATUS_AFEX_EVENT_MASK); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bnx2x_handle_eee_event(bp); + + if (val & DRV_STATUS_OEM_UPDATE_SVID) + bnx2x_handle_update_svid_cmd(bp); + if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ @@ -4678,7 +4768,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! 
*/ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: @@ -4757,21 +4847,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, _print_next_block((*par_num)++, "MCP ROM"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) @@ -4803,7 +4893,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! */ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: @@ -5452,6 +5542,14 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; goto next_spqe; + + case EVENT_RING_OPCODE_SET_TIMESYNC: + DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, + "got set_timesync ramrod completion\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_SET_TIMESYNC)) + break; + goto next_spqe; } switch (opcode | bp->state) { @@ -6102,7 +6200,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ - if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + if (rx_mode != BNX2X_RX_MODE_NONE) { __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); } @@ -7662,7 +7760,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; /* Function parameters */ - switch_update_params->suspend = suspend; + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes); + if (suspend) + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); rc = bnx2x_func_state_change(bp, &func_params); @@ -7907,8 +8009,11 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(bp)) { - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, + bp->mf_ov); + } } bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); @@ -8300,13 +8405,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp, int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { - if (is_zero_ether_addr(bp->dev->dev_addr) && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { - DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, - "Ignoring Zero MAC for STORAGE SD mode\n"); - return 0; - } - if (IS_PF(bp)) { unsigned long ramrod_flags = 0; @@ -9025,7 +9123,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -9044,6 +9142,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } +static void bnx2x_disable_ptp(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + /* Disable sending PTP packets to host */ + REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Reset PTP event detection rules */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x0); +} + +/* Called during unload, to stop PTP-related stuff */ +void bnx2x_stop_ptp(struct bnx2x *bp) +{ + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. + */ + cancel_work_sync(&bp->ptp_task); + + if (bp->ptp_tx_skb) { + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + } + + /* Disable PTP in HW */ + bnx2x_disable_ptp(bp); + + DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); +} + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); @@ -9162,6 +9302,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) #endif } + /* stop_ptp should be after the Tx queues are drained to prevent + * scheduling to the cancelled PTP work queue. It should also be after + * function stop ramrod is sent, since as part of this ramrod FW access + * PTP registers. + */ + bnx2x_stop_ptp(bp); + /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); /* Delete all NAPI objects */ @@ -11283,15 +11430,14 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { - /* - * Read the WWN info only if the FCoE feature is enabled for + /* Read the WWN info only if the FCoE feature is enabled for * this function. */ - if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) + if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) + bnx2x_get_ext_wwn_info(bp, func); + } else { + if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_ext_wwn_info(bp, func); - - } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) { - bnx2x_get_ext_wwn_info(bp, func); } BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); @@ -11329,7 +11475,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) * In non SD mode features configuration comes from struct * func_ext_config. */ - if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) { + if (!IS_MF_SD(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. 
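[Illustrative sketch, not part of the patch: the bnx2x_get_cnic_mac_hwinfo() hunk ending here checks the per-function feature flags from shared memory and then starts reading a MAC address that the firmware stores as two 32-bit words (an upper and a lower half). The plain-C snippet below shows how such a word pair can be packed into a 6-byte address. The word values are invented, and the byte layout (upper word supplies bytes 0-1, lower word bytes 2-5) is an assumption modelled on the driver's bnx2x_set_mac_buf() helper, which is not visible in this hunk.]

    #include <stdint.h>
    #include <stdio.h>

    /* Pack two config words into a MAC buffer. The layout is an assumption
     * modelled on bnx2x_set_mac_buf(): the low 16 bits of the upper word
     * become bytes 0-1, the lower word becomes bytes 2-5. */
    static void pack_mac(uint8_t mac[6], uint32_t mac_hi, uint32_t mac_lo)
    {
            mac[0] = (uint8_t)(mac_hi >> 8);
            mac[1] = (uint8_t)mac_hi;
            mac[2] = (uint8_t)(mac_lo >> 24);
            mac[3] = (uint8_t)(mac_lo >> 16);
            mac[4] = (uint8_t)(mac_lo >> 8);
            mac[5] = (uint8_t)mac_lo;
    }

    int main(void)
    {
            uint8_t mac[6];

            pack_mac(mac, 0x0000001b, 0x21aabbcc);  /* invented example words */
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
            return 0;
    }

[End of sketch; the patch continues below.]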
@@ -11448,7 +11594,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) + if (!is_valid_ether_addr(bp->dev->dev_addr)) dev_err(&bp->pdev->dev, "bad Ethernet MAC address configuration: %pM\n" "change it manually before bringing up the appropriate network interface\n", @@ -11478,11 +11624,27 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) return cfg; } +static void validate_set_si_mode(struct bnx2x *bp) +{ + u8 func = BP_ABS_FUNC(bp); + u32 val; + + val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + + /* check for legal mac (upper bytes) */ + if (val != 0xffff) { + bp->mf_mode = MULTI_FUNCTION_SI; + bp->mf_config[BP_VN(bp)] = + MF_CFG_RD(bp, func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal MAC address for SI\n"); +} + static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); int vn; - u32 val = 0; + u32 val = 0, val2 = 0; int rc = 0; bnx2x_get_common_hwinfo(bp); @@ -11562,6 +11724,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_ov = 0; bp->mf_mode = 0; + bp->mf_sub_mode = 0; vn = BP_VN(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { @@ -11591,15 +11754,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) switch (val) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: - val = MF_CFG_RD(bp, func_mf_config[func]. - mac_upper); - /* check for legal mac (upper bytes)*/ - if (val != 0xffff) { - bp->mf_mode = MULTI_FUNCTION_SI; - bp->mf_config[vn] = MF_CFG_RD(bp, - func_mf_config[func].config); - } else - BNX2X_DEV_INFO("illegal MAC address for SI\n"); + validate_set_si_mode(bp); break; case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: if ((!CHIP_IS_E1x(bp)) && @@ -11627,9 +11782,33 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_UFP; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: bp->mf_config[vn] = 0; break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: + val2 = SHMEM_RD(bp, + dev_info.shared_hw_config.config_3); + val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; + switch (val2) { + case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: + validate_set_si_mode(bp); + bp->mf_sub_mode = + SUB_MF_MODE_NPAR1_DOT_5; + break; + default: + /* Unknown configuration */ + bp->mf_config[vn] = 0; + BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n", + val); + } + break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; @@ -11650,6 +11829,11 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); + } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + dev_err(&bp->pdev->dev, + "Unexpected - no valid MF OV for func %d in UFP mode\n", + func); + bp->path_has_ovlan = true; } else { dev_err(&bp->pdev->dev, "No valid MF OV for func %d, aborting\n", @@ -11898,9 +12082,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bp->disable_tpa = disable_tpa; - bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); + bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); /* Reduce memory usage in kdump environment by disabling TPA */ - bp->disable_tpa |= reset_devices; + bp->disable_tpa |= is_kdump_kernel(); /* Set TPA flags */ if (bp->disable_tpa) { 
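[Illustrative sketch, not part of the patch: the bnx2x_init_bp() hunk just above replaces the reset_devices check with is_kdump_kernel(), so TPA is disabled automatically whenever the driver runs in a crash-dump capture kernel, where memory is scarce. A minimal kernel-style sketch of the same pattern follows; struct foo_adapter and its fields are hypothetical, while is_kdump_kernel() is the real helper declared in <linux/crash_dump.h>.]

    #include <linux/crash_dump.h>
    #include <linux/types.h>

    /* Hypothetical adapter state, used only for this sketch. */
    struct foo_adapter {
            bool disable_tpa;
            int  rx_ring_size;
    };

    /* Shrink memory-hungry features when running in a kdump capture
     * kernel, mirroring what the patch does with bp->disable_tpa. */
    static void foo_scale_for_kdump(struct foo_adapter *ap)
    {
            if (is_kdump_kernel()) {
                    ap->disable_tpa = true;
                    ap->rx_ring_size = 64;  /* arbitrary small ring */
            }
    }

[End of sketch; the patch continues below.]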
@@ -11918,7 +12102,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->mrrs = mrrs; - bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; + bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; if (IS_VF(bp)) bp->rx_ring_size = MAX_RX_AVAIL; @@ -11976,6 +12160,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->dump_preset_idx = 1; + if (CHIP_IS_E3B0(bp)) + bp->flags |= PTP_SUPPORTED; + return rc; } @@ -12235,7 +12422,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp) bp->rx_mode = rx_mode; /* handle ISCSI SD mode */ - if (IS_MF_ISCSI_SD(bp)) + if (IS_MF_ISCSI_ONLY(bp)) bp->rx_mode = BNX2X_RX_MODE_NONE; /* Schedule the rx_mode command */ @@ -12308,13 +12495,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct bnx2x *bp = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); - DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", - mdio->phy_id, mdio->reg_num, mdio->val_in); - if (!netif_running(dev)) return -EAGAIN; - return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + switch (cmd) { + case SIOCSHWTSTAMP: + return bnx2x_hwtstamp_ioctl(bp, ifr); + default: + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + } } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -12338,7 +12529,7 @@ static int bnx2x_validate_addr(struct net_device *dev) if (IS_VF(bp)) bnx2x_sample_bulletin(bp); - if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; } @@ -12958,6 +13149,191 @@ static int set_is_vf(int chip_id) } } +/* nig_tsgen registers relative address */ +#define tsgen_ctrl 0x0 +#define tsgen_freecount 0x10 +#define tsgen_synctime_t0 0x20 +#define tsgen_offset_t0 0x28 +#define tsgen_drift_t0 0x30 +#define tsgen_synctime_t1 0x58 +#define tsgen_offset_t1 0x60 +#define tsgen_drift_t1 0x68 + +/* FW workaround for setting drift */ +static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, + int best_val, int best_period) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + set_timesync_params->add_sub_drift_adjust_value = + drift_dir ? 
TS_ADD_VALUE : TS_SUB_VALUE; + set_timesync_params->drift_adjust_value = best_val; + set_timesync_params->drift_adjust_period = best_period; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + int rc; + int drift_dir = 1; + int val, period, period1, period2, dif, dif1, dif2; + int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + + DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP adjfreq called while the interface is down\n"); + return -EFAULT; + } + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb == 0) { + best_val = 1; + best_period = 0x1FFFFFF; + } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { + best_val = 31; + best_period = 1; + } else { + /* Changed not to allow val = 8, 16, 24 as these values + * are not supported in workaround. + */ + for (val = 0; val <= 31; val++) { + if ((val & 0x7) == 0) + continue; + period1 = val * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (val * 1000000 / period1); + else + dif1 = BNX2X_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (val * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_val = val; + best_period = period; + } + } + } + + rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, + best_period); + if (rc) { + BNX2X_ERR("Failed to set drift\n"); + return -EFAULT; + } + + DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val, + best_period); + + return 0; +} + +static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 now; + + DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); + + now = timecounter_read(&bp->timecounter); + now += delta; + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, now); + + return 0; +} + +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + u32 remainder; + + ns = timecounter_read(&bp->timecounter); + + DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + + BNX2X_ERR("PHC ancillary features are not supported\n"); + return -ENOTSUPP; +} + +void bnx2x_register_phc(struct bnx2x *bp) +{ + /* Fill the ptp_clock_info struct and register PTP clock*/ + bp->ptp_clock_info.owner = THIS_MODULE; + snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); + bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In 
PPB */ + bp->ptp_clock_info.n_alarm = 0; + bp->ptp_clock_info.n_ext_ts = 0; + bp->ptp_clock_info.n_per_out = 0; + bp->ptp_clock_info.pps = 0; + bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; + bp->ptp_clock_info.gettime = bnx2x_ptp_gettime; + bp->ptp_clock_info.settime = bnx2x_ptp_settime; + bp->ptp_clock_info.enable = bnx2x_ptp_enable; + + bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); + if (IS_ERR(bp->ptp_clock)) { + bp->ptp_clock = NULL; + BNX2X_ERR("PTP clock registeration failed\n"); + } +} + static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -13129,6 +13505,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, "Unknown", dev->base_addr, bp->pdev->irq, dev->dev_addr); + bnx2x_register_phc(bp); + return 0; init_one_exit: @@ -13155,6 +13533,11 @@ static void __bnx2x_remove(struct pci_dev *pdev, struct bnx2x *bp, bool remove_netdev) { + if (bp->ptp_clock) { + ptp_clock_unregister(bp->ptp_clock); + bp->ptp_clock = NULL; + } + /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -14136,3 +14519,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) REG_RD(bp, pretend_reg); return 0; } + +static void bnx2x_ptp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); + int port = BP_PORT(bp); + u32 val_seq; + u64 timestamp, ns; + struct skb_shared_hwtstamps shhwtstamps; + + /* Read Tx timestamp registers */ + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID); + if (val_seq & 0x10000) { + /* There is a valid timestamp value */ + timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_LSB); + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + + DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); + } else { + DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n"); + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&bp->ptp_task); + } +} + +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) +{ + int port = BP_PORT(bp); + u64 timestamp, ns; + + timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB); + + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + + DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC */ +static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) +{ + struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); + int port = BP_PORT(bp); + u32 wb_data[2]; + u64 phc_cycles; + + REG_RD_DMAE(bp, port ? 
NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : + NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2); + phc_cycles = wb_data[1]; + phc_cycles = (phc_cycles << 32) + wb_data[0]; + + DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void bnx2x_init_cyclecounter(struct bnx2x *bp) +{ + memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); + bp->cyclecounter.read = bnx2x_cyclecounter_read; + bp->cyclecounter.mask = CLOCKSOURCE_MASK(64); + bp->cyclecounter.shift = 1; + bp->cyclecounter.mult = 1; +} + +static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + + return bnx2x_func_state_change(bp, &func_params); +} + +int bnx2x_enable_ptp_packets(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params q_params; + int rc, i; + + /* send queue update ramrod to enable PTP packets */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + &q_params.params.update.update_flags); + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to enable PTP packets\n"); + return rc; + } + } + + return 0; +} + +int bnx2x_configure_ptp_filters(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int rc; + + if (!bp->hwtstamp_ioctl_called) + return 0; + + switch (bp->tx_type) { + case HWTSTAMP_TX_ON: + bp->flags |= TX_TIMESTAMPING_EN; + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + BNX2X_ERR("One-step timestamping is not supported\n"); + return -ERANGE; + } + + switch (bp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + bp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+		/* Initialize PTP detection L2 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+		       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+		REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+		       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+		break;
+	}
+
+	/* Indicate to FW that this PF expects recorded PTP packets */
+	rc = bnx2x_enable_ptp_packets(bp);
+	if (rc)
+		return rc;
+
+	/* Enable sending PTP packets to host */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+	return 0;
+}
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int rc;
+
+	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+	   config.tx_type, config.rx_filter);
+
+	if (config.flags) {
+		BNX2X_ERR("config.flags is reserved for future use\n");
+		return -EINVAL;
+	}
+
+	bp->hwtstamp_ioctl_called = 1;
+	bp->tx_type = config.tx_type;
+	bp->rx_filter = config.rx_filter;
+
+	rc = bnx2x_configure_ptp_filters(bp);
+	if (rc)
+		return rc;
+
+	config.rx_filter = bp->rx_filter;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+	       -EFAULT : 0;
+}
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+	int rc, port = BP_PORT(bp);
+	u32 wb_data[2];
+
+	/* Reset PTP event detection rules - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable PTP packets to host - will be configured in the IOCTL */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+	/* Enable the PTP feature */
+	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+	       NIG_REG_P0_PTP_EN, 0x3F);
+
+	/* Enable the free-running counter */
+	wb_data[0] = 0;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+	/* Reset drift register (offset register is not reset) */
+	rc = bnx2x_send_reset_timesync_ramrod(bp);
+	if (rc) {
+		BNX2X_ERR("Failed to reset PHC drift register\n");
+		return -EFAULT;
+	}
+
+	/* Reset possibly old timestamps */
+	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+	REG_WR(bp, port ?
NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + + return 0; +} + +/* Called during load, to initialize PTP-related stuff */ +void bnx2x_init_ptp(struct bnx2x *bp) +{ + int rc; + + /* Configure PTP in HW */ + rc = bnx2x_configure_ptp(bp); + if (rc) { + BNX2X_ERR("Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (!bp->timecounter_init_done) { + bnx2x_init_cyclecounter(bp); + timecounter_init(&bp->timecounter, &bp->cyclecounter, + ktime_to_ns(ktime_get_real())); + bp->timecounter_init_done = 1; + } + + DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 2beb5430b876cb8d77cec26661c51140ab2712ed..b0779d773343cbc82aa825eed3024205581ad33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2182,6 +2182,45 @@ #define NIG_REG_P0_HWPFC_ENABLE 0x18078 #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. 
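The SIOCSHWTSTAMP handler added above (bnx2x_hwtstamp_ioctl) is driven entirely from user space through the standard hwtstamp_config interface. The minimal user-space sketch below is an editorial illustration, not part of the patch; it assumes an interface named eth0 and CAP_NET_ADMIN, and it checks the rx_filter written back by the driver, since bnx2x may program a broader filter class than the one requested:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	int main(void)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return 1;

		memset(&cfg, 0, sizeof(cfg));	/* cfg.flags must stay 0 */
		cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp transmitted PTP frames */
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* L2/L4 PTPv2 event messages */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an example */
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
			perror("SIOCSHWTSTAMP");
			close(fd);
			return 1;
		}

		/* The driver reports the filter it actually programmed. */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}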
+ */ +#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4 +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac /* [RW 1] Input enable for RX MAC interface. */ #define NIG_REG_P0_MAC_IN_EN 0x185ac /* [RW 1] Output enable for TX MAC interface */ @@ -2194,6 +2233,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P0_PTP_EN 0x18788 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2300,7 +2350,46 @@ * Ethernet header. */ #define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 -#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 +#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460a +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. 
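The PARAM_MASK bit layout documented in these comments maps directly onto the values bnx2x_configure_ptp_filters() writes (0x7EE, 0x7EA, 0x6BF, 0x6AA). The small user-space sketch below is illustrative only (parameter names are abbreviated from the comment text); it decodes which classification parameters a given mask leaves enabled, remembering that a cleared bit means the parameter is used:

	#include <stdio.h>

	static const char *ptp_param_names[11] = {
		"IPv4 DA 224.0.1.129", "IPv4 DA 224.0.0.107",
		"IPv6 DA FF0*::181", "IPv6 DA FF02::6B",
		"UDP dport 319", "UDP dport 320",
		"Ethertype 0x88F7", "configurable Ethertype",
		"MAC DA 01-1B-19-00-00-00", "MAC DA 01-80-C2-00-00-0E",
		"configurable MAC DA",
	};

	static void decode_param_mask(unsigned int mask)
	{
		int i;

		for (i = 0; i < 11; i++)
			if (!(mask & (1u << i)))	/* 0 = parameter is used */
				printf("param %2d enabled: %s\n", i, ptp_param_names[i]);
	}

	int main(void)
	{
		/* Value programmed for HWTSTAMP_FILTER_PTP_V2_EVENT: leaves the
		 * primary IPv4/IPv6 DAs, UDP port 319, Ethertype 0x88F7 and the
		 * primary MAC DA active.
		 */
		decode_param_mask(0x6AA);
		return 0;
	}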
Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4 /* [RW 32] Specify the client number to be assigned to each priority of the * strict priority arbiter. This register specifies bits 31:0 of the 36-bit * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 @@ -2342,6 +2431,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P1_PTP_EN 0x187b0 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2361,6 +2461,78 @@ #define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c /* [R 1] TLLH FIFO is empty. */ #define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . 
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc /* [RW 32] Specify which of the credit registers the client is to be mapped * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are * for client 0; bits [35:32] are for client 8. For clients that are not @@ -2513,6 +2685,10 @@ swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then ort swap is equal to ~nig_registers_port_swap.port_swap */ #define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [WB 64] Addresses for TimeSync related registers in the timesync + * generator sub-module. 
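The free-running tsgen counter read through NIG_REG_TIMESYNC_GEN_REG is converted to nanoseconds by the kernel's cyclecounter/timecounter helpers; bnx2x_init_cyclecounter() sets mult = 1 and shift = 1, and adjtime/settime simply re-initialize the timecounter at a new nanosecond base without touching the hardware counter. The user-space model below is a simplified stand-in for those helpers (it is not the kernel API) and shows the arithmetic involved:

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified stand-ins for the kernel's cyclecounter/timecounter. */
	struct toy_cyclecounter {
		uint64_t (*read)(void);	/* returns raw counter cycles */
		uint32_t mult;		/* cycle-to-ns multiplier */
		uint32_t shift;		/* cycle-to-ns divisor (power of two) */
	};

	struct toy_timecounter {
		struct toy_cyclecounter *cc;
		uint64_t cycle_last;	/* counter value at last read */
		uint64_t nsec;		/* accumulated nanoseconds */
	};

	static uint64_t cyc2ns(struct toy_cyclecounter *cc, uint64_t cycles)
	{
		return (cycles * cc->mult) >> cc->shift;
	}

	static void toy_timecounter_init(struct toy_timecounter *tc,
					 struct toy_cyclecounter *cc,
					 uint64_t start_ns)
	{
		tc->cc = cc;
		tc->cycle_last = cc->read();
		tc->nsec = start_ns;	/* re-basing here is all adjtime/settime need */
	}

	static uint64_t toy_timecounter_read(struct toy_timecounter *tc)
	{
		uint64_t now = tc->cc->read();
		uint64_t delta = now - tc->cycle_last;	/* wrap-safe for a 64-bit counter */

		tc->cycle_last = now;
		tc->nsec += cyc2ns(tc->cc, delta);
		return tc->nsec;
	}

	/* Fake hardware counter ticking twice per ns, matching mult = 1,
	 * shift = 1 as used by bnx2x_init_cyclecounter().
	 */
	static uint64_t fake_counter;
	static uint64_t fake_read(void) { return fake_counter; }

	int main(void)
	{
		struct toy_cyclecounter cc = { fake_read, 1, 1 };
		struct toy_timecounter tc;

		toy_timecounter_init(&tc, &cc, 1000);	/* start at 1000 ns */
		fake_counter += 200;			/* 200 cycles -> 100 ns */
		printf("time = %llu ns\n",
		       (unsigned long long)toy_timecounter_read(&tc));	/* 1100 */
		return 0;
	}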
+ */ +#define NIG_REG_TIMESYNC_GEN_REG 0x18800 /* [RW 1] output enable for RX_XCM0 IF */ #define NIG_REG_XCM0_OUT_EN 0x100f0 /* [RW 1] output enable for RX_XCM1 IF */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index b1936044767aca73bc494609095ae2cc4fef55f0..7bc2924a7e24f071d6f39adfed574dff7c2af71a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; struct eth_rss_update_ramrod_data *data = (struct eth_rss_update_ramrod_data *)(r->rdata); + u16 caps = 0; u8 rss_mode = 0; int rc; @@ -4042,28 +4043,34 @@ static int bnx2x_setup_rss(struct bnx2x *bp, /* RSS capabilities */ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + + /* RSS keys */ + if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { + memcpy(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + data->capabilities = cpu_to_le16(caps); /* Hashing mask */ data->rss_result_mask = p->rss_result_mask; @@ -4084,13 +4091,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, if (netif_msg_ifup(bp)) bnx2x_debug_print_ind_table(bp, p); - /* RSS keys */ - if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { - memcpy(&data->rss_key[0], &p->rss_key[0], - sizeof(data->rss_key)); - data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; - } - /* No need for an explicit memory barrier here as long as we * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, test_bit(BNX2X_Q_FLG_FCOE, flags) ? 
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; + DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); } @@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); tx_data->force_default_pri_flg = test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); - + tx_data->refuse_outband_vlan_flg = + test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); tx_data->tunnel_lso_inc_ip_id = test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); tx_data->tunnel_non_lso_pcsum_location = - test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : - PCSUM_ON_BD; + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : + CSUM_ON_BD; tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; @@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, data->tx_switching_change_flg = test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, ¶ms->update_flags); + + /* PTP */ + data->handle_ptp_pkts_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS, ¶ms->update_flags); + data->handle_ptp_pkts_change_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, ¶ms->update_flags); } static inline int bnx2x_q_send_update(struct bnx2x *bp, @@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_STARTED; + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + else if (cmd == BNX2X_F_CMD_TX_STOP) next_state = BNX2X_F_STATE_TX_STOPPED; @@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_TX_STOPPED; + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + else if (cmd == BNX2X_F_CMD_TX_START) next_state = BNX2X_F_STATE_STARTED; @@ -5652,9 +5669,27 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; - rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + rdata->tunnel_mode = start_params->tunnel_mode; + rdata->gre_tunnel_type = start_params->gre_tunnel_type; + rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; + rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->sd_accept_mf_clss_fail = start_params->class_fail; + if (start_params->class_fail_ethtype) { + rdata->sd_accept_mf_clss_fail_match_ethtype = 1; + rdata->sd_accept_mf_clss_fail_ethtype = + cpu_to_le16(start_params->class_fail_ethtype); + } + rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri; + rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val; + if (start_params->sd_vlan_eth_type) + rdata->sd_vlan_eth_type = + cpu_to_le16(start_params->sd_vlan_eth_type); + else + rdata->sd_vlan_eth_type = + cpu_to_le16(0x8100); + + rdata->no_added_tags = start_params->no_added_tags; /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -5680,8 +5715,52 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill 
the ramrod data with provided parameters */ - rdata->tx_switch_suspend_change_flg = 1; - rdata->tx_switch_suspend = switch_update_params->suspend; + if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes)) { + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = + test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_tag_change_flg = 1; + rdata->sd_vlan_tag = + cpu_to_le16(switch_update_params->vlan); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_eth_type_change_flg = 1; + rdata->sd_vlan_eth_type = + cpu_to_le16(switch_update_params->vlan_eth_type); + } + + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_force_pri_change_flg = 1; + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + &switch_update_params->changes)) + rdata->sd_vlan_force_pri_flg = 1; + rdata->sd_vlan_force_pri_flg = + switch_update_params->vlan_force_prio; + } + + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes)) { + rdata->update_tunn_cfg_flg = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + &switch_update_params->changes)) + rdata->tunn_clss_en = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + &switch_update_params->changes)) + rdata->inner_gre_rss_en = 1; + rdata->tunnel_mode = switch_update_params->tunnel_mode; + rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; + rdata->vxlan_dst_port = cpu_to_le16(4789); + } + rdata->echo = SWITCH_UPDATE; /* No need for an explicit memory barrier here as long as we @@ -5817,6 +5896,42 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, U64_LO(data_mapping), NONE_CONNECTION_TYPE); } +static inline +int bnx2x_func_send_set_timesync(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct set_timesync_ramrod_data *rdata = + (struct set_timesync_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_set_timesync_params *set_timesync_params = + ¶ms->params.set_timesync; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd; + rdata->offset_cmd = set_timesync_params->offset_cmd; + rdata->add_sub_drift_adjust_value = + set_timesync_params->add_sub_drift_adjust_value; + rdata->drift_adjust_value = set_timesync_params->drift_adjust_value; + rdata->drift_adjust_period = set_timesync_params->drift_adjust_period; + rdata->offset_delta.lo = + cpu_to_le32(U64_LO(set_timesync_params->offset_delta)); + rdata->offset_delta.hi = + cpu_to_le32(U64_HI(set_timesync_params->offset_delta)); + + DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n", + rdata->drift_adjust_cmd, rdata->offset_cmd, + rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value, + rdata->drift_adjust_period, rdata->offset_delta.lo, + rdata->offset_delta.hi); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + static int bnx2x_func_send_cmd(struct bnx2x *bp, struct bnx2x_func_state_params *params) { @@ -5839,6 +5954,8 @@ static int bnx2x_func_send_cmd(struct 
bnx2x *bp, return bnx2x_func_send_tx_start(bp, params); case BNX2X_F_CMD_SWITCH_UPDATE: return bnx2x_func_send_switch_update(bp, params); + case BNX2X_F_CMD_SET_TIMESYNC: + return bnx2x_func_send_set_timesync(bp, params); default: BNX2X_ERR("Unknown command: %d\n", params->cmd); return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 718ecd2946616195cc92d53bd2f6498d525f14fe..e97275f456c0ae789ab3e628482d73ccf2dd949c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -711,6 +711,7 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, + BNX2X_RSS_GRE_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -769,7 +770,9 @@ enum { BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, BNX2X_Q_UPDATE_SILENT_VLAN_REM, BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, - BNX2X_Q_UPDATE_TX_SWITCHING + BNX2X_Q_UPDATE_TX_SWITCHING, + BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + BNX2X_Q_UPDATE_PTP_PKTS, }; /* Allowed Queue states */ @@ -831,6 +834,7 @@ enum { BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_SILENT_VLAN_REM, BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, BNX2X_Q_FLG_PCSUM_ON_PKT, BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; @@ -851,6 +855,10 @@ enum bnx2x_q_type { #define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) +/* DMAE channel to be used by FW for timesync workaroun. A driver that sends + * timesync-related ramrods must not use this DMAE command ID. + */ +#define FW_DMAE_CMD_ID 6 struct bnx2x_queue_init_params { struct { @@ -1085,6 +1093,20 @@ struct bnx2x_queue_sp_obj { }; /********************** Function state update *********************************/ + +/* UPDATE command options */ +enum { + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, +}; + /* Allowed Function states */ enum bnx2x_func_state { BNX2X_F_STATE_RESET, @@ -1105,6 +1127,7 @@ enum bnx2x_func_cmd { BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_START, BNX2X_F_CMD_SWITCH_UPDATE, + BNX2X_F_CMD_SET_TIMESYNC, BNX2X_F_CMD_MAX, }; @@ -1146,18 +1169,44 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* NVGRE classification enablement */ - u8 nvgre_clss_en; + /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ + u8 tunnel_mode; - /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_mode; + /* tunneling classification enablement */ + u8 tunn_clss_en; + + /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_type; + + /* Enables Inner GRE RSS on the function, depends on the client RSS + * capailities + */ + u8 inner_gre_rss_en; + + /* Allows accepting of packets failing MF classification, possibly + * only matching a given ethertype + */ + u8 class_fail; + u16 class_fail_ethtype; - /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ - u8 gre_tunnel_rss; + /* Override priority of output packets */ + u8 sd_vlan_force_pri; + u8 sd_vlan_force_pri_val; + + /* Replace vlan's ethertype */ + u16 sd_vlan_eth_type; + + /* Prevent inner vlans from being added by FW */ + u8 no_added_tags; }; struct bnx2x_func_switch_update_params { - u8 suspend; + unsigned long changes; /* BNX2X_F_UPDATE_XX bits */ + u16 vlan; + u16 vlan_eth_type; + 
u8 vlan_force_prio; + u8 tunnel_mode; + u8 gre_tunnel_type; }; struct bnx2x_func_afex_update_params { @@ -1172,6 +1221,7 @@ struct bnx2x_func_afex_viflists_params { u8 afex_vif_list_command; u8 func_to_clear; }; + struct bnx2x_func_tx_start_params { struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; @@ -1179,6 +1229,24 @@ struct bnx2x_func_tx_start_params { u8 dont_add_pri_0_en; }; +struct bnx2x_func_set_timesync_params { + /* Reset, set or keep the current drift value */ + u8 drift_adjust_cmd; + + /* Dec, inc or keep the current offset */ + u8 offset_cmd; + + /* Drift value direction */ + u8 add_sub_drift_adjust_value; + + /* Drift, period and offset values to be used according to the commands + * above. + */ + u8 drift_adjust_value; + u32 drift_adjust_period; + u64 offset_delta; +}; + struct bnx2x_func_state_params { struct bnx2x_func_sp_obj *f_obj; @@ -1197,6 +1265,7 @@ struct bnx2x_func_state_params { struct bnx2x_func_afex_update_params afex_update; struct bnx2x_func_afex_viflists_params afex_viflists; struct bnx2x_func_tx_start_params tx_start; + struct bnx2x_func_set_timesync_params set_timesync; } params; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 662310c5f4e98c4f7d7cdda3d0062969f13ddd5b..c88b20af87dff2e6613215520eeff5920b438b0f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev) return dev->bus->self && dev->bus->self->ari_enabled; } -static void +static int bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; @@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); + return BP_VFDB(bp)->vf_sbs_pool; } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, } /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ - bnx2x_get_vf_igu_cam_info(bp); + if (!bnx2x_get_vf_igu_cam_info(bp)) { + BNX2X_ERR("No entries in IGU CAM for vfs\n"); + err = -EINVAL; + goto failed; + } /* allocate the queue arrays for all VFs */ bp->vfdb->vfqs = kzalloc( BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), GFP_KERNEL); - DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); - if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp) if (!IS_SRIOV(bp)) return; - DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); - pci_disable_sriov(bp->pdev); - DP(BNX2X_MSG_IOV, "sriov disabled\n"); + bnx2x_disable_sriov(bp); /* disable access to all VFs */ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { @@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -static inline -struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) -{ - int i; - struct bnx2x_virtf *vf = NULL; - - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - if (stat_id >= vf->igu_base_id && - stat_id < vf->igu_base_id + vf_sb_count(vf)) - break; - } - return vf; -} - /* VF API helpers */ static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, u8 enable) @@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) return rc; } 
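For the function-update path reworked in bnx2x_sp.c/bnx2x_sp.h above, the new 'changes' bitmask replaces the old single 'suspend' field: a caller sets a *_CHNG bit together with the new value, and any field whose *_CHNG bit is left clear is ignored by the firmware. The fragment below is a hypothetical caller sketch, not code from the patch; it is written as it would sit inside the driver, so it relies on the driver's own headers and assumes the usual params.switch_update union member, and it only uses symbols introduced or already exercised in this patch:

	/* Hypothetical caller: request an SD VLAN tag update via the new
	 * 'changes' bitmask in bnx2x_func_switch_update_params.
	 */
	static int example_update_sd_vlan_tag(struct bnx2x *bp, u16 new_tag)
	{
		struct bnx2x_func_state_params func_params = {NULL};
		struct bnx2x_func_switch_update_params *switch_update_params =
			&func_params.params.switch_update;

		func_params.f_obj = &bp->func_obj;
		func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
		__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

		/* Mark the field as changed and supply the new value; fields
		 * without their _CHNG bit set are left untouched by the FW.
		 */
		__set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
			  &switch_update_params->changes);
		switch_update_params->vlan = new_tag;

		return bnx2x_func_state_change(bp, &func_params);
	}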
-static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, - struct bnx2x_virtf *vf, u32 *sbdf) -{ - *sbdf = vf->devfn | (vf->bus << 8); -} - void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { @@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", - vf->abs_vfid, vf->op_current); + vf->abs_vfid, current_tlv); } static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) @@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) bp->requested_nr_virtfn = num_vfs_param; if (num_vfs_param == 0) { bnx2x_set_pf_tx_switching(bp, false); - pci_disable_sriov(dev); + bnx2x_disable_sriov(bp); return 0; } else { return bnx2x_enable_sriov(bp); @@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) void bnx2x_disable_sriov(struct bnx2x *bp) { + if (pci_vfs_assigned(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return; + } + pci_disable_sriov(bp->pdev); } @@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, } if (!IS_SRIOV(bp)) { - BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n"); + BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index ca1055f3d8afda852f10412d2f551cc05b257848..01bafa4ac04589364ef092e2bd5a31231e42a5f7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -299,7 +299,8 @@ struct bnx2x_vfdb { #define BP_VFDB(bp) ((bp)->vfdb) /* vf array */ struct bnx2x_virtf *vfs; -#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx])) +#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? 
\ + &((bp)->vfdb->vfs[idx]) : NULL) #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) /* queue array - for all vfs */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index ca47665f94bf76fd6c9b2c1eb920b5aba7743124..d1608297c773746237a8faf3697277afcbe45918 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) cpu_to_le16(bp->stats_counter++); DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", - bp->fw_stats_req->hdr.drv_stats_counter); + le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); /* adjust the ramrod to include VF queues statistics */ bnx2x_iov_adjust_stats_req(bp); @@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } } -static int bnx2x_stats_comp(struct bnx2x *bp) +static void bnx2x_stats_comp(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); int cnt = 10; @@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp) cnt--; usleep_range(1000, 2000); } - return 1; } /* @@ -1630,6 +1629,11 @@ void bnx2x_stats_init(struct bnx2x *bp) int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); + if (IS_VF(bp)) { + bnx2x_memset_stats(bp); + return; + } + bp->stats_pending = 0; bp->executer_idx = 0; bp->stats_counter = 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 54e0427a9ee601b2a5e499a01c1e05218ffde368..b1d9c44aa56c919732721207d88163cda8080a53 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; flags |= VFPF_QUEUE_FLG_VLAN; - DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); /* Common */ req->vf_qid = fp_idx; @@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) REG_WR8(bp, addr, 1); } -static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) -{ - int i; - - for_each_vf(bp, i) - storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); -} - /* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index a6a9f284c8dd762579e062a9d9d62f03e1ce6f96..23f23c97c2ad7b9fa52b8d589b1627a560635f92 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -383,7 +383,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, break; rcu_read_lock(); - if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { + if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { rc = -ENODEV; rcu_read_unlock(); break; @@ -527,7 +527,7 @@ int cnic_unregister_driver(int ulp_type) list_for_each_entry(dev, &cnic_dev_list, list) { struct cnic_local *cp = dev->cnic_priv; - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { pr_err("%s: Type %d still has devices registered\n", __func__, ulp_type); read_unlock(&cnic_dev_lock); @@ -575,7 +575,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, mutex_unlock(&cnic_lock); return -EAGAIN; } - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { pr_err("%s: Type %d has already been registered to this device\n", 
__func__, ulp_type); mutex_unlock(&cnic_lock); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 5cc9cae21ed504fa0b7d71475adcc2a3e43a3742..fff2634b6f3414258655d2174e7988a1be3f843d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -191,8 +191,9 @@ enum dma_reg { DMA_STATUS, DMA_SCB_BURST_SIZE, DMA_ARB_CTRL, - DMA_PRIORITY, - DMA_RING_PRIORITY, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, }; static const u8 bcmgenet_dma_regs_v3plus[] = { @@ -201,8 +202,9 @@ static const u8 bcmgenet_dma_regs_v3plus[] = { [DMA_STATUS] = 0x08, [DMA_SCB_BURST_SIZE] = 0x0C, [DMA_ARB_CTRL] = 0x2C, - [DMA_PRIORITY] = 0x30, - [DMA_RING_PRIORITY] = 0x38, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, }; static const u8 bcmgenet_dma_regs_v2[] = { @@ -211,8 +213,9 @@ static const u8 bcmgenet_dma_regs_v2[] = { [DMA_STATUS] = 0x08, [DMA_SCB_BURST_SIZE] = 0x0C, [DMA_ARB_CTRL] = 0x30, - [DMA_PRIORITY] = 0x34, - [DMA_RING_PRIORITY] = 0x3C, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, }; static const u8 bcmgenet_dma_regs_v1[] = { @@ -220,8 +223,9 @@ static const u8 bcmgenet_dma_regs_v1[] = { [DMA_STATUS] = 0x04, [DMA_SCB_BURST_SIZE] = 0x0C, [DMA_ARB_CTRL] = 0x30, - [DMA_PRIORITY] = 0x34, - [DMA_RING_PRIORITY] = 0x3C, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, }; /* Set at runtime once bcmgenet version is known */ @@ -1054,7 +1058,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev, /* Reallocate the SKB to put enough headroom in front of it and insert * the transmit checksum offsets in the descriptors */ -static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) +static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, + struct sk_buff *skb) { struct status_64 *status = NULL; struct sk_buff *new_skb; @@ -1072,7 +1077,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) if (!new_skb) { dev->stats.tx_errors++; dev->stats.tx_dropped++; - return -ENOMEM; + return NULL; } skb = new_skb; } @@ -1090,7 +1095,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) ip_proto = ipv6_hdr(skb)->nexthdr; break; default: - return 0; + return skb; } offset = skb_checksum_start_offset(skb) - sizeof(*status); @@ -1111,7 +1116,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) status->tx_csum_info = tx_csum_info; } - return 0; + return skb; } static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) @@ -1158,8 +1163,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) /* set the SKB transmit checksum */ if (priv->desc_64b_en) { - ret = bcmgenet_put_tx_csum(dev, skb); - if (ret) { + skb = bcmgenet_put_tx_csum(dev, skb); + if (!skb) { ret = NETDEV_TX_OK; goto out; } @@ -1695,7 +1700,8 @@ static void bcmgenet_init_multiq(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int i, dma_enable; - u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; + u32 reg, dma_ctrl, ring_cfg = 0; + u32 dma_priority[3] = {0, 0, 0}; if (!netif_is_multiqueue(dev)) { netdev_warn(dev, "called with non multi queue aware HW\n"); @@ -1720,22 +1726,25 @@ static void bcmgenet_init_multiq(struct net_device *dev) /* Configure ring as descriptor ring and setup priority */ ring_cfg |= 1 << i; - dma_priority |= ((GENET_Q0_PRIORITY + i) << - 
(GENET_MAX_MQ_CNT + 1) * i); dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); + + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); } + /* Set ring 16 priority and program the hardware registers */ + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + /* Enable rings */ reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); reg |= ring_cfg; bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); - /* Use configured rings priority and set ring #16 priority */ - reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); - reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); - reg |= dma_priority; - bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); - /* Configure ring as descriptor ring and re-enable DMA if enabled */ reg = bcmgenet_tdma_readl(priv, DMA_CTRL); reg |= dma_ctrl; @@ -2017,19 +2026,6 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); } -static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) -{ - /* From WOL-enabled suspend, switch to regular clock */ - if (priv->wolopts) - clk_disable_unprepare(priv->clk_wol); - - phy_init_hw(priv->phydev); - /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev); - - return 0; -} - /* Returns a reusable dma control register value */ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) { @@ -2174,9 +2170,10 @@ static void bcmgenet_netif_stop(struct net_device *dev) */ cancel_work_sync(&priv->bcmgenet_irq_work); - priv->old_pause = -1; priv->old_link = -1; + priv->old_speed = -1; priv->old_duplex = -1; + priv->old_pause = -1; } static int bcmgenet_close(struct net_device *dev) @@ -2439,6 +2436,13 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, major, (reg >> 16) & 0x0f, reg & 0xffff); + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. 
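A short, self-contained illustration of the bit repositioning described in this comment (the register value is made up for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0x630000a0;	/* example GENET version register value */
		unsigned short gphy_rev = (reg & 0xffff) << 8;

		/* Prints 0xa000: PHY major revision 0xa0 now sits in bits 15:8,
		 * where the PHY driver expects it.
		 */
		printf("gphy_rev = 0x%04x\n", gphy_rev);
		return 0;
	}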
+ */ + priv->gphy_rev = (reg & 0xffff) << 8; + #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!(params->flags & GENET_HAS_40BITS)) pr_warn("GENET does not support 40-bits PA\n"); @@ -2676,9 +2680,13 @@ static int bcmgenet_resume(struct device *d) if (ret) goto out_clk_disable; - ret = bcmgenet_wol_resume(priv); - if (ret) - goto out_clk_disable; + /* From WOL-enabled suspend, switch to regular clock */ + if (priv->wolopts) + clk_disable_unprepare(priv->clk_wol); + + phy_init_hw(priv->phydev); + /* Speed settings must be restored */ + bcmgenet_mii_config(priv->dev); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index c862d06667719d1037eaa86bdf6924b46f6b06e6..dbf524ea3b19561bc1098b3cd58620cf9d66ac43 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -401,6 +401,8 @@ struct bcmgenet_mib_counters { #define DMA_ARBITER_MODE_MASK 0x03 #define DMA_RING_BUF_PRIORITY_MASK 0x1F #define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) #define DMA_RATE_ADJ_MASK 0xFF /* Tx/Rx Dma Descriptor common bits*/ @@ -545,10 +547,12 @@ struct bcmgenet_priv { struct phy_device *phydev; struct device_node *phy_dn; struct mii_bus *mii_bus; + u16 gphy_rev; /* PHY device variables */ - int old_duplex; int old_link; + int old_speed; + int old_duplex; int old_pause; phy_interface_t phy_interface; int phy_addr; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index c88f7ae9963698b97837dc094f96d8b163c512c7..9ff799a9f8019dafe2d47be33aba738921e588a3 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -82,24 +82,33 @@ static void bcmgenet_mii_setup(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; u32 reg, cmd_bits = 0; - unsigned int status_changed = 0; + bool status_changed = false; if (priv->old_link != phydev->link) { - status_changed = 1; + status_changed = true; priv->old_link = phydev->link; } if (phydev->link) { - /* program UMAC and RGMII block based on established link - * speed, pause, and duplex. - * the speed set in umac->cmd tell RGMII block which clock - * 25MHz(100Mbps)/125MHz(1Gbps) to use for transmit. - * receive clock is provided by PHY. 
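The DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() macros added to bcmgenet.h pack six 5-bit ring priorities into each 32-bit DMA_PRIORITY_n register. The stand-alone sketch below is illustrative only and reuses the macro definitions from the header; it prints the mapping and confirms that ring 16 (DESC_INDEX) lands in DMA_PRIORITY_2 at shift 20, exactly the hard-coded "<< 20" the old single-register code used:

	#include <stdio.h>

	#define DMA_RING_BUF_PRIORITY_SHIFT	5
	#define DMA_PRIO_REG_INDEX(q)		((q) / 6)
	#define DMA_PRIO_REG_SHIFT(q)		(((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)

	int main(void)
	{
		int q;

		/* Six 5-bit priority fields fit in each 32-bit DMA_PRIORITY_n register. */
		for (q = 0; q <= 16; q++)
			printf("ring %2d -> DMA_PRIORITY_%d, shift %2d\n",
			       q, DMA_PRIO_REG_INDEX(q), DMA_PRIO_REG_SHIFT(q));

		/* Ring 16 (DESC_INDEX) prints as DMA_PRIORITY_2, shift 20. */
		return 0;
	}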
- */ - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg &= ~OOB_DISABLE; - reg |= RGMII_LINK; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; /* speed */ if (phydev->speed == SPEED_1000) @@ -110,36 +119,39 @@ static void bcmgenet_mii_setup(struct net_device *dev) cmd_bits = UMAC_SPEED_10; cmd_bits <<= CMD_SPEED_SHIFT; - if (priv->old_duplex != phydev->duplex) { - status_changed = 1; - priv->old_duplex = phydev->duplex; - } - /* duplex */ if (phydev->duplex != DUPLEX_FULL) cmd_bits |= CMD_HD_EN; - if (priv->old_pause != phydev->pause) { - status_changed = 1; - priv->old_pause = phydev->pause; - } - /* pause capability */ if (!phydev->pause) cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - } - if (!status_changed) - return; + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. + */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); - if (phydev->link) { reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); reg |= cmd_bits; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); } phy_print_status(phydev); @@ -296,7 +308,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; struct phy_device *phydev; - unsigned int phy_flags; + u32 phy_flags; int ret; if (priv->phydev) { @@ -315,16 +327,22 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->phy_dn = of_node_get(dn); } - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, - priv->phy_interface); + /* Communicate the integrated PHY revision */ + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); if (!phydev) { pr_err("could not attach to PHY\n"); return -ENODEV; } - priv->old_link = -1; - priv->old_duplex = -1; - priv->old_pause = -1; priv->phydev = phydev; /* Configure port multiplexer based on what the probed PHY device since @@ -338,15 +356,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) return ret; } - phy_flags = PHY_BRCM_100MBPS_WAR; - - /* workarounds are only needed for 100Mpbs PHYs, and - * never on GENET V1 hardware - */ - if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv)) - phy_flags = 0; - - phydev->dev_flags |= phy_flags; phydev->advertising = phydev->supported; /* The internal PHY has its link interrupts routed to the diff --git 
a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 13f9636cdba73fdd7bc7f4d148f35a9948f4e04b..903466ef41c06ed4f9812b754f245f1c32644b2f 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -107,7 +107,8 @@ bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport, { struct bfi_enet_enable_req *admin_req = ðport->bfi_enet_cmd.admin_req; - struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); switch (admin_req->enable) { case BNA_STATUS_T_ENABLED: @@ -133,7 +134,8 @@ bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport, { struct bfi_enet_diag_lb_req *diag_lb_req = ðport->bfi_enet_cmd.lpbk_req; - struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); switch (diag_lb_req->enable) { case BNA_STATUS_T_ENABLED: @@ -161,7 +163,8 @@ static void bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, struct bfi_msgq_mhdr *msghdr) { - struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr; + struct bfi_enet_attr_rsp *rsp = + container_of(msghdr, struct bfi_enet_attr_rsp, mh); /** * Store only if not set earlier, since BNAD can override the HW diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 85e63546abe3311c2902a5a93cd422942b714b24..5fac411c52f43ef4c8365a306b216e002c2aee0d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -715,7 +715,7 @@ bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) { struct bfi_enet_rsp *rsp = - (struct bfi_enet_rsp *)msghdr; + container_of(msghdr, struct bfi_enet_rsp, mh); if (rsp->error) { /* Clear ucast from cache */ @@ -732,7 +732,7 @@ bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_enet_mcast_add_req *req = &rxf->bfi_enet_cmd.mcast_add_req; struct bfi_enet_mcast_add_rsp *rsp = - (struct bfi_enet_mcast_add_rsp *)msghdr; + container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, ntohs(rsp->handle)); @@ -3410,7 +3410,7 @@ bna_bfi_tx_enet_start(struct bna_tx *tx) cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); - cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED; + cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ffc92a41d75be550d27698af6ca3e600d9a146fe..153cafac323c41e9a040036dfdfac93609539739 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2864,7 +2864,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); txqent->hdr.wi.lso_mss = 0; - if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { + if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); return -EINVAL; } diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 4a79edaf388547d0cb5f7ca594d8ab20841efefc..4a24b9a6ad759255aeeaa3b4b18d56c475cd568a 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -351,7 +351,6 
@@ static int __init at91ether_probe(struct platform_device *pdev) if (res) goto err_disable_clock; - ether_setup(dev); dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &macb_ethtool_ops; platform_set_drvdata(pdev, dev); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index e1e02fba4fcc55e663dfc31777f281f26463896c..4d9fc0509af68c91cfd7c7e45f5aa3ec96b7bf88 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2230,9 +2230,9 @@ static int __init macb_probe(struct platform_device *pdev) netif_carrier_off(dev); - netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", - macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, - dev->irq, dev->dev_addr); + netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", + macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), + dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 25d6b2a10e4e6f7fb6b09d25706e11e2783bbea4..47bfea24b9e1b1bce64f07d5eb0424fc2a9b7e0f 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1735,7 +1735,6 @@ static int xgmac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); platform_set_drvdata(pdev, ndev); - ether_setup(ndev); ndev->netdev_ops = &xgmac_netdev_ops; ndev->ethtool_ops = &xgmac_ethtool_ops; spin_lock_init(&priv->stats_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c067b7888ac4fac6c174d3063406d087dcd530bb..9b2c669b6522234abedd23db142ad5493a008ade 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -431,6 +431,7 @@ struct sge_fl { /* SGE free-buffer queue state */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ __be64 *desc; /* address of HW Rx descriptor ring */ dma_addr_t addr; /* bus address of HW ring start */ + u64 udb; /* BAR2 offset of User Doorbell area */ }; /* A packet gather list */ @@ -451,6 +452,7 @@ struct sge_rspq { /* state for an SGE response queue */ u8 gen; /* current generation bit */ u8 intr_params; /* interrupt holdoff parameters */ u8 next_intr_params; /* holdoff params for next interrupt */ + u8 adaptive_rx; u8 pktcnt_idx; /* interrupt packet threshold */ u8 uld; /* ULD handling this queue */ u8 idx; /* queue index within its group */ @@ -459,6 +461,7 @@ struct sge_rspq { /* state for an SGE response queue */ u16 abs_id; /* absolute SGE id for the response q */ __be64 *desc; /* address of HW response ring */ dma_addr_t phys_addr; /* physical address of the ring */ + u64 udb; /* BAR2 offset of User Doorbell area */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response queue */ struct adapter *adap; @@ -516,7 +519,7 @@ struct sge_txq { int db_disabled; unsigned short db_pidx; unsigned short db_pidx_inc; - u64 udb; + u64 udb; /* BAR2 offset of User Doorbell area */ }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e5be511a3c38b13d97af70947341d031ce62acce..321f3d9385c9b0f6ec90c04c3fe91b63dc131c32 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -283,6 +283,9 @@ static const 
struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5083, 4), CH_DEVICE(0x5084, 4), CH_DEVICE(0x5085, 4), + CH_DEVICE(0x5086, 4), + CH_DEVICE(0x5087, 4), + CH_DEVICE(0x5088, 4), CH_DEVICE(0x5401, 4), CH_DEVICE(0x5402, 4), CH_DEVICE(0x5403, 4), @@ -310,6 +313,9 @@ static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5483, 4), CH_DEVICE(0x5484, 4), CH_DEVICE(0x5485, 4), + CH_DEVICE(0x5486, 4), + CH_DEVICE(0x5487, 4), + CH_DEVICE(0x5488, 4), { 0, } }; @@ -2747,8 +2753,31 @@ static int set_rx_intr_params(struct net_device *dev, return 0; } +static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) +{ + int i; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) + q->rspq.adaptive_rx = adaptive_rx; + + return 0; +} + +static int get_adaptive_rx_setting(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + return q->rspq.adaptive_rx; +} + static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { + set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); return set_rx_intr_params(dev, c->rx_coalesce_usecs, c->rx_max_coalesced_frames); } @@ -2762,6 +2791,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c->rx_coalesce_usecs = qtimer_val(adap, rq); c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? adap->sge.counter_val[rq->pktcnt_idx] : 0; + c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); return 0; } @@ -4390,7 +4420,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, * bond. We need to find such different adapters and add clip * in all of them only once. */ - read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { if (!first_pdev) { ret = clip_add(slave->dev, ifa, event); @@ -4404,7 +4433,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, to_pci_dev(slave->dev->dev.parent)) ret = clip_add(slave->dev, ifa, event); } - read_unlock(&bond->lock); } else ret = clip_add(ifa->idev->dev, ifa, event); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d22d728d4e5cb748321d9c86c4ad473a814d0831..fab4c84a1da4c5fa8313f6c133613c245b7846be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -203,6 +203,9 @@ enum { RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ }; +static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; +#define MIN_NAPI_WORK 1 + static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) { return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; @@ -521,9 +524,23 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) val = PIDX(q->pend_cred / 8); if (!is_t4(adap->params.chip)) val |= DBTYPE(1); + val |= DBPRIO(1); wmb(); - t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | - QID(q->cntxt_id) | val); + + /* If we're on T4, use the old doorbell mechanism; otherwise + * use the new BAR2 mechanism. + */ + if (is_t4(adap->params.chip)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + val | QID(q->cntxt_id)); + } else { + writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. 
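+		 *
+		 * The queue being rung is identified purely by the address
+		 * written to (each queue owns its own User Doorbell region
+		 * at q->udb), so, unlike the T4 path above, no QID() field
+		 * is encoded in the doorbell value itself.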
+ */ + wmb(); + } q->pend_cred &= 7; } } @@ -833,13 +850,14 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } -/* This function copies 64 byte coalesced work request to - * memory mapped BAR2 space(user space writes). - * For coalesced WR SGE, fetches data from the FIFO instead of from Host. +/* This function copies a tx_desc struct to memory mapped BAR2 space(user space + * writes). For coalesced WR SGE, fetches data from the FIFO instead of from + * Host. */ -static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) +static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc) { - int count = 8; + int count = sizeof(*desc) / sizeof(u64); + u64 *src = (u64 *)desc; while (count) { writeq(*src, dst); @@ -859,30 +877,63 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) */ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { - unsigned int *wr, index; - unsigned long flags; - wmb(); /* write descriptors before telling HW */ - spin_lock_irqsave(&q->db_lock, flags); - if (!q->db_disabled) { - if (is_t4(adap->params.chip)) { + + if (is_t4(adap->params.chip)) { + u32 val = PIDX(n); + unsigned long flags; + + /* For T4 we need to participate in the Doorbell Recovery + * mechanism. + */ + spin_lock_irqsave(&q->db_lock, flags); + if (!q->db_disabled) t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), - QID(q->cntxt_id) | PIDX(n)); + QID(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + spin_unlock_irqrestore(&q->db_lock, flags); + } else { + u32 val = PIDX_T5(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO(1)); + + /* For T5 and later we use the Write-Combine mapped BAR2 User + * Doorbell mechanism. If we're only writing a single TX + * Descriptor and TX Write Combining hasn't been disabled, we + * can use the Write Combining Gather Buffer; otherwise we use + * the simple doorbell. + */ + if (n == 1) { + int index = (q->pidx + ? (q->pidx - 1) + : (q->size - 1)); + + cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL, + q->desc + index); } else { - if (n == 1) { - index = q->pidx ? (q->pidx - 1) : (q->size - 1); - wr = (unsigned int *)&q->desc[index]; - cxgb_pio_copy((u64 __iomem *) - (adap->bar2 + q->udb + 64), - (u64 *)wr); - } else - writel(n, adap->bar2 + q->udb + 8); - wmb(); + writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); } - } else - q->db_pidx_inc += n; - q->db_pidx = q->pidx; - spin_unlock_irqrestore(&q->db_lock, flags); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. 
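+		 *
+		 * (Note on the n == 1 path above: the index computation
+		 * assumes q->pidx has already been advanced past the newly
+		 * written descriptor, so the gather-buffer copy picks up
+		 * q->desc[pidx - 1], wrapping to q->size - 1 when pidx is 0.)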
+ */ + wmb(); + } } /** @@ -1916,16 +1967,40 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) unsigned int params; struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(q, budget); + u32 val; if (likely(work_done < budget)) { + int timer_index; + napi_complete(napi); - params = q->next_intr_params; - q->next_intr_params = q->intr_params; + timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params); + + if (q->adaptive_rx) { + if (work_done > max(timer_pkt_quota[timer_index], + MIN_NAPI_WORK)) + timer_index = (timer_index + 1); + else + timer_index = timer_index - 1; + + timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); + q->next_intr_params = QINTR_TIMER_IDX(timer_index) | + V_QINTR_CNT_EN; + params = q->next_intr_params; + } else { + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } } else params = QINTR_TIMER_IDX(7); - t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | - INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); + val = CIDXINC(work_done) | SEINTARM(params); + if (is_t4(q->adap->params.chip)) { + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), + val | INGRESSQID((u32)q->cntxt_id)); + } else { + writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS); + wmb(); + } return work_done; } @@ -1949,6 +2024,7 @@ static unsigned int process_intrq(struct adapter *adap) unsigned int credits; const struct rsp_ctrl *rc; struct sge_rspq *q = &adap->sge.intrq; + u32 val; spin_lock(&adap->sge.intrq_lock); for (credits = 0; ; credits++) { @@ -1967,8 +2043,14 @@ static unsigned int process_intrq(struct adapter *adap) rspq_next(q); } - t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | - INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); + val = CIDXINC(credits) | SEINTARM(q->intr_params); + if (is_t4(adap->params.chip)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + val | INGRESSQID(q->cntxt_id)); + } else { + writel(val, adap->bar2 + q->udb + SGE_UDB_GTS); + wmb(); + } spin_unlock(&adap->sge.intrq_lock); return credits; } @@ -2149,6 +2231,51 @@ static void sge_tx_timer_cb(unsigned long data) mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); } +/** + * udb_address - return the BAR2 User Doorbell address for a Queue + * @adap: the adapter + * @cntxt_id: the Queue Context ID + * @qpp: Queues Per Page (for all PFs) + * + * Returns the BAR2 address of the user Doorbell associated with the + * indicated Queue Context ID. Note that this is only applicable + * for T5 and later. 
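+ *
+ *	Worked example (illustrative values, assuming a 4KB PAGE_SIZE): if
+ *	the queues-per-page field for this PF decodes to 4, then
+ *	udb_density = 4 and qpshift = 12 - ilog2(4) = 10.  Queue Context
+ *	ID 5 gives 5 << 10 = 0x1400, which is masked down to page offset
+ *	0x1000 (page 1); the remainder (5 - 1 * 4 = 1) then selects the
+ *	second SGE_UDB_SIZE (128-byte) slot, so the User Doorbell region
+ *	for this queue starts at BAR2 offset 0x1080.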
+ */ +static u64 udb_address(struct adapter *adap, unsigned int cntxt_id, + unsigned int qpp) +{ + u64 udb; + unsigned int s_qpp; + unsigned short udb_density; + unsigned long qpshift; + int page; + + BUG_ON(is_t4(adap->params.chip)); + + s_qpp = (QUEUESPERPAGEPF0 + + (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn); + udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + qpshift = PAGE_SHIFT - ilog2(udb_density); + udb = (u64)cntxt_id << qpshift; + udb &= PAGE_MASK; + page = udb / PAGE_SIZE; + udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE; + + return udb; +} + +static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id) +{ + return udb_address(adap, cntxt_id, + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); +} + +static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id) +{ + return udb_address(adap, cntxt_id, + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); +} + int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, struct net_device *dev, int intr_idx, struct sge_fl *fl, rspq_handler_t hnd) @@ -2214,6 +2341,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); iq->abs_id = ntohs(c.physiqid); + if (!is_t4(adap->params.chip)) + iq->udb = udb_address_iq(adap, iq->cntxt_id); iq->size--; /* subtract status entry */ iq->netdev = dev; iq->handler = hnd; @@ -2229,6 +2358,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->pidx = fl->cidx = 0; fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; + + /* Note, we must initialize the Free List User Doorbell + * address before refilling the Free List! + */ + if (!is_t4(adap->params.chip)) + fl->udb = udb_address_eq(adap, fl->cntxt_id); refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); } return 0; @@ -2254,21 +2389,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->params.chip)) { - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - - s_qpp = QUEUESPERPAGEPF1 * adap->fn; - udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap, - SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)); - qpshift = PAGE_SHIFT - ilog2(udb_density); - q->udb = q->cntxt_id << qpshift; - q->udb &= PAGE_MASK; - page = q->udb / PAGE_SIZE; - q->udb += (q->cntxt_id - (page * udb_density)) * 128; - } + if (!is_t4(adap->params.chip)) + q->udb = udb_address_eq(adap, q->cntxt_id); q->in_use = 0; q->cidx = q->pidx = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 41d04462b72eb158f7699c38000e6331291d2447..22d7581341a912a3fb1329fdf7445677aa27f2cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1099,6 +1099,9 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; + if (end >= adapter->params.sf_nsec) + return -EINVAL; + while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, @@ -3850,8 +3853,20 @@ int t4_wait_dev_ready(struct adapter *adap) return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 
0 : -EIO; } +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + static int get_flash_params(struct adapter *adap) { + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. + */ + static struct flash_desc supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + int ret; u32 info; @@ -3862,6 +3877,14 @@ static int get_flash_params(struct adapter *adap) if (ret) return ret; + for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) + if (supported_flash[ret].vendor_and_model_id == info) { + adap->params.sf_size = supported_flash[ret].size_mb; + adap->params.sf_nsec = + adap->params.sf_size / SF_SEC_SIZE; + return 0; + } + if ((info & 0xff) != 0x20) /* not a Numonix flash */ return -EINVAL; info >>= 16; /* log2 of size */ @@ -3874,6 +3897,10 @@ static int get_flash_params(struct adapter *adap) adap->params.sf_size = 1 << info; adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; + + if (adap->params.sf_size < FLASH_MIN_SIZE) + dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", + adap->params.sf_size, FLASH_MIN_SIZE); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 35e3d8e3288162485d442a16e0f4435c5109d998..c19a90e7f7d1934ee6011d5601bfb0d46982710e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -135,6 +135,7 @@ struct rsp_ctrl { #define RSPD_GEN(x) ((x) >> 7) #define RSPD_TYPE(x) (((x) >> 4) & 3) +#define V_QINTR_CNT_EN 0x0 #define QINTR_CNT_EN 0x1 #define QINTR_TIMER_IDX(x) ((x) << 1) #define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) @@ -175,7 +176,7 @@ enum { * Location of firmware image in FLASH. */ FLASH_FW_START_SEC = 8, - FLASH_FW_NSECS = 8, + FLASH_FW_NSECS = 16, FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), @@ -206,6 +207,12 @@ enum { FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + /* We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal + * operations. + */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, + FLASH_FPGA_CFG_START_SEC = 15, FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC), diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 39fb325474f7e7499534d142d47cdcd0b7fbc1f4..eee2728830271e7cbf98252f3713fe5eec28f7f3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -77,6 +77,7 @@ #define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) +#define SGE_TIMERREGS 6 #define SGE_PF_GTS 0x4 #define INGRESSQID_MASK 0xffff0000U #define INGRESSQID_SHIFT 16 @@ -157,8 +158,27 @@ #define QUEUESPERPAGEPF0_MASK 0x0000000fU #define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define QUEUESPERPAGEPF0 0 #define QUEUESPERPAGEPF1 4 +/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. + * The User Doorbells are each 128 bytes in length with a Simple Doorbell at + * offsets 8x and a Write Combining single 64-byte Egress Queue Unit + * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues, + * we have a Going To Sleep register at offsets 8x+4. 
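+ * (In the offsets chosen below, SGE_UDB_KDOORBELL = 8 is the x = 1 copy of
+ * the Simple Doorbell and SGE_UDB_GTS = 20 is the x = 2 copy of the Going
+ * To Sleep register.)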
+ * + * As noted above, we have many instances of the Simple Doorbell and Going To + * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a + * non-64-byte aligned offset for the Simple Doorbell in order to attempt to + * avoid buffering of the writes to the Simple Doorbell and we want to use a + * non-contiguous offset for the Going To Sleep writes in order to avoid + * possible combining between them. + */ +#define SGE_UDB_SIZE 128 +#define SGE_UDB_KDOORBELL 8 +#define SGE_UDB_GTS 20 +#define SGE_UDB_WCDOORBELL 64 + #define SGE_INT_CAUSE1 0x1024 #define SGE_INT_CAUSE2 0x1030 #define SGE_INT_CAUSE3 0x103c diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2102a4c9173757eb2b1c809fd1dbe8c5332b742d..8498a641b2e32c9c17169b2dfe9c4122a584a3d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2907,61 +2907,62 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) /* * PCI Device registration data structures. */ -#define CH_DEVICE(devid, idx) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } +#define CH_DEVICE(devid) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } static const struct pci_device_id cxgb4vf_pci_tbl[] = { - CH_DEVICE(0xb000, 0), /* PE10K FPGA */ - CH_DEVICE(0x4800, 0), /* T440-dbg */ - CH_DEVICE(0x4801, 0), /* T420-cr */ - CH_DEVICE(0x4802, 0), /* T422-cr */ - CH_DEVICE(0x4803, 0), /* T440-cr */ - CH_DEVICE(0x4804, 0), /* T420-bch */ - CH_DEVICE(0x4805, 0), /* T440-bch */ - CH_DEVICE(0x4806, 0), /* T460-ch */ - CH_DEVICE(0x4807, 0), /* T420-so */ - CH_DEVICE(0x4808, 0), /* T420-cx */ - CH_DEVICE(0x4809, 0), /* T420-bt */ - CH_DEVICE(0x480a, 0), /* T404-bt */ - CH_DEVICE(0x480d, 0), /* T480-cr */ - CH_DEVICE(0x480e, 0), /* T440-lp-cr */ - CH_DEVICE(0x4880, 0), - CH_DEVICE(0x4880, 1), - CH_DEVICE(0x4880, 2), - CH_DEVICE(0x4880, 3), - CH_DEVICE(0x4880, 4), - CH_DEVICE(0x4880, 5), - CH_DEVICE(0x4880, 6), - CH_DEVICE(0x4880, 7), - CH_DEVICE(0x4880, 8), - CH_DEVICE(0x5800, 0), /* T580-dbg */ - CH_DEVICE(0x5801, 0), /* T520-cr */ - CH_DEVICE(0x5802, 0), /* T522-cr */ - CH_DEVICE(0x5803, 0), /* T540-cr */ - CH_DEVICE(0x5804, 0), /* T520-bch */ - CH_DEVICE(0x5805, 0), /* T540-bch */ - CH_DEVICE(0x5806, 0), /* T540-ch */ - CH_DEVICE(0x5807, 0), /* T520-so */ - CH_DEVICE(0x5808, 0), /* T520-cx */ - CH_DEVICE(0x5809, 0), /* T520-bt */ - CH_DEVICE(0x580a, 0), /* T504-bt */ - CH_DEVICE(0x580b, 0), /* T520-sr */ - CH_DEVICE(0x580c, 0), /* T504-bt */ - CH_DEVICE(0x580d, 0), /* T580-cr */ - CH_DEVICE(0x580e, 0), /* T540-lp-cr */ - CH_DEVICE(0x580f, 0), /* Amsterdam */ - CH_DEVICE(0x5810, 0), /* T580-lp-cr */ - CH_DEVICE(0x5811, 0), /* T520-lp-cr */ - CH_DEVICE(0x5812, 0), /* T560-cr */ - CH_DEVICE(0x5813, 0), /* T580-cr */ - CH_DEVICE(0x5814, 0), /* T580-so-cr */ - CH_DEVICE(0x5815, 0), /* T502-bt */ - CH_DEVICE(0x5880, 0), - CH_DEVICE(0x5881, 0), - CH_DEVICE(0x5882, 0), - CH_DEVICE(0x5883, 0), - CH_DEVICE(0x5884, 0), - CH_DEVICE(0x5885, 0), + CH_DEVICE(0xb000), /* PE10K FPGA */ + CH_DEVICE(0x4801), /* T420-cr */ + CH_DEVICE(0x4802), /* T422-cr */ + CH_DEVICE(0x4803), /* T440-cr */ + CH_DEVICE(0x4804), /* T420-bch */ + CH_DEVICE(0x4805), /* T440-bch */ + CH_DEVICE(0x4806), /* T460-ch */ + CH_DEVICE(0x4807), /* T420-so */ + CH_DEVICE(0x4808), /* T420-cx */ + CH_DEVICE(0x4809), /* T420-bt */ + CH_DEVICE(0x480a), /* T404-bt */ + CH_DEVICE(0x480d), /* T480-cr */ + CH_DEVICE(0x480e), /* 
T440-lp-cr */ + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x4880), + CH_DEVICE(0x5801), /* T520-cr */ + CH_DEVICE(0x5802), /* T522-cr */ + CH_DEVICE(0x5803), /* T540-cr */ + CH_DEVICE(0x5804), /* T520-bch */ + CH_DEVICE(0x5805), /* T540-bch */ + CH_DEVICE(0x5806), /* T540-ch */ + CH_DEVICE(0x5807), /* T520-so */ + CH_DEVICE(0x5808), /* T520-cx */ + CH_DEVICE(0x5809), /* T520-bt */ + CH_DEVICE(0x580a), /* T504-bt */ + CH_DEVICE(0x580b), /* T520-sr */ + CH_DEVICE(0x580c), /* T504-bt */ + CH_DEVICE(0x580d), /* T580-cr */ + CH_DEVICE(0x580e), /* T540-lp-cr */ + CH_DEVICE(0x580f), /* Amsterdam */ + CH_DEVICE(0x5810), /* T580-lp-cr */ + CH_DEVICE(0x5811), /* T520-lp-cr */ + CH_DEVICE(0x5812), /* T560-cr */ + CH_DEVICE(0x5813), /* T580-cr */ + CH_DEVICE(0x5814), /* T580-so-cr */ + CH_DEVICE(0x5815), /* T502-bt */ + CH_DEVICE(0x5880), + CH_DEVICE(0x5881), + CH_DEVICE(0x5882), + CH_DEVICE(0x5883), + CH_DEVICE(0x5884), + CH_DEVICE(0x5885), + CH_DEVICE(0x5886), + CH_DEVICE(0x5887), + CH_DEVICE(0x5888), { 0, } }; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 962510f391dfcb3b8e041dd5ad89391b093c7bb4..5ba5ad071bb619dee3c5842fc23b7ff3c3e19216 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -186,6 +186,7 @@ struct enic { ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; + u32 rx_copybreak; }; static inline struct device *enic_get_dev(struct enic *enic) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 523c9ceb04c020265b8ceabf3fbf209d9db34af6..85173d6207584aadc2546f880c22446fee0f09ff 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -379,6 +379,43 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +static int enic_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = enic->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int enic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + enic->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops enic_ethtool_ops = { .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo, @@ -391,6 +428,8 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, .get_rxnfc = enic_get_rxnfc, + .get_tunable = enic_get_tunable, + .set_tunable = enic_set_tunable, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c8832bc1c5f776a9199acbca27b693941c83d1fe..929bfe70080ac040bd879ede472d72744aaa300c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -66,6 +66,8 @@ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ #define 
PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ +#define RX_COPYBREAK_DEFAULT 256 + /* Supported devices */ static const struct pci_device_id enic_id_table[] = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, @@ -924,6 +926,7 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(buf->os_buf); + buf->os_buf = NULL; } static int enic_rq_alloc_buf(struct vnic_rq *rq) @@ -934,7 +937,24 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) unsigned int len = netdev->mtu + VLAN_ETH_HLEN; unsigned int os_buf_index = 0; dma_addr_t dma_addr; + struct vnic_rq_buf *buf = rq->to_use; + + if (buf->os_buf) { + buf = buf->next; + rq->to_use = buf; + rq->ring.desc_avail--; + if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { + /* Adding write memory barrier prevents compiler and/or + * CPU reordering, thus avoiding descriptor posting + * before descriptor is initialized. Otherwise, hardware + * can read stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &rq->ctrl->posted_index); + } + return 0; + } skb = netdev_alloc_skb_ip_align(netdev, len); if (!skb) return -ENOMEM; @@ -957,6 +977,25 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, pkt_size->small_pkt_bytes_cnt += pkt_len; } +static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, + struct vnic_rq_buf *buf, u16 len) +{ + struct enic *enic = netdev_priv(netdev); + struct sk_buff *new_skb; + + if (len > enic->rx_copybreak) + return false; + new_skb = netdev_alloc_skb_ip_align(netdev, len); + if (!new_skb) + return false; + pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len, + DMA_FROM_DEVICE); + memcpy(new_skb->data, (*skb)->data, len); + *skb = new_skb; + + return true; +} + static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) @@ -978,9 +1017,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, return; skb = buf->os_buf; - prefetch(skb->data - NET_IP_ALIGN); - pci_unmap_single(enic->pdev, buf->dma_addr, - buf->len, PCI_DMA_FROMDEVICE); cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color, &q_number, &completed_index, @@ -1011,6 +1047,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Good receive */ + if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { + buf->os_buf = NULL; + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + } + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, q_number); @@ -2531,6 +2574,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } + enic->rx_copybreak = RX_COPYBREAK_DEFAULT; return 0; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 37472ce4fac310a933a1f332a5df451a1cdbcc5d..62f7b7baf93cd79f77de9aceb8d977461c075cb7 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -847,8 +847,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - pr_warning("Using default conversion factor for " - "interrupt coalesce timer\n"); + pr_warn("Using default conversion factor for interrupt 
coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 70089c29d307a43643222959202228403c31e97f..f3ba840cbf7b065fa4b73828e75d60af20069dbb 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1613,9 +1613,6 @@ dm9000_probe(struct platform_device *pdev) /* from this point we assume that we have found a DM9000 */ - /* driver system function */ - ether_setup(ndev); - ndev->netdev_ops = &dm9000_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &dm9000_ethtool_ops; diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 322213d901d5ec663ef3831f81786b199cdd41bd..c8205606c7757ff3345acef81d57a5c0118497e3 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -328,10 +328,10 @@ static void allocate_rx_buffer(struct net_device *); static void update_cr6(u32, void __iomem *); static void send_filter_frame(struct DEVICE *); static void dm9132_id_table(struct DEVICE *); -static u16 phy_read(void __iomem *, u8, u8, u32); -static void phy_write(void __iomem *, u8, u8, u16, u32); -static void phy_write_1bit(void __iomem *, u32); -static u16 phy_read_1bit(void __iomem *); +static u16 dmfe_phy_read(void __iomem *, u8, u8, u32); +static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32); +static void dmfe_phy_write_1bit(void __iomem *, u32); +static u16 dmfe_phy_read_1bit(void __iomem *); static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); static void dmfe_timer(unsigned long); @@ -770,7 +770,7 @@ static int dmfe_stop(struct DEVICE *dev) /* Reset & stop DM910X board */ dw32(DCR0, DM910X_RESET); udelay(100); - phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); /* free interrupt */ free_irq(db->pdev->irq, dev); @@ -1154,7 +1154,7 @@ static void dmfe_timer(unsigned long data) if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { db->cr6_data &= ~0x40000; update_cr6(db->cr6_data, ioaddr); - phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); db->cr6_data |= 0x40000; update_cr6(db->cr6_data, ioaddr); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; @@ -1230,9 +1230,9 @@ static void dmfe_timer(unsigned long data) */ /* need a dummy read because of PHY's register latch*/ - phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); - link_ok_phy = (phy_read (db->ioaddr, - db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; + dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); + link_ok_phy = (dmfe_phy_read (db->ioaddr, + db->phy_addr, 1, db->chip_id) & 0x4) ? 
1 : 0; if (link_ok_phy != link_ok) { DMFE_DBUG (0, "PHY and chip report different link status", 0); @@ -1247,8 +1247,8 @@ static void dmfe_timer(unsigned long data) /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ /* AUTO or force 1M Homerun/Longrun don't need */ if ( !(db->media_mode & 0x38) ) - phy_write(db->ioaddr, db->phy_addr, - 0, 0x1000, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, + 0, 0x1000, db->chip_id); /* AUTO mode, if INT phyxcer link failed, select EXT device */ if (db->media_mode & DMFE_AUTO) { @@ -1649,16 +1649,16 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *db) /* CR6 bit18=0, select 10/100M */ update_cr6(db->cr6_data & ~0x40000, ioaddr); - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); if ( (phy_mode & 0x24) == 0x24 ) { if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ - phy_mode = phy_read(db->ioaddr, - db->phy_addr, 7, db->chip_id) & 0xf000; + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 7, db->chip_id) & 0xf000; else /* DM9102/DM9102A */ - phy_mode = phy_read(db->ioaddr, - db->phy_addr, 17, db->chip_id) & 0xf000; + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 17, db->chip_id) & 0xf000; switch (phy_mode) { case 0x1000: db->op_mode = DMFE_10MHF; break; case 0x2000: db->op_mode = DMFE_10MFD; break; @@ -1695,15 +1695,15 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) /* DM9009 Chip: Phyxcer reg18 bit12=0 */ if (db->chip_id == PCI_DM9009_ID) { - phy_reg = phy_read(db->ioaddr, - db->phy_addr, 18, db->chip_id) & ~0x1000; + phy_reg = dmfe_phy_read(db->ioaddr, + db->phy_addr, 18, db->chip_id) & ~0x1000; - phy_write(db->ioaddr, - db->phy_addr, 18, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 18, phy_reg, db->chip_id); } /* Phyxcer capability setting */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; if (db->media_mode & DMFE_AUTO) { /* AUTO Mode */ @@ -1724,13 +1724,13 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) phy_reg|=db->PHY_reg4; db->media_mode|=DMFE_AUTO; } - phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); /* Restart Auto-Negotiation */ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) - phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); if ( !db->chip_type ) - phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); } @@ -1762,7 +1762,7 @@ static void dmfe_process_mode(struct dmfe_board_info *db) /* 10/100M phyxcer force mode need */ if ( !(db->media_mode & 0x18)) { /* Forece Mode */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); if ( !(phy_reg & 0x1) ) { /* parter without N-Way capability */ phy_reg = 0x0; @@ -1772,12 +1772,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db) case DMFE_100MHF: phy_reg = 0x2000; break; case DMFE_100MFD: phy_reg = 0x2100; break; } - phy_write(db->ioaddr, - db->phy_addr, 0, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); if ( db->chip_type && 
(db->chip_id == PCI_DM9102_ID) ) mdelay(20); - phy_write(db->ioaddr, - db->phy_addr, 0, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); } } } @@ -1787,8 +1787,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db) * Write a word to Phy register */ -static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, - u16 phy_data, u32 chip_id) +static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, + u16 phy_data, u32 chip_id) { u16 i; @@ -1799,34 +1799,34 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send write command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - offset & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + offset & i ? PHY_DATA_1 : PHY_DATA_0); /* written trasnition */ - phy_write_1bit(ioaddr, PHY_DATA_1); - phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); /* Write a word data to PHY controller */ for ( i = 0x8000; i > 0; i >>= 1) - phy_write_1bit(ioaddr, - phy_data & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_data & i ? PHY_DATA_1 : PHY_DATA_0); } } @@ -1835,7 +1835,7 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, * Read a word data from phy register */ -static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) +static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) { int i; u16 phy_data; @@ -1848,33 +1848,33 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send read command(10) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_1); - phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - offset & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + offset & i ? 
PHY_DATA_1 : PHY_DATA_0); /* Skip transition state */ - phy_read_1bit(ioaddr); + dmfe_phy_read_1bit(ioaddr); /* read 16bit data */ for (phy_data = 0, i = 0; i < 16; i++) { phy_data <<= 1; - phy_data |= phy_read_1bit(ioaddr); + phy_data |= dmfe_phy_read_1bit(ioaddr); } } @@ -1886,7 +1886,7 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) * Write one bit data to Phy Controller */ -static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) +static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data) { dw32(DCR9, phy_data); /* MII Clock Low */ udelay(1); @@ -1901,7 +1901,7 @@ static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) * Read one bit phy data from PHY controller */ -static u16 phy_read_1bit(void __iomem *ioaddr) +static u16 dmfe_phy_read_1bit(void __iomem *ioaddr) { u16 phy_data; @@ -1995,11 +1995,11 @@ static void dmfe_parse_srom(struct dmfe_board_info * db) /* Check DM9801 or DM9802 present or not */ db->HPNA_present = 0; update_cr6(db->cr6_data | 0x40000, db->ioaddr); - tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); + tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { /* DM9801 or DM9802 present */ db->HPNA_timer = 8; - if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { + if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { /* DM9801 HomeRun */ db->HPNA_present = 1; dmfe_program_DM9801(db, tmp_reg); @@ -2025,29 +2025,29 @@ static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev) switch(HPNA_rev) { case 0xb900: /* DM9801 E3 */ db->HPNA_command |= 0x1000; - reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); break; case 0xb901: /* DM9801 E4 */ - reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; break; case 0xb902: /* DM9801 E5 */ case 0xb903: /* DM9801 E6 */ default: db->HPNA_command |= 0x1000; - reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; break; } - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); } @@ -2060,10 +2060,10 @@ static void dmfe_program_DM9802(struct dmfe_board_info * db) uint phy_reg; if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); - phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, 
db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; - phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); } @@ -2077,7 +2077,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) uint phy_reg; /* Got remote device status */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; switch(phy_reg) { case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ case 0x20: phy_reg = 0x0900;break; /* LP/HS */ @@ -2087,8 +2087,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) /* Check remote device status match our setting ot not */ if ( phy_reg != (db->HPNA_command & 0x0f00) ) { - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, - db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, + db->chip_id); db->HPNA_timer=8; } else db->HPNA_timer=600; /* Match, every 10 minutes, check */ diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 056b44b934773cb0084d5126dd7946d08bb24c46..d1017509b08ac1e171a12a89770373a5057c5d64 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -1,5 +1,5 @@ /* - * drivers/net/ethernet/beckhoff/ec_bhf.c + * drivers/net/ethernet/ec_bhf.c * * Copyright (C) 2014 Darek Marcinkiewicz * @@ -18,9 +18,6 @@ * Those can be found on Bechhoff CX50xx industrial PCs. */ -#if 0 -#define DEBUG -#endif #include #include #include @@ -74,6 +71,8 @@ #define DMA_WINDOW_SIZE_MASK 0xfffffffc +#define ETHERCAT_MASTER_ID 0x14 + static struct pci_device_id ids[] = { { PCI_DEVICE(0x15ec, 0x5000), }, { 0, } @@ -131,7 +130,6 @@ struct bhf_dma { struct ec_bhf_priv { struct net_device *net_dev; - struct pci_dev *dev; void __iomem *io; @@ -162,32 +160,6 @@ struct ec_bhf_priv { #define PRIV_TO_DEV(priv) (&(priv)->dev->dev) -#define ETHERCAT_MASTER_ID 0x14 - -static void ec_bhf_print_status(struct ec_bhf_priv *priv) -{ - struct device *dev = PRIV_TO_DEV(priv); - - dev_dbg(dev, "Frame error counter: %d\n", - ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); - dev_dbg(dev, "RX error counter: %d\n", - ioread8(priv->mac_io + MAC_RX_ERR_CNT)); - dev_dbg(dev, "CRC error counter: %d\n", - ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); - dev_dbg(dev, "TX frame counter: %d\n", - ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); - dev_dbg(dev, "RX frame counter: %d\n", - ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); - dev_dbg(dev, "TX fifo level: %d\n", - ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); - dev_dbg(dev, "Dropped frames: %d\n", - ioread8(priv->mac_io + MAC_DROPPED_FRMS)); - dev_dbg(dev, "Connected with CCAT slot: %d\n", - ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); - dev_dbg(dev, "Link status: %d\n", - ioread8(priv->mii_io + MII_LINK_STATUS)); -} - static void ec_bhf_reset(struct ec_bhf_priv *priv) { iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); @@ -210,8 +182,6 @@ static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) u32 addr = (u8 *)desc - priv->tx_buf.buf; iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); - - dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); } static int ec_bhf_desc_sent(struct tx_desc *desc) @@ -244,7 +214,6 @@ static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) static void 
ec_bhf_process_rx(struct ec_bhf_priv *priv) { struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; - struct device *dev = PRIV_TO_DEV(priv); while (ec_bhf_pkt_received(desc)) { int pkt_size = (le16_to_cpu(desc->header.len) & @@ -253,20 +222,16 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); - dev_dbg(dev, "Received packet, size: %d\n", pkt_size); - if (skb) { memcpy(skb_put(skb, pkt_size), data, pkt_size); skb->protocol = eth_type_trans(skb, priv->net_dev); - dev_dbg(dev, "Protocol type: %x\n", skb->protocol); - priv->stat_rx_bytes += pkt_size; netif_rx(skb); } else { - dev_err_ratelimited(dev, - "Couldn't allocate a skb_buff for a packet of size %u\n", - pkt_size); + dev_err_ratelimited(PRIV_TO_DEV(priv), + "Couldn't allocate a skb_buff for a packet of size %u\n", + pkt_size); } desc->header.recv = 0; @@ -276,7 +241,6 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv) priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; desc = &priv->rx_descs[priv->rx_dnext]; } - } static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) @@ -299,14 +263,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) unsigned block_count, i; void __iomem *ec_info; - dev_dbg(dev, "Info block:\n"); - dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); - dev_dbg(dev, "Revision of function: %x\n", - (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); - block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); - dev_dbg(dev, "Number of function blocks: %x\n", block_count); - for (i = 0; i < block_count; i++) { u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + INFO_BLOCK_TYPE); @@ -317,29 +274,17 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) dev_err(dev, "EtherCAT master with DMA block not found\n"); return -ENODEV; } - dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); ec_info = priv->io + i * INFO_BLOCK_SIZE; - dev_dbg(dev, "EtherCAT master revision: %d\n", - ioread16(ec_info + INFO_BLOCK_REV)); priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); - dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", - priv->tx_dma_chan); - priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); - dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", - priv->rx_dma_chan); priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); - dev_dbg(dev, - "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n", - priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); - return 0; } @@ -350,8 +295,6 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, struct tx_desc *desc; unsigned len; - dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); - desc = &priv->tx_descs[priv->tx_dnext]; skb_copy_and_csum_dev(skb, desc->data); @@ -366,15 +309,12 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { - /* Make sure that update updates to tx_dnext are perceived + /* Make sure that updates to tx_dnext are perceived * by timer routine. 
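+		 * (tx_dnext was advanced above; the queue is stopped only
+		 * when the descriptor it now points at has not yet been
+		 * sent, i.e. the ring is full, and the timer routine is
+		 * expected to wake the queue once that descriptor completes.)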
*/ smp_wmb(); netif_stop_queue(net_dev); - - dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); - ec_bhf_print_status(priv); } priv->stat_tx_bytes += len; @@ -397,7 +337,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, mask = ioread32(priv->dma_io + offset); mask &= DMA_WINDOW_SIZE_MASK; - dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); /* We want to allocate a chunk of memory that is: * - aligned to the mask we just read @@ -408,12 +347,10 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, buf->len = min_t(int, ~mask + 1, size); buf->alloc_len = 2 * buf->len; - dev_dbg(dev, "Allocating %d bytes for channel %d", - (int)buf->alloc_len, channel); buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, GFP_KERNEL); if (buf->alloc == NULL) { - dev_info(dev, "Failed to allocate buffer\n"); + dev_err(dev, "Failed to allocate buffer\n"); return -ENOMEM; } @@ -422,8 +359,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, iowrite32(0, priv->dma_io + offset + 4); iowrite32(buf->buf_phys, priv->dma_io + offset); - dev_dbg(dev, "Buffer: %x and read from dev: %x", - (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); return 0; } @@ -433,7 +368,7 @@ static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) int i = 0; priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); - priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; + priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; priv->tx_dnext = 0; for (i = 0; i < priv->tx_dcount; i++) @@ -445,7 +380,7 @@ static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) int i; priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); - priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; + priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf; priv->rx_dnext = 0; for (i = 0; i < priv->rx_dcount; i++) { @@ -469,8 +404,6 @@ static int ec_bhf_open(struct net_device *net_dev) struct device *dev = PRIV_TO_DEV(priv); int err = 0; - dev_info(dev, "Opening device\n"); - ec_bhf_reset(priv); err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, @@ -481,20 +414,13 @@ static int ec_bhf_open(struct net_device *net_dev) } ec_bhf_setup_rx_descs(priv); - dev_info(dev, "RX buffer allocated, address: %x\n", - (unsigned)priv->rx_buf.buf_phys); - err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, FIFO_SIZE * sizeof(struct tx_desc)); if (err) { dev_err(dev, "Failed to allocate tx buffer\n"); goto error_rx_free; } - dev_dbg(dev, "TX buffer allocated, addres: %x\n", - (unsigned)priv->tx_buf.buf_phys); - iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); - ec_bhf_setup_tx_descs(priv); netif_start_queue(net_dev); @@ -504,10 +430,6 @@ static int ec_bhf_open(struct net_device *net_dev) hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), HRTIMER_MODE_REL); - dev_info(PRIV_TO_DEV(priv), "Device open\n"); - - ec_bhf_print_status(priv); - return 0; error_rx_free: @@ -640,9 +562,6 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); - dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", - net_dev->dev_addr); - err = register_netdev(net_dev); if (err < 0) goto err_free_net_dev; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 43e08d0bc3d316ff407555bf1ec170c44c3c2be2..9a2d75235e89991bd62a7decd437707ce6b78b4b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -86,6 +86,8 @@ 
static inline char *nic_name(struct pci_dev *pdev) #define BE_MAX_JUMBO_FRAME_SIZE 9018 #define BE_MIN_MTU 256 +#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \ + (ETH_HLEN + ETH_FCS_LEN)) #define BE_NUM_VLANS_SUPPORTED 64 #define BE_MAX_EQD 128u @@ -112,7 +114,6 @@ static inline char *nic_name(struct pci_dev *pdev) #define MAX_ROCE_EQS 5 #define MAX_MSIX_VECTORS 32 #define MIN_MSIX_VECTORS 1 -#define BE_TX_BUDGET 256 #define BE_NAPI_WEIGHT 64 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) @@ -198,7 +199,6 @@ struct be_eq_obj { u8 idx; /* array index */ u8 msix_idx; - u16 tx_budget; u16 spurious_intr; struct napi_struct napi; struct be_adapter *adapter; @@ -248,6 +248,13 @@ struct be_tx_stats { ulong tx_jiffies; u32 tx_stops; u32 tx_drv_drops; /* pkts dropped by driver */ + /* the error counters are described in be_ethtool.c */ + u32 tx_hdr_parse_err; + u32 tx_dma_err; + u32 tx_tso_err; + u32 tx_spoof_check_err; + u32 tx_qinq_err; + u32 tx_internal_parity_err; struct u64_stats_sync sync; struct u64_stats_sync sync_compl; }; @@ -316,6 +323,7 @@ struct be_rx_obj { struct be_drv_stats { u32 be_on_die_temperature; u32 eth_red_drops; + u32 dma_map_errors; u32 rx_drops_no_pbuf; u32 rx_drops_no_txpb; u32 rx_drops_no_erx_descr; @@ -399,9 +407,9 @@ struct phy_info { u16 auto_speeds_supported; u16 fixed_speeds_supported; int link_speed; - u32 dac_cable_len; u32 advertising; u32 supported; + u8 cable_type; }; struct be_resources { @@ -613,6 +621,10 @@ extern const struct ethtool_ops be_ethtool_ops; for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) +#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \ + for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\ + i += adapter->num_evt_qs, txo += adapter->num_evt_qs) + #define is_mcc_eqo(eqo) (eqo->idx == 0) #define mcc_eqo(adapter) (&adapter->eq_obj[0]) @@ -661,6 +673,18 @@ static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) amap_mask(sizeof(((_struct *)0)->field)), \ AMAP_BIT_OFFSET(_struct, field)) +#define GET_RX_COMPL_V0_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr) + +#define GET_RX_COMPL_V1_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr) + +#define GET_TX_COMPL_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr) + +#define SET_TX_WRB_HDR_BITS(field, ptr, val) \ + AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val) + #define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) #define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) static inline void swap_dws(void *wrb, int len) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 4370ec1952accd2b3ce8ea3beda71ca35ba03331..fead5c65a4f00fe525464bb647442fcd025484d5 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -209,7 +209,6 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (base_status != MCC_STATUS_SUCCESS && !be_skip_err_log(opcode, base_status, addl_status)) { - if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, "VF is not privileged to issue opcode %d-%d\n", @@ -309,8 +308,6 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter, be_async_grp5_pvid_state_process(adapter, compl); break; default: - dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", - 
event_type); break; } } @@ -319,7 +316,7 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter, struct be_mcc_compl *cmp) { u8 event_type = 0; - struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; + struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp; event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK; @@ -595,6 +592,7 @@ static int lancer_wait_ready(struct be_adapter *adapter) static bool lancer_provisioning_error(struct be_adapter *adapter) { u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); @@ -677,7 +675,6 @@ int be_fw_wait_ready(struct be_adapter *adapter) return -1; } - static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) { return &wrb->payload.sgl[0]; @@ -924,6 +921,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); + eqo->q.id = le16_to_cpu(resp->eq_id); eqo->msix_idx = (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx; @@ -958,7 +956,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, if (permanent) { req->permanent = 1; } else { - req->if_id = cpu_to_le16((u16) if_handle); + req->if_id = cpu_to_le16((u16)if_handle); req->pmac_id = cpu_to_le32(pmac_id); req->permanent = 0; } @@ -966,6 +964,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); + memcpy(mac_addr, resp->mac.addr, ETH_ALEN); } @@ -1002,6 +1001,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); + *pmac_id = le32_to_cpu(resp->pmac_id); } @@ -1034,7 +1034,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); + OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), + wrb, NULL); req->hdr.domain = dom; req->if_id = cpu_to_le32(if_id); @@ -1106,6 +1107,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); + cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } @@ -1118,6 +1120,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, static u32 be_encoded_q_len(int q_len) { u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + if (len_encoded == 16) len_encoded = 0; return len_encoded; @@ -1173,6 +1176,7 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); mccq->created = true; } @@ -1216,6 +1220,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); mccq->created = true; } @@ -1274,6 +1279,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) status = be_cmd_notify_wait(adapter, 
&wrb); if (!status) { struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); + txq->id = le16_to_cpu(resp->cid); if (ver == 2) txo->db_offset = le32_to_cpu(resp->db_offset); @@ -1318,6 +1324,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); + rxq->id = le16_to_cpu(resp->id); rxq->created = true; *rss_id = resp->rss_id; @@ -1431,6 +1438,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, status = be_cmd_notify_wait(adapter, &wrb); if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); + *if_handle = le32_to_cpu(resp->interface_id); /* Hack to retrieve VF's pmac-id on BE3 */ @@ -1514,7 +1522,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) int lancer_cmd_get_pport_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { - struct be_mcc_wrb *wrb; struct lancer_cmd_req_pport_stats *req; int status = 0; @@ -1605,6 +1612,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); + if (link_speed) { *link_speed = resp->link_speed ? le16_to_cpu(resp->link_speed) * 10 : @@ -1672,6 +1680,7 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); + if (log_size && resp->log_size) *log_size = le32_to_cpu(resp->log_size) - sizeof(u32); @@ -1681,17 +1690,17 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) return status; } -void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; - int status; + int status = 0; if (buf_len == 0) - return; + return -EIO; total_size = buf_len; @@ -1700,10 +1709,9 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) get_fat_cmd.size, &get_fat_cmd.dma); if (!get_fat_cmd.va) { - status = -ENOMEM; dev_err(&adapter->pdev->dev, - "Memory allocation failure while retrieving FAT data\n"); - return; + "Memory allocation failure while reading FAT data\n"); + return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); @@ -1732,6 +1740,7 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; + memcpy(buf + offset, resp->data_buffer, le32_to_cpu(resp->read_log_length)); @@ -1746,6 +1755,7 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) pci_free_consistent(adapter->pdev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); + return status; } /* Uses synchronous mcc */ @@ -1771,8 +1781,11 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); - strcpy(adapter->fw_ver, resp->firmware_version_string); - strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string); + + strlcpy(adapter->fw_ver, resp->firmware_version_string, + sizeof(adapter->fw_ver)); + strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string, + 
sizeof(adapter->fw_on_flash)); } err: spin_unlock_bh(&adapter->mcc_lock); @@ -1782,8 +1795,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) /* set the EQ delay interval of an EQ to specified value * Uses async mcc */ -int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, - int num) +static int __be_cmd_modify_eqd(struct be_adapter *adapter, + struct be_set_eqd *set_eqd, int num) { struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; @@ -1816,6 +1829,25 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, return status; } +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, + int num) +{ + int num_eqs, i = 0; + + if (lancer_chip(adapter) && num > 8) { + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; + } + } else { + __be_cmd_modify_eqd(adapter, set_eqd, num); + } + + return 0; +} + /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, u32 num) @@ -1879,8 +1911,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_MCAST_PROMISCUOUS); } else if (flags & IFF_ALLMULTI) { - req->if_flags_mask = req->if_flags = - cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); + req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); + req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); } else if (flags & BE_FLAGS_VLAN_PROMISC) { req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); @@ -1891,8 +1923,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct netdev_hw_addr *ha; int i = 0; - req->if_flags_mask = req->if_flags = - cpu_to_le32(BE_IF_FLAGS_MULTICAST); + req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST); + req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST); /* Reset mcast promisc mode if already set by setting mask * and not setting flags field @@ -1947,6 +1979,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); + req->hdr.version = 1; req->tx_flow_control = cpu_to_le16((u16)tx_fc); req->rx_flow_control = cpu_to_le16((u16)rx_fc); @@ -1954,6 +1987,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) err: spin_unlock_bh(&adapter->mcc_lock); + + if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) + return -EOPNOTSUPP; + return status; } @@ -1985,6 +2022,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) if (!status) { struct be_cmd_resp_get_flow_control *resp = embedded_payload(wrb); + *tx_fc = le16_to_cpu(resp->tx_flow_control); *rx_fc = le16_to_cpu(resp->rx_flow_control); } @@ -2014,10 +2052,14 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); + adapter->port_num = le32_to_cpu(resp->phys_port); adapter->function_mode = le32_to_cpu(resp->function_mode); adapter->function_caps = le32_to_cpu(resp->function_caps); adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; + dev_info(&adapter->pdev->dev, + "FW config: function_mode=0x%x, function_caps=0x%x\n", + adapter->function_mode, adapter->function_caps); } mutex_unlock(&adapter->mbox_lock); @@ -2159,6 +2201,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) if (!status) { struct 
be_cmd_resp_get_beacon_state *resp = embedded_payload(wrb); + *state = resp->beacon_state; } @@ -2167,6 +2210,53 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) return status; } +/* Uses sync mcc */ +int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data) +{ + struct be_dma_mem cmd; + struct be_mcc_wrb *wrb; + struct be_cmd_req_port_type *req; + int status; + + if (page_num > TR_PAGE_A2) + return -EINVAL; + + cmd.size = sizeof(struct be_cmd_resp_port_type); + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); + return -ENOMEM; + } + memset(cmd.va, 0, cmd.size); + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_TRANSRECV_DATA, + cmd.size, wrb, &cmd); + + req->port = cpu_to_le32(adapter->hba_port_num); + req->page_num = cpu_to_le32(page_num); + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_port_type *resp = cmd.va; + + memcpy(data, resp->page_data, PAGE_DATA_LEN); + } +err: + spin_unlock_bh(&adapter->mcc_lock); + pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + return status; +} + int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, @@ -2207,7 +2297,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, be_dws_cpu_to_le(ctxt, sizeof(req->context)); req->write_offset = cpu_to_le32(data_offset); - strcpy(req->object_name, obj_name); + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma + @@ -2240,6 +2330,31 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, return status; } +int be_cmd_query_cable_type(struct be_adapter *adapter) +{ + u8 page_data[PAGE_DATA_LEN]; + int status; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + switch (adapter->phy.interface_type) { + case PHY_TYPE_QSFP: + adapter->phy.cable_type = + page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; + break; + case PHY_TYPE_SFP_PLUS_10GB: + adapter->phy.cable_type = + page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; + break; + default: + adapter->phy.cable_type = 0; + break; + } + } + return status; +} + int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) { struct lancer_cmd_req_delete_object *req; @@ -2260,7 +2375,7 @@ int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) OPCODE_COMMON_DELETE_OBJECT, sizeof(*req), wrb, NULL); - strcpy(req->object_name, obj_name); + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); status = be_mcc_notify_wait(adapter); err: @@ -2357,7 +2472,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, } int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 optype, int offset) + u16 optype, int offset) { struct be_mcc_wrb *wrb; struct be_cmd_read_flash_crc *req; @@ -2528,9 +2643,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, if (!status) { struct be_cmd_resp_ddrdma_test *resp; + resp = cmd->va; if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || - resp->snd_err) { + 
resp->snd_err) { status = -1; } } @@ -2603,6 +2719,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) if (!status) { struct be_phy_info *resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); + adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); adapter->phy.interface_type = le16_to_cpu(resp_phy_info->interface_type); @@ -2732,6 +2849,7 @@ int be_cmd_req_native_mode(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); + adapter->be3_native = le32_to_cpu(resp->cap_flags) & CAPABILITY_BE3_NATIVE_ERX_API; if (!adapter->be3_native) @@ -2771,6 +2889,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, if (!status) { struct be_cmd_resp_get_fn_privileges *resp = embedded_payload(wrb); + *privilege = le32_to_cpu(resp->privilege_mask); /* In UMC mode FW does not return right privileges. @@ -2918,7 +3037,6 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, u32 if_handle, bool active, u32 domain) { - if (!active) be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, if_handle, domain); @@ -3102,6 +3220,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, if (!status) { struct be_cmd_resp_get_hsw_config *resp = embedded_payload(wrb); + be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, pvid, &resp->context); @@ -3161,7 +3280,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; - resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; + + resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; adapter->wol_cap = resp->wol_settings; if (adapter->wol_cap & BE_WOL_CAP) @@ -3197,6 +3317,7 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); + for (j = 0; j < num_modes; j++) { if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) cfgs->module[i].trace_lvl[j].dbg_lvl = @@ -3233,6 +3354,7 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) if (!status) { cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); + for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) level = cfgs->module[0].trace_lvl[j].dbg_lvl; @@ -3329,6 +3451,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); + *port_name = resp->port_name[adapter->hba_port_num]; } else { *port_name = adapter->hba_port_num + '0'; @@ -3952,6 +4075,7 @@ int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) if (!status) { struct be_cmd_resp_get_active_profile *resp = embedded_payload(wrb); + *profile_id = le16_to_cpu(resp->active_profile_id); } @@ -4004,7 +4128,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, { struct be_adapter *adapter = netdev_priv(netdev_handle); struct be_mcc_wrb *wrb; - struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; + struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; struct be_cmd_req_hdr *req; struct be_cmd_resp_hdr 
*resp; int status; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5284b825bba28521b072c8b0891a3b36c3b464a6..eb5085d6794fde8b4635d82bd0427d0fde084844 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -57,7 +57,8 @@ enum mcc_base_status { MCC_STATUS_ILLEGAL_FIELD = 3, MCC_STATUS_INSUFFICIENT_BUFFER = 4, MCC_STATUS_UNAUTHORIZED_REQUEST = 5, - MCC_STATUS_NOT_SUPPORTED = 66 + MCC_STATUS_NOT_SUPPORTED = 66, + MCC_STATUS_FEATURE_NOT_SUPPORTED = 68 }; /* Additional status */ @@ -1004,8 +1005,8 @@ struct be_cmd_resp_link_status { /* Identifies the type of port attached to NIC */ struct be_cmd_req_port_type { struct be_cmd_req_hdr hdr; - u32 page_num; - u32 port; + __le32 page_num; + __le32 port; }; enum { @@ -1013,28 +1014,23 @@ enum { TR_PAGE_A2 = 0xa2 }; +/* From SFF-8436 QSFP+ spec */ +#define QSFP_PLUS_CABLE_TYPE_OFFSET 0x83 +#define QSFP_PLUS_CR4_CABLE 0x8 +#define QSFP_PLUS_SR4_CABLE 0x4 +#define QSFP_PLUS_LR4_CABLE 0x2 + +/* From SFF-8472 spec */ +#define SFP_PLUS_SFF_8472_COMP 0x5E +#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8 +#define SFP_PLUS_COPPER_CABLE 0x4 + +#define PAGE_DATA_LEN 256 struct be_cmd_resp_port_type { struct be_cmd_resp_hdr hdr; u32 page_num; u32 port; - struct data { - u8 identifier; - u8 identifier_ext; - u8 connector; - u8 transceiver[8]; - u8 rsvd0[3]; - u8 length_km; - u8 length_hm; - u8 length_om1; - u8 length_om2; - u8 length_cu; - u8 length_cu_m; - u8 vendor_name[16]; - u8 rsvd; - u8 vendor_oui[3]; - u8 vendor_pn[16]; - u8 vendor_rev[4]; - } data; + u8 page_data[PAGE_DATA_LEN]; }; /******************** Get FW Version *******************/ @@ -1367,6 +1363,9 @@ enum { PHY_TYPE_BASET_1GB, PHY_TYPE_BASEX_1GB, PHY_TYPE_SGMII, + PHY_TYPE_QSFP, + PHY_TYPE_KR4_40GB, + PHY_TYPE_KR2_20GB, PHY_TYPE_DISABLED = 255 }; @@ -1375,6 +1374,8 @@ enum { #define BE_SUPPORTED_SPEED_100MBPS 2 #define BE_SUPPORTED_SPEED_1GBPS 4 #define BE_SUPPORTED_SPEED_10GBPS 8 +#define BE_SUPPORTED_SPEED_20GBPS 0x10 +#define BE_SUPPORTED_SPEED_40GBPS 0x20 #define BE_AN_EN 0x2 #define BE_PAUSE_SYM_EN 0x80 @@ -2066,6 +2067,9 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, u8 status, u8 state); int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state); +int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data); +int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_oper, u32 flash_opcode, u32 buf_size); int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, @@ -2101,7 +2105,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter); int be_cmd_get_cntl_attributes(struct be_adapter *adapter); int be_cmd_req_native_mode(struct be_adapter *adapter); int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); -void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 0cd3311409a82a1aef601c66f46c3c72fc244070..e42a791c183579fdddc486e805a46d77518a0909 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ 
b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -78,6 +78,11 @@ static const struct be_ethtool_stat et_stats[] = { * fifo must never overflow. */ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, + /* Received packets dropped when the RX block runs out of space in + * one of its input FIFOs. This could happen due to a long burst of + * minimum-sized (64-byte) frames in the receive path. + * In rare cases this counter may also be incremented erroneously. + */ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, {DRVSTAT_INFO(rx_ip_checksum_errs)}, {DRVSTAT_INFO(rx_tcp_checksum_errs)}, @@ -114,6 +119,8 @@ static const struct be_ethtool_stat et_stats[] = { * is more than 9018 bytes */ {DRVSTAT_INFO(rx_drops_mtu)}, + /* Number of dma mapping errors */ + {DRVSTAT_INFO(dma_map_errors)}, /* Number of packets dropped due to random early drop function */ {DRVSTAT_INFO(eth_red_drops)}, {DRVSTAT_INFO(be_on_die_temperature)}, @@ -123,6 +130,7 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(roce_drops_payload_len)}, {DRVSTAT_INFO(roce_drops_crc)} }; + #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts @@ -145,6 +153,7 @@ static const struct be_ethtool_stat et_rx_stats[] = { */ {DRVSTAT_RX_INFO(rx_drops_no_frags)} }; + #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) /* Stats related to multi TX queues: get_stats routine assumes compl is the @@ -152,6 +161,34 @@ static const struct be_ethtool_stat et_rx_stats[] = { */ static const struct be_ethtool_stat et_tx_stats[] = { {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */ + /* This counter is incremented when the HW encounters an error while + * parsing the packet header of an outgoing TX request. This counter is + * applicable only for BE2, BE3 and Skyhawk based adapters. + */ + {DRVSTAT_TX_INFO(tx_hdr_parse_err)}, + /* This counter is incremented when an error occurs in the DMA + * operation associated with the TX request from the host to the device. + */ + {DRVSTAT_TX_INFO(tx_dma_err)}, + /* This counter is incremented when MAC or VLAN spoof checking is + * enabled on the interface and the TX request fails the spoof check + * in HW. + */ + {DRVSTAT_TX_INFO(tx_spoof_check_err)}, + /* This counter is incremented when the HW encounters an error while + * performing TSO offload. This counter is applicable only for Lancer + * adapters. + */ + {DRVSTAT_TX_INFO(tx_tso_err)}, + /* This counter is incremented when the HW detects Q-in-Q style VLAN + * tagging in a packet and such tagging is not expected on the outgoing + * interface. This counter is applicable only for Lancer adapters. + */ + {DRVSTAT_TX_INFO(tx_qinq_err)}, + /* This counter is incremented when the HW detects parity errors in the + * packet data. This counter is applicable only for Lancer adapters.
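For orientation, the dispatch that feeds these new TX error counters is added later in this patch (be_update_tx_err()/lancer_update_tx_err() in be_main.c); condensed, it is a switch on the completion status bits:

	compl_status = GET_TX_COMPL_BITS(status, txcp);
	switch (compl_status) {
	case BE_TX_COMP_HDR_PARSE_ERR:
		tx_stats(txo)->tx_hdr_parse_err++;
		break;
	case BE_TX_COMP_NDMA_ERR:
		tx_stats(txo)->tx_dma_err++;
		break;
	/* ... the remaining status codes map to the other counters above ... */
	}

All of these counters are exported through the ethtool statistics interface (ethtool -S).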
+ */ + {DRVSTAT_TX_INFO(tx_internal_parity_err)}, {DRVSTAT_TX_INFO(tx_bytes)}, {DRVSTAT_TX_INFO(tx_pkts)}, /* Number of skbs queued for trasmission by the driver */ @@ -165,6 +202,7 @@ static const struct be_ethtool_stat et_tx_stats[] = { /* Pkts dropped in the driver's transmit path */ {DRVSTAT_TX_INFO(tx_drv_drops)} }; + #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) static const char et_self_tests[][ETH_GSTRING_LEN] = { @@ -239,7 +277,7 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, while ((total_read_len < buf_len) && !eof) { chunk_size = min_t(u32, (buf_len - total_read_len), - LANCER_READ_FILE_CHUNK); + LANCER_READ_FILE_CHUNK); chunk_size = ALIGN(chunk_size, 4); status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, total_read_len, file_name, @@ -298,7 +336,6 @@ static int be_get_coalesce(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; - et->rx_coalesce_usecs = aic->prev_eqd; et->rx_coalesce_usecs_high = aic->max_eqd; et->rx_coalesce_usecs_low = aic->min_eqd; @@ -440,18 +477,27 @@ static int be_get_sset_count(struct net_device *netdev, int stringset) } } -static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) +static u32 be_get_port_type(struct be_adapter *adapter) { u32 port; - switch (phy_type) { + switch (adapter->phy.interface_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: port = PORT_TP; break; case PHY_TYPE_SFP_PLUS_10GB: - port = dac_cable_len ? PORT_DA : PORT_FIBRE; + if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; + break; + case PHY_TYPE_QSFP: + if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; break; case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: @@ -467,11 +513,11 @@ static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) return port; } -static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) +static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) { u32 val = 0; - switch (if_type) { + switch (adapter->phy.interface_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: @@ -490,10 +536,38 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) val |= SUPPORTED_10000baseKX4_Full; break; + case PHY_TYPE_KR2_20GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_20GBPS) + val |= SUPPORTED_20000baseKR2_Full; + break; case PHY_TYPE_KR_10GB: val |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; break; + case PHY_TYPE_KR4_40GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) + val |= SUPPORTED_40000baseKR4_Full; + break; + case PHY_TYPE_QSFP: + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) { + switch (adapter->phy.cable_type) { + case QSFP_PLUS_CR4_CABLE: + val |= SUPPORTED_40000baseCR4_Full; + break; + case QSFP_PLUS_LR4_CABLE: + val |= SUPPORTED_40000baseLR4_Full; + break; + default: + val |= SUPPORTED_40000baseSR4_Full; + break; + } + } case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: @@ -534,8 +608,6 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) int status; u32 auto_speeds; u32 fixed_speeds; - u32 dac_cable_len; - u16 interface_type; if 
(adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, @@ -546,21 +618,19 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) status = be_cmd_get_phy_info(adapter); if (!status) { - interface_type = adapter->phy.interface_type; auto_speeds = adapter->phy.auto_speeds_supported; fixed_speeds = adapter->phy.fixed_speeds_supported; - dac_cable_len = adapter->phy.dac_cable_len; + + be_cmd_query_cable_type(adapter); ecmd->supported = - convert_to_et_setting(interface_type, + convert_to_et_setting(adapter, auto_speeds | fixed_speeds); ecmd->advertising = - convert_to_et_setting(interface_type, - auto_speeds); + convert_to_et_setting(adapter, auto_speeds); - ecmd->port = be_get_port_type(interface_type, - dac_cable_len); + ecmd->port = be_get_port_type(adapter); if (adapter->phy.auto_speeds_supported) { ecmd->supported |= SUPPORTED_Autoneg; @@ -614,8 +684,10 @@ static void be_get_ringparam(struct net_device *netdev, { struct be_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; - ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; + ring->rx_max_pending = adapter->rx_obj[0].q.len; + ring->rx_pending = adapter->rx_obj[0].q.len; + ring->tx_max_pending = adapter->tx_obj[0].q.len; + ring->tx_pending = adapter->tx_obj[0].q.len; } static void @@ -641,7 +713,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) - dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); + dev_warn(&adapter->pdev->dev, "Pause param set failed\n"); return be_cmd_status(status); } @@ -907,8 +979,6 @@ static void be_set_msg_level(struct net_device *netdev, u32 level) FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); adapter->msg_enable = level; - - return; } static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type) @@ -1127,6 +1197,7 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, if (indir) { struct be_rx_obj *rxo; + for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) { j = indir[i]; rxo = &adapter->rx_obj[j]; @@ -1142,8 +1213,8 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, hkey = adapter->rss_info.rss_hkey; rc = be_cmd_rss_config(adapter, rsstable, - adapter->rss_info.rss_flags, - RSS_INDIR_TABLE_LEN, hkey); + adapter->rss_info.rss_flags, + RSS_INDIR_TABLE_LEN, hkey); if (rc) { adapter->rss_info.rss_flags = RSS_ENABLE_NONE; return -EIO; @@ -1154,6 +1225,58 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; } +static int be_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u8 page_data[PAGE_DATA_LEN]; + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + if (!page_data[SFP_PLUS_SFF_8472_COMP]) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = PAGE_DATA_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = 2 * PAGE_DATA_LEN; + } + } + return be_cmd_status(status); +} + +static int be_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = 
be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + data); + if (status) + goto err; + + if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, + TR_PAGE_A2, + data + + PAGE_DATA_LEN); + if (status) + goto err; + } + if (eeprom->offset) + memcpy(data, data + eeprom->offset, eeprom->len); +err: + return be_cmd_status(status); +} + const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, @@ -1185,5 +1308,7 @@ const struct ethtool_ops be_ethtool_ops = { .get_rxfh = be_get_rxfh, .set_rxfh = be_set_rxfh, .get_channels = be_get_channels, - .set_channels = be_set_channels + .set_channels = be_set_channels, + .get_module_info = be_get_module_info, + .get_module_eeprom = be_get_module_eeprom }; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 8840c64aaeca7daca310d0a5a38dfdbed790fb12..295ee0835ba0bb32979d9923fd2db75f030e55a6 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -315,6 +315,18 @@ struct be_eth_hdr_wrb { u32 dw[4]; }; +/********* Tx Compl Status Encoding *********/ +#define BE_TX_COMP_HDR_PARSE_ERR 0x2 +#define BE_TX_COMP_NDMA_ERR 0x3 +#define BE_TX_COMP_ACL_ERR 0x5 + +#define LANCER_TX_COMP_LSO_ERR 0x1 +#define LANCER_TX_COMP_HSW_DROP_MAC_ERR 0x3 +#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR 0x5 +#define LANCER_TX_COMP_QINQ_ERR 0x7 +#define LANCER_TX_COMP_PARITY_ERR 0xb +#define LANCER_TX_COMP_DMA_ERR 0xd + /* TX Compl Queue Descriptor */ /* Pseudo amap definition for eth_tx_compl in which each bit of the diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 93ff8ef3935227c348d5bc8b20a6f0c4248ca570..9a18e7930b31bea1cebc533d014655af5673c748 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -86,6 +86,7 @@ static const char * const ue_status_low_desc[] = { "JTAG ", "MPU_INTPEND " }; + /* UE Status High CSR */ static const char * const ue_status_hi_desc[] = { "LPCMEMHOST", @@ -122,10 +123,10 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; - static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; + if (mem->va) { dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); @@ -187,6 +188,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable) static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) { u32 val = 0; + val |= qid & DB_RQ_RING_ID_MASK; val |= posted << DB_RQ_NUM_POSTED_SHIFT; @@ -198,6 +200,7 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo, u16 posted) { u32 val = 0; + val |= txo->q.id & DB_TXULP_RING_ID_MASK; val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; @@ -209,6 +212,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, bool arm, bool clear_int, u16 num_popped) { u32 val = 0; + val |= qid & DB_EQ_RING_ID_MASK; val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT); @@ -227,6 +231,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) { u32 val = 0; + val |= qid & DB_CQ_RING_ID_MASK; val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT); @@ -488,7 +493,6 @@ static void populate_be_v2_stats(struct be_adapter *adapter) static void 
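A note on the module-EEPROM hooks added to be_ethtool.c above: each transceiver page is PAGE_DATA_LEN (256) bytes, so as implemented here an SFF-8079 module reports a single page (A0) and an SFF-8472 module reports 512 bytes (A0 plus A2). A worked example, with request values invented for illustration:

	/* eeprom->offset = 300, eeprom->len = 100:
	 * 300 + 100 > PAGE_DATA_LEN, so page A2 is also read into
	 * data + PAGE_DATA_LEN, and the final memcpy() returns bytes
	 * [300, 400) of the combined 512-byte image.
	 */

In practice these callbacks are what 'ethtool -m <iface>' ends up exercising.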
populate_lancer_stats(struct be_adapter *adapter) { - struct be_drv_stats *drvs = &adapter->drv_stats; struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter); @@ -588,6 +592,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_rx_queues(adapter, rxo, i) { const struct be_rx_stats *rx_stats = rx_stats(rxo); + do { start = u64_stats_fetch_begin_irq(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; @@ -602,6 +607,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_tx_queues(adapter, txo, i) { const struct be_tx_stats *tx_stats = tx_stats(txo); + do { start = u64_stats_fetch_begin_irq(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; @@ -738,38 +744,37 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, memset(hdr, 0, sizeof(*hdr)); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); + SET_TX_WRB_HDR_BITS(crc, hdr, 1); if (skb_is_gso(skb)) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, - hdr, skb_shinfo(skb)->gso_size); + SET_TX_WRB_HDR_BITS(lso, hdr, 1); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); + SET_TX_WRB_HDR_BITS(lso6, hdr, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); + SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); else if (proto == IPPROTO_UDP) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); + SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); } if (vlan_tx_tag_present(skb)) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); + SET_TX_WRB_HDR_BITS(vlan, hdr, 1); vlan_tag = be_get_tx_vlan_tag(adapter, skb); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); } /* To skip HW VLAN tagging: evt = 1, compl = 0 */ - AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); + SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan); + SET_TX_WRB_HDR_BITS(event, hdr, 1); + SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); + SET_TX_WRB_HDR_BITS(len, hdr, len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, @@ -808,6 +813,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, if (skb->len > skb->data_len) { int len = skb_headlen(skb); + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; @@ -821,6 +827,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + busaddr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) @@ -850,6 +857,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, unmap_tx_frag(dev, wrb, map_single); map_single = false; copied -= wrb->frag_len; + adapter->drv_stats.dma_map_errors++; queue_head_inc(txq); } return 0; @@ -910,7 +918,7 @@ 
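The wrb_fill_hdr() conversion above relies on the one-line shorthands added to be.h earlier in this patch; the transformation is purely mechanical, for example:

	/* before */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, hdr, skb_shinfo(skb)->gso_size);
	/* after */
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);

The same pattern is applied to the RX and TX completion accessors (GET_RX_COMPL_V0_BITS, GET_RX_COMPL_V1_BITS, GET_TX_COMPL_BITS) further down.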
static bool be_ipv6_exthdr_check(struct sk_buff *skb) if (ip6h->nexthdr != NEXTHDR_TCP && ip6h->nexthdr != NEXTHDR_UDP) { struct ipv6_opt_hdr *ehdr = - (struct ipv6_opt_hdr *) (skb->data + offset); + (struct ipv6_opt_hdr *)(skb->data + offset); /* offending pkt: 2nd byte following IPv6 hdr is 0xff */ if (ehdr->hdrlen == 0xff) @@ -974,8 +982,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop; /* Manual VLAN tag insertion to prevent: @@ -1073,15 +1081,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) static int be_change_mtu(struct net_device *netdev, int new_mtu) { struct be_adapter *adapter = netdev_priv(netdev); - if (new_mtu < BE_MIN_MTU || - new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) { - dev_info(&adapter->pdev->dev, - "MTU must be between %d and %d bytes\n", - BE_MIN_MTU, - (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); + struct device *dev = &adapter->pdev->dev; + + if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) { + dev_info(dev, "MTU must be between %d and %d bytes\n", + BE_MIN_MTU, BE_MAX_MTU); return -EINVAL; } - dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", + + dev_info(dev, "MTU changed from %d to %d bytes\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; return 0; @@ -1093,6 +1101,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) */ static int be_vid_config(struct be_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; u16 vids[BE_NUM_VLANS_SUPPORTED]; u16 num = 0, i = 0; int status = 0; @@ -1114,16 +1123,15 @@ static int be_vid_config(struct be_adapter *adapter) if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) goto set_vlan_promisc; - dev_err(&adapter->pdev->dev, - "Setting HW VLAN filtering failed.\n"); + dev_err(dev, "Setting HW VLAN filtering failed\n"); } else { if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { /* hw VLAN filtering re-enabled. 
*/ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, OFF); if (!status) { - dev_info(&adapter->pdev->dev, - "Disabling VLAN Promiscuous mode.\n"); + dev_info(dev, + "Disabling VLAN Promiscuous mode\n"); adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; } } @@ -1137,11 +1145,10 @@ static int be_vid_config(struct be_adapter *adapter) status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); if (!status) { - dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); + dev_info(dev, "Enable VLAN Promiscuous mode\n"); adapter->flags |= BE_FLAGS_VLAN_PROMISC; } else - dev_err(&adapter->pdev->dev, - "Failed to enable VLAN Promiscuous mode.\n"); + dev_err(dev, "Failed to enable VLAN Promiscuous mode\n"); return status; } @@ -1417,6 +1424,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, int vf, max_tx_rate, vf); return be_cmd_status(status); } + static int be_set_vf_link_state(struct net_device *netdev, int vf, int link_state) { @@ -1482,7 +1490,6 @@ static void be_eqd_update(struct be_adapter *adapter) tx_pkts = txo->stats.tx_reqs; } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); - /* Skip, if wrapped around or first calculation */ now = jiffies; if (!aic->jiffies || time_before(now, aic->jiffies) || @@ -1683,7 +1690,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); - skb->encapsulation = rxcp->tunneled; + skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1741,7 +1748,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, if (adapter->netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); - skb->encapsulation = rxcp->tunneled; + skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1753,65 +1760,46 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { - rxcp->pkt_size = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); - rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl); - rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl); - rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl); - rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl); - rxcp->ip_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl); - rxcp->l4_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); - rxcp->ipv6 = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); - rxcp->num_rcvd = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); - rxcp->pkt_type = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); - rxcp->rss_hash = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl); + rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V1_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl); if (rxcp->vlanf) { - rxcp->qnq = AMAP_GET_BITS(struct 
amap_eth_rx_compl_v1, qnq, - compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, - vlan_tag, compl); + rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl); } - rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); + rxcp->port = GET_RX_COMPL_V1_BITS(port, compl); rxcp->tunneled = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl); + GET_RX_COMPL_V1_BITS(tunneled, compl); } static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { - rxcp->pkt_size = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); - rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl); - rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl); - rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl); - rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl); - rxcp->ip_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl); - rxcp->l4_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); - rxcp->ipv6 = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); - rxcp->num_rcvd = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); - rxcp->pkt_type = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); - rxcp->rss_hash = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl); + rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V0_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl); if (rxcp->vlanf) { - rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, - compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, - vlan_tag, compl); + rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl); } - rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); - rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, - ip_frag, compl); + rxcp->port = GET_RX_COMPL_V0_BITS(port, compl); + rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1872,7 +1860,7 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) * Allocate a page, split it to fragments of size rx_frag_size and post as * receive buffers to BE */ -static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) +static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; @@ -1881,10 +1869,10 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) struct device *dev = &adapter->pdev->dev; struct be_eth_rx_d *rxd; u64 page_dmaaddr = 0, frag_dmaaddr; - u32 posted, page_offset = 0; + u32 posted, page_offset = 0, notify = 0; page_info = &rxo->page_info_tbl[rxq->head]; - for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { + for (posted = 0; posted < frags_needed && !page_info->page; posted++) { if (!pagep) { pagep = 
be_alloc_pages(adapter->big_page_size, gfp); if (unlikely(!pagep)) { @@ -1897,7 +1885,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) if (dma_mapping_error(dev, page_dmaaddr)) { put_page(pagep); pagep = NULL; - rx_stats(rxo)->rx_post_fail++; + adapter->drv_stats.dma_map_errors++; break; } page_offset = 0; @@ -1940,7 +1928,11 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) atomic_add(posted, &rxq->used); if (rxo->rx_post_starved) rxo->rx_post_starved = false; - be_rxq_notify(adapter, rxq->id, posted); + do { + notify = min(256u, posted); + be_rxq_notify(adapter, rxq->id, notify); + posted -= notify; + } while (posted); } else if (atomic_read(&rxq->used) == 0) { /* Let be_worker replenish when memory is available */ rxo->rx_post_starved = true; @@ -1991,7 +1983,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, queue_tail_inc(txq); } while (cur_index != last_index); - dev_kfree_skb_any(sent_skb); + dev_consume_skb_any(sent_skb); return num_wrbs; } @@ -2069,7 +2061,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) memset(page_info, 0, sizeof(*page_info)); } BUG_ON(atomic_read(&rxq->used)); - rxq->tail = rxq->head = 0; + rxq->tail = 0; + rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -2091,9 +2084,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) num_wrbs = 0; txq = &txo->q; while ((txcp = be_tx_compl_get(&txo->cq))) { - end_idx = - AMAP_GET_BITS(struct amap_eth_tx_compl, - wrb_index, txcp); + end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); num_wrbs += be_tx_compl_process(adapter, txo, end_idx); cmpl++; @@ -2164,7 +2155,6 @@ static int be_evt_queues_create(struct be_adapter *adapter) napi_hash_add(&eqo->napi); aic = &adapter->aic_obj[i]; eqo->adapter = adapter; - eqo->tx_budget = BE_TX_BUDGET; eqo->idx = i; aic->max_eqd = BE_MAX_EQD; aic->enable = true; @@ -2394,6 +2384,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; u32 work_done; + u32 frags_consumed = 0; for (work_done = 0; work_done < budget; work_done++) { rxcp = be_rx_compl_get(rxo); @@ -2426,6 +2417,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, be_rx_compl_process(rxo, napi, rxcp); loop_continue: + frags_consumed += rxcp->num_rcvd; be_rx_stats_update(rxo, rxcp); } @@ -2437,26 +2429,71 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, */ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && !rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_ATOMIC); + be_post_rx_frags(rxo, GFP_ATOMIC, + max_t(u32, MAX_RX_POST, + frags_consumed)); } return work_done; } -static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, - int budget, int idx) +static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) +{ + switch (status) { + case BE_TX_COMP_HDR_PARSE_ERR: + tx_stats(txo)->tx_hdr_parse_err++; + break; + case BE_TX_COMP_NDMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + case BE_TX_COMP_ACL_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + } +} + +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) +{ + switch (status) { + case LANCER_TX_COMP_LSO_ERR: + tx_stats(txo)->tx_tso_err++; + break; + case LANCER_TX_COMP_HSW_DROP_MAC_ERR: + case LANCER_TX_COMP_HSW_DROP_VLAN_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + case LANCER_TX_COMP_QINQ_ERR: + tx_stats(txo)->tx_qinq_err++; + break; + case LANCER_TX_COMP_PARITY_ERR: + 
tx_stats(txo)->tx_internal_parity_err++; + break; + case LANCER_TX_COMP_DMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + } +} + +static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, + int idx) { struct be_eth_tx_compl *txcp; - int num_wrbs = 0, work_done; + int num_wrbs = 0, work_done = 0; + u32 compl_status; + u16 last_idx; - for (work_done = 0; work_done < budget; work_done++) { - txcp = be_tx_compl_get(&txo->cq); - if (!txcp) - break; - num_wrbs += be_tx_compl_process(adapter, txo, - AMAP_GET_BITS(struct - amap_eth_tx_compl, - wrb_index, txcp)); + while ((txcp = be_tx_compl_get(&txo->cq))) { + last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); + num_wrbs += be_tx_compl_process(adapter, txo, last_idx); + work_done++; + + compl_status = GET_TX_COMPL_BITS(status, txcp); + if (compl_status) { + if (lancer_chip(adapter)) + lancer_update_tx_err(txo, compl_status); + else + be_update_tx_err(txo, compl_status); + } } if (work_done) { @@ -2474,7 +2511,6 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, tx_stats(txo)->tx_compl += work_done; u64_stats_update_end(&tx_stats(txo)->sync_compl); } - return (work_done < budget); /* Done */ } int be_poll(struct napi_struct *napi, int budget) @@ -2483,17 +2519,12 @@ int be_poll(struct napi_struct *napi, int budget) struct be_adapter *adapter = eqo->adapter; int max_work = 0, work, i, num_evts; struct be_rx_obj *rxo; - bool tx_done; + struct be_tx_obj *txo; num_evts = events_get(eqo); - /* Process all TXQs serviced by this EQ */ - for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) { - tx_done = be_process_tx(adapter, &adapter->tx_obj[i], - eqo->tx_budget, i); - if (!tx_done) - max_work = budget; - } + for_all_tx_queues_on_eq(adapter, eqo, txo, i) + be_process_tx(adapter, txo, i); if (be_lock_napi(eqo)) { /* This loop will iterate twice for EQ0 in which @@ -2882,7 +2913,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) /* First time posting */ for_all_rx_queues(adapter, rxo, i) - be_post_rx_frags(rxo, GFP_KERNEL); + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); return 0; } @@ -3309,10 +3340,20 @@ static void BEx_get_resources(struct be_adapter *adapter, */ if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) || !be_physfn(adapter) || (be_is_mc(adapter) && - !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) + !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) { res->max_tx_qs = 1; - else + } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { + struct be_resources super_nic_res = {0}; + + /* On a SuperNIC profile, the driver needs to use the + * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits + */ + be_cmd_get_profile_config(adapter, &super_nic_res, 0); + /* Some old versions of BE3 FW don't report max_tx_qs value */ + res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; + } else { res->max_tx_qs = BE3_MAX_TX_QS; + } if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && !use_sriov && be_physfn(adapter)) @@ -3362,7 +3403,7 @@ static int be_get_sriov_config(struct be_adapter *adapter) if (!be_max_vfs(adapter)) { if (num_vfs) - dev_warn(dev, "device doesn't support SRIOV\n"); + dev_warn(dev, "SRIOV is disabled. 
Ignoring num_vfs\n"); adapter->num_vfs = 0; return 0; } @@ -3413,16 +3454,16 @@ static int be_get_resources(struct be_adapter *adapter) if (be_roce_supported(adapter)) res.max_evt_qs /= 2; adapter->res = res; - - dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", - be_max_txqs(adapter), be_max_rxqs(adapter), - be_max_rss(adapter), be_max_eqs(adapter), - be_max_vfs(adapter)); - dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", - be_max_uc(adapter), be_max_mc(adapter), - be_max_vlans(adapter)); } + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", + be_max_txqs(adapter), be_max_rxqs(adapter), + be_max_rss(adapter), be_max_eqs(adapter), + be_max_vfs(adapter)); + dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", + be_max_uc(adapter), be_max_mc(adapter), + be_max_vlans(adapter)); + return 0; } @@ -3633,9 +3674,10 @@ static int be_setup(struct be_adapter *adapter) goto err; be_cmd_get_fw_ver(adapter); + dev_info(dev, "FW version is %s\n", adapter->fw_ver); if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) { - dev_err(dev, "Firmware on card is old(%s), IRQs may not work.", + dev_err(dev, "Firmware on card is old(%s), IRQs may not work", adapter->fw_ver); dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } @@ -3683,8 +3725,6 @@ static void be_netpoll(struct net_device *netdev) be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0); napi_schedule(&eqo->napi); } - - return; } #endif @@ -4052,6 +4092,7 @@ static int lancer_fw_download(struct be_adapter *adapter, { #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) #define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct device *dev = &adapter->pdev->dev; struct be_dma_mem flash_cmd; const u8 *data_ptr = NULL; u8 *dest_image_ptr = NULL; @@ -4064,21 +4105,16 @@ static int lancer_fw_download(struct be_adapter *adapter, u8 change_status; if (!IS_ALIGNED(fw->size, sizeof(u32))) { - dev_err(&adapter->pdev->dev, - "FW Image not properly aligned. " - "Length must be 4 byte aligned.\n"); - status = -EINVAL; - goto lancer_fw_exit; + dev_err(dev, "FW image size should be multiple of 4\n"); + return -EINVAL; } flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, GFP_KERNEL); - if (!flash_cmd.va) { - status = -ENOMEM; - goto lancer_fw_exit; - } + if (!flash_cmd.va) + return -ENOMEM; dest_image_ptr = flash_cmd.va + sizeof(struct lancer_cmd_req_write_object); @@ -4113,35 +4149,27 @@ static int lancer_fw_download(struct be_adapter *adapter, &add_status); } - dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, - flash_cmd.dma); + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); if (status) { - dev_err(&adapter->pdev->dev, - "Firmware load error. 
" - "Status code: 0x%x Additional Status: 0x%x\n", - status, add_status); - goto lancer_fw_exit; + dev_err(dev, "Firmware load error\n"); + return be_cmd_status(status); } + dev_info(dev, "Firmware flashed successfully\n"); + if (change_status == LANCER_FW_RESET_NEEDED) { - dev_info(&adapter->pdev->dev, - "Resetting adapter to activate new FW\n"); + dev_info(dev, "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Adapter busy for FW reset.\n" - "New FW will not be active.\n"); - goto lancer_fw_exit; + dev_err(dev, "Adapter busy, could not reset FW\n"); + dev_err(dev, "Reboot server to activate new FW\n"); } } else if (change_status != LANCER_NO_RESET_NEEDED) { - dev_err(&adapter->pdev->dev, - "System reboot required for new FW to be active\n"); + dev_info(dev, "Reboot server to activate new FW\n"); } - dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); -lancer_fw_exit: - return status; + return 0; } #define UFI_TYPE2 2 @@ -4374,7 +4402,6 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, return; err: be_disable_vxlan_offloads(adapter); - return; } static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, @@ -4506,6 +4533,7 @@ static int be_map_pci_bars(struct be_adapter *adapter) return 0; pci_map_err: + dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } @@ -4713,7 +4741,6 @@ static void be_func_recovery_task(struct work_struct *work) be_detect_error(adapter); if (adapter->hw_error && lancer_chip(adapter)) { - rtnl_lock(); netif_device_detach(adapter->netdev); rtnl_unlock(); @@ -4750,7 +4777,7 @@ static void be_worker(struct work_struct *work) if (!adapter->stats_cmd_sent) { if (lancer_chip(adapter)) lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); + &adapter->stats_cmd); else be_cmd_get_stats(adapter, &adapter->stats_cmd); } @@ -4764,7 +4791,7 @@ static void be_worker(struct work_struct *work) * allocation failures. 
*/ if (rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_KERNEL); + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); } be_eqd_update(adapter); @@ -4822,6 +4849,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) struct net_device *netdev; char port_name; + dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); + status = pci_enable_device(pdev); if (status) goto do_none; @@ -4853,11 +4882,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) } } - if (be_physfn(adapter)) { - status = pci_enable_pcie_error_reporting(pdev); - if (!status) - dev_info(&pdev->dev, "PCIe error reporting enabled\n"); - } + status = pci_enable_pcie_error_reporting(pdev); + if (!status) + dev_info(&pdev->dev, "PCIe error reporting enabled\n"); status = be_ctrl_init(adapter); if (status) @@ -4897,7 +4924,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) INIT_DELAYED_WORK(&adapter->work, be_worker); INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); - adapter->rx_fc = adapter->tx_fc = true; + adapter->rx_fc = true; + adapter->tx_fc = true; status = be_setup(adapter); if (status) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index ef4672dc735750d24186c987fd1a34e478b21b60..132866433a250cbbee21320aa587e0e947a8d85e 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -174,6 +174,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv) ocrdma_drv = drv; list_for_each_entry(dev, &be_adapter_list, entry) { struct net_device *netdev; + _be_roce_dev_add(dev); netdev = dev->netdev; if (netif_running(netdev) && netif_oper_up(netdev)) diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f3658bdb64cc4265de972cfc2bbd25157e95a680..0bc6c102f3ac12ea41770e90c195e28e1927667b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1222,8 +1222,6 @@ static int ethoc_probe(struct platform_device *pdev) goto error; } - ether_setup(netdev); - /* setup the net_device structure */ netdev->netdev_ops = ðoc_netdev_ops; netdev->watchdog_timeo = ETHOC_TIMEOUT; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ee41d98b44b6d685ccb7a2c4f3d62bea419c25ff..1d5e1822bb2c0dae66552e8fa48a17c074015103 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -27,8 +27,8 @@ */ #define FEC_IEVENT 0x004 /* Interrupt event reg */ #define FEC_IMASK 0x008 /* Interrupt mask reg */ -#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ -#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ #define FEC_ECNTRL 0x024 /* Ethernet control reg */ #define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */ @@ -38,6 +38,12 @@ #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ #define FEC_OPD 0x0ec /* Opcode + Pause duration */ +#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ +#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ +#define 
FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ #define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ #define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ #define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ @@ -45,14 +51,27 @@ #define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ #define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ #define FEC_R_FSTART 0x150 /* FIFO receive start reg */ -#define FEC_R_DES_START 0x180 /* Receive descriptor ring */ -#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ +#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ +#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ +#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */ +#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ #define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ #define FEC_RACC 0x1C4 /* Receive Accelerator function */ +#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ +#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ +#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ +#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */ +#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */ +#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */ +#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ +#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ +#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ -121,8 +140,12 @@ #define FEC_IEVENT 0x004 /* Interrupt even reg */ #define FEC_IMASK 0x008 /* Interrupt mask reg */ #define FEC_IVEC 0x00c /* Interrupt vec status reg */ -#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ -#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0 +#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0 #define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */ #define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ @@ -136,11 +159,27 @@ #define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */ #define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */ #define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */ -#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */ -#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */ +#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */ +#define FEC_R_DES_START_1 FEC_R_DES_START_0 +#define FEC_R_DES_START_2 FEC_R_DES_START_0 +#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ +#define FEC_X_DES_START_1 FEC_X_DES_START_0 +#define FEC_X_DES_START_2 FEC_X_DES_START_0 #define FEC_R_BUFF_SIZE 0x3d8 /* 
Maximum receive buff size */ #define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ - +/* These registers do not exist on the real chip. + * They are defined only so that the driver builds. + */ +#define FEC_RCMR_1 0xFFF +#define FEC_RCMR_2 0xFFF +#define FEC_DMA_CFG_1 0xFFF +#define FEC_DMA_CFG_2 0xFFF +#define FEC_TXIC0 0xFFF +#define FEC_TXIC1 0xFFF +#define FEC_TXIC2 0xFFF +#define FEC_RXIC0 0xFFF +#define FEC_RXIC1 0xFFF +#define FEC_RXIC2 0xFFF #endif /* CONFIG_M5272 */ @@ -233,6 +272,44 @@ struct bufdesc_ex { /* This device has up to three irqs on some platforms */ #define FEC_IRQ_NUM 3 +/* Maximum number of queues supported + * ENET with AVB IP can support up to 3 independent tx queues and rx queues. + * The user can select a queue number that is less than or equal to 3. + */ +#define FEC_ENET_MAX_TX_QS 3 +#define FEC_ENET_MAX_RX_QS 3 + +#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \ + ((X == 2) ? \ + FEC_R_DES_START_2 : FEC_R_DES_START_0)) +#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \ + ((X == 2) ? \ + FEC_X_DES_START_2 : FEC_X_DES_START_0)) +#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \ + ((X == 2) ? \ + FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) +#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \ + ((X == 2) ? \ + FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) + +#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) + +#define DMA_CLASS_EN (1 << 16) +#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xFFFF +#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ + (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2)) +#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ + RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) +#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ + RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) +#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2) +#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20) + /* The number of Tx and Rx buffers. These are allocated from the page * pool. The code may assume these are power of two, so it it best * to keep them that size. @@ -240,7 +317,7 @@ struct bufdesc_ex { * the skbuffer directly. */ -#define FEC_ENET_RX_PAGES 8 +#define FEC_ENET_RX_PAGES 256 #define FEC_ENET_RX_FRSIZE 2048 #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) @@ -256,6 +333,69 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +/* Interrupt events/masks.
*/ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */ +#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */ +#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) +#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_TS_AVAIL ((uint)0x00010000) +#define FEC_ENET_TS_TIMER ((uint)0x00008000) + +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) + +/* ENET interrupt coalescing macro define */ +#define FEC_ITR_CLK_SEL (0x1 << 30) +#define FEC_ITR_EN (0x1 << 31) +#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xFFFF) +#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ +#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */ + +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 + +struct fec_enet_priv_tx_q { + int index; + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *tx_bd_base; + uint tx_ring_size; + + unsigned short tx_stop_threshold; + unsigned short tx_wake_threshold; + + struct bufdesc *cur_tx; + struct bufdesc *dirty_tx; + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; +}; + +struct fec_enet_priv_rx_q { + int index; + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *rx_bd_base; + uint rx_ring_size; + + struct bufdesc *cur_rx; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. @@ -272,36 +412,28 @@ struct fec_enet_private { struct clk *clk_ipg; struct clk *clk_ahb; + struct clk *clk_ref; struct clk *clk_enet_out; struct clk *clk_ptp; bool ptp_clk_on; struct mutex ptp_clk_mutex; + unsigned int num_tx_queues; + unsigned int num_rx_queues; /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ - unsigned char *tx_bounce[TX_RING_SIZE]; - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS]; + struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS]; - /* CPM dual port RAM relative addresses */ - dma_addr_t bd_dma; - /* Address of Rx and Tx buffers */ - struct bufdesc *rx_bd_base; - struct bufdesc *tx_bd_base; - /* The next free ring entry */ - struct bufdesc *cur_rx, *cur_tx; - /* The ring entries to be free()ed */ - struct bufdesc *dirty_tx; + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; - unsigned short bufdesc_size; - unsigned short tx_ring_size; - unsigned short rx_ring_size; - unsigned short tx_stop_threshold; - unsigned short tx_wake_threshold; + unsigned long work_tx; + unsigned long work_rx; + unsigned long work_ts; + unsigned long work_mdio; - /* Software TSO */ - char *tso_hdrs; - dma_addr_t tso_hdrs_dma; + unsigned short bufdesc_size; struct platform_device *pdev; @@ -340,6 +472,18 @@ struct fec_enet_private { int hwts_tx_en; struct delayed_work time_keep; struct regulator *reg_phy; + + unsigned int tx_align; + unsigned int rx_align; + + /* hw interrupt coalesce */ + unsigned int rx_pkts_itr; + unsigned int rx_time_itr; + unsigned int tx_pkts_itr; + unsigned int tx_time_itr; + unsigned int itr_clk_rate; + + u32 rx_copybreak; }; void fec_ptp_init(struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 89355a719625567789c5a2ed5a43df783fcad487..87975b5dda94674e93e5c3297a88da21beda5588 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -57,21 +57,19 @@ #include #include #include +#include #include #include "fec.h" static void set_multicast_list(struct net_device *ndev); - -#if defined(CONFIG_ARM) -#define FEC_ALIGNMENT 0xf -#else -#define FEC_ALIGNMENT 0x3 -#endif +static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" +#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) + /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -104,6 +102,22 @@ static void set_multicast_list(struct net_device *ndev); * ENET_TDAR[TDAR]. */ #define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * i.MX6SX ENET IP adds Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation of class credit-based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for multiQ when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exists in the i.MX6SX ENET IP.
+ */ +#define FEC_QUIRK_ERR007885 (1 << 9) static struct platform_device_id fec_devtype[] = { { @@ -127,6 +141,12 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, + }, { + .name = "imx6sx-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885, }, { /* sentinel */ } @@ -139,6 +159,7 @@ enum imx_fec_type { IMX28_FEC, IMX6Q_FEC, MVF600_FEC, + IMX6SX_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -147,6 +168,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -175,21 +197,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif #endif /* CONFIG_M5272 */ -/* Interrupt events/masks. */ -#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ -#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ -#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ -#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ -#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ -#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ -#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ -#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ -#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ -#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ - -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) -#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) - /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 
*/ #define PKT_MAXBUF_SIZE 1522 @@ -230,6 +237,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_PAUSE_FLAG_AUTONEG 0x1 #define FEC_PAUSE_FLAG_ENABLE 0x2 +#define COPYBREAK_DEFAULT 256 + #define TSO_HEADER_SIZE 128 /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 @@ -242,22 +251,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp + 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -269,22 +282,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva } static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp - 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -300,14 +317,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep) +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)fep->dirty_tx - - (const char *)fep->cur_tx) / fep->bufdesc_size - 1; + entries = ((const char *)txq->dirty_tx - + (const char *)txq->cur_tx) / fep->bufdesc_size - 1; - return entries > 0 ? entries : entries + fep->tx_ring_size; + return entries > 0 ? 
entries : entries + txq->tx_ring_size; } static void *swap_buffer(void *bufaddr, int len) @@ -324,22 +342,26 @@ static void *swap_buffer(void *bufaddr, int len) static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = fep->tx_bd_base; - unsigned int index = 0; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + int index = 0; netdev_info(ndev, "TX ring dump\n"); pr_info("Nr SC addr len SKB\n"); + txq = fep->tx_queue[0]; + bdp = txq->tx_bd_base; + do { pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", index, - bdp == fep->cur_tx ? 'S' : ' ', - bdp == fep->dirty_tx ? 'H' : ' ', + bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->dirty_tx ? 'H' : ' ', bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, - fep->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep); + txq->tx_skbuff[index]); + bdp = fec_enet_get_nextdesc(bdp, fep, 0); index++; - } while (bdp != fep->tx_bd_base); + } while (bdp != txq->tx_bd_base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -365,14 +387,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) } static int -fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) +fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -384,7 +409,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); ebdp = (struct bufdesc_ex *)bdp; status = bdp->cbd_sc; @@ -404,6 +429,8 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) } if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -412,11 +439,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], bufaddr, frag_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], bufaddr, frag_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, frag_len); @@ -436,21 +463,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; } - fep->cur_tx = bdp; + txq->cur_tx = bdp; return 0; dma_mapping_error: - bdp = fep->cur_tx; + bdp = txq->cur_tx; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } return NETDEV_TX_OK; } -static int fec_enet_txq_submit_skb(struct 
sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = @@ -461,12 +489,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) dma_addr_t addr; unsigned short status; unsigned short buflen; + unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; int ret; - entries_free = fec_enet_get_free_txdesc_num(fep); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -481,7 +510,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } /* Fill in a Tx ring entry */ - bdp = fep->cur_tx; + bdp = txq->cur_tx; status = bdp->cbd_sc; status &= ~BD_ENET_TX_STATS; @@ -489,11 +518,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = skb->data; buflen = skb_headlen(skb); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + queue = skb_get_queue_mapping(skb); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, buflen); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, buflen); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, buflen); @@ -509,7 +539,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(skb, ndev); + ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); if (ret) return ret; } else { @@ -530,6 +560,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) fep->hwts_tx_en)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -537,10 +570,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ebdp->cbd_esc = estatus; } - last_bdp = fep->cur_tx; - index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep); + last_bdp = txq->cur_tx; + index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; bdp->cbd_datlen = buflen; bdp->cbd_bufaddr = addr; @@ -552,27 +585,29 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; /* If this was the last BD in the ring, start at the beginning again. 
*/ - bdp = fec_enet_get_nextdesc(last_bdp, fep); + bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; } static int -fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index, char *data, - int size, bool last_tcp, bool is_last) +fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, + struct net_device *ndev, + struct bufdesc *bdp, int index, char *data, + int size, bool last_tcp, bool is_last) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; @@ -582,10 +617,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - if (((unsigned long) data) & FEC_ALIGNMENT || + if (((unsigned long) data) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], data, size); - data = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], data, size); + data = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, size); @@ -603,6 +638,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, bdp->cbd_bufaddr = addr; if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -624,14 +661,16 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, } static int -fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index) +fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); void *bufaddr; unsigned long dmabuf; unsigned short status; @@ -641,12 +680,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE; - dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE; - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; + dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; + if (((unsigned long)bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, hdr_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, hdr_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, hdr_len); @@ -665,6 +704,8 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, 
bdp->cbd_datlen = hdr_len; if (fep->bufdesc_ex) { + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -676,17 +717,22 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, return 0; } -static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; + unsigned short queue = skb_get_queue_mapping(skb); struct tso_t tso; unsigned int index = 0; int ret; + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -706,14 +752,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; /* prepare packet headers: MAC + IP + TCP */ - hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE; + hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index); + ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); if (ret) goto err_release; @@ -721,10 +767,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data, - size, size == data_left, + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + index = fec_enet_get_bd_index(txq->tx_bd_base, + bdp, fep); + ret = fec_enet_txq_put_data_tso(txq, skb, ndev, + bdp, index, + tso.data, size, + size == data_left, total_len == 0); if (ret) goto err_release; @@ -733,17 +782,22 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; @@ -757,18 +811,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int entries_free; + unsigned short queue; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int ret; + queue = skb_get_queue_mapping(skb); + txq = fep->tx_queue[queue]; + nq 
= netdev_get_tx_queue(ndev, queue); + if (skb_is_gso(skb)) - ret = fec_enet_txq_submit_tso(skb, ndev); + ret = fec_enet_txq_submit_tso(txq, skb, ndev); else - ret = fec_enet_txq_submit_skb(skb, ndev); + ret = fec_enet_txq_submit_skb(txq, skb, ndev); if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free <= fep->tx_stop_threshold) - netif_stop_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free <= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); return NETDEV_TX_OK; } @@ -778,46 +839,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) static void fec_enet_bd_init(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned int i; + unsigned int q; - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { + for (q = 0; q < fep->num_rx_queues; q++) { + /* Initialize the receive buffer descriptors. */ + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; - /* Initialize the BD for every fragment in the page. */ - if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; - else + for (i = 0; i < rxq->rx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + + rxq->cur_rx = rxq->rx_bd_base; + } + + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + txq->cur_tx = bdp; + + for (i = 0; i < txq->tx_ring_size; i++) { + /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + if (txq->tx_skbuff[i]) { + dev_kfree_skb_any(txq->tx_skbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + txq->dirty_tx = bdp; } +} - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; - fep->cur_rx = fep->rx_bd_base; + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); +} - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; - for (i = 0; i < fep->tx_ring_size; i++) { +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; - /* Initialize the BD for every fragment in the page. 
*/ - bdp->cbd_sc = 0; - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); } - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->tx_ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_skb_any(txq->tx_skbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } } /* @@ -831,15 +957,21 @@ fec_restart(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - int i; u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we + * disable the MAC instead of resetting it. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } /* * enet-mac reset will reset mac address registers too, @@ -859,22 +991,10 @@ fec_restart(struct net_device *ndev) fec_enet_bd_init(ndev); - /* Set receive and transmit descriptor base. */ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - if (fep->bufdesc_ex) - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - else - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - + fec_enet_enable_ring(ndev); - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } + /* Reset tx SKB buffers. */ + fec_enet_reset_skb(ndev); /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { @@ -996,13 +1116,17 @@ fec_restart(struct net_device *ndev) /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + fec_enet_active_rxring(ndev); if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + /* Init the interrupt coalescing */ + fec_enet_itr_coal_init(ndev); + } static void @@ -1021,9 +1145,16 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we + * disable the MAC instead of resetting it.
+ */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1081,37 +1212,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, } static void -fec_enet_tx(struct net_device *ndev) +fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int index = 0; int entries_free; fep = netdev_priv(ndev); - bdp = fep->dirty_tx; + + queue_id = FEC_ENET_GET_QUQUE(queue_id); + + txq = fep->tx_queue[queue_id]; + /* get next bdp of dirty_tx */ + nq = netdev_get_tx_queue(ndev, queue_id); + bdp = txq->dirty_tx; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { /* current queue is empty */ - if (bdp == fep->cur_tx) + if (bdp == txq->cur_tx) break; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - skb = fep->tx_skbuff[index]; - fep->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr)) + skb = txq->tx_skbuff[index]; + txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); continue; } @@ -1153,23 +1292,81 @@ fec_enet_tx(struct net_device *ndev) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - fep->dirty_tx = bdp; + txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Since we have freed up a buffer, the ring is no longer full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free >= fep->tx_wake_threshold) - netif_wake_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free >= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (bdp != txq->cur_tx && + readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); +} + +static void +fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 queue_id; + /* First process class A queue, then Class B and Best Effort queue */ + for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { + clear_bit(queue_id, &fep->work_tx); + fec_enet_tx_queue(ndev, queue_id); + } + return; +} + +static int +fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int off; + + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + skb_reserve(skb, fep->rx_align + 1 - off); + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + if (net_ratelimit()) + 
netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } + + return 0; +} + +static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, + struct bufdesc *bdp, u32 length) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sk_buff *new_skb; + + if (length > fep->rx_copybreak) + return false; + + new_skb = netdev_alloc_skb(ndev, length); + if (!new_skb) + return false; + + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + memcpy(new_skb->data, (*skb)->data, length); + *skb = new_skb; + + return true; } /* During a receive, the cur_rx points to the current incoming buffer. @@ -1178,14 +1375,16 @@ fec_enet_tx(struct net_device *ndev) * effectively tossing the packet. */ static int -fec_enet_rx(struct net_device *ndev, int budget) +fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; - struct sk_buff *skb; + struct sk_buff *skb_new = NULL; + struct sk_buff *skb; ushort pkt_len; __u8 *data; int pkt_received = 0; @@ -1193,15 +1392,18 @@ fec_enet_rx(struct net_device *ndev, int budget) bool vlan_packet_rcvd = false; u16 vlan_tag; int index = 0; + bool is_copybreak; #ifdef CONFIG_M532x flush_cache_all(); #endif + queue_id = FEC_ENET_GET_QUQUE(queue_id); + rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = fep->cur_rx; + bdp = rxq->cur_rx; while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { @@ -1215,7 +1417,6 @@ fec_enet_rx(struct net_device *ndev, int budget) if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | @@ -1248,11 +1449,28 @@ fec_enet_rx(struct net_device *ndev, int budget) pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); - data = fep->rx_skbuff[index]->data; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + skb = rxq->rx_skbuff[index]; + + /* The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. 
+ */ + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); + if (!is_copybreak) { + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (unlikely(!skb_new)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, pkt_len - 4); + data = skb->data; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -1264,66 +1482,53 @@ fec_enet_rx(struct net_device *ndev, int budget) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); vlan_tag = ntohs(vlan_header->h_vlan_TCI); - pkt_len -= VLAN_HLEN; vlan_packet_rcvd = true; + + skb_copy_to_linear_data_offset(skb, VLAN_HLEN, + data, (2 * ETH_ALEN)); + skb_pull(skb, VLAN_HLEN); } - /* This does 16 byte alignment, exactly what we need. - * The packet length includes FCS, but we don't want to - * include that when passing upstream as it messes up - * bridging applications. - */ - skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); + skb->protocol = eth_type_trans(skb, ndev); - if (unlikely(!skb)) { - ndev->stats.rx_dropped++; - } else { - int payload_offset = (2 * ETH_ALEN); - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pkt_len - 4); /* Make room */ - - /* Extract the frame data without the VLAN header. */ - skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); - if (vlan_packet_rcvd) - payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; - skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), - data + payload_offset, - pkt_len - 4 - (2 * ETH_ALEN)); - - skb->protocol = eth_type_trans(skb, ndev); - - /* Get receive timestamp from the skb */ - if (fep->hwts_rx_en && fep->bufdesc_ex) - fec_enet_hwtstamp(fep, ebdp->ts, - skb_hwtstamps(skb)); - - if (fep->bufdesc_ex && - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { - /* don't check it */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb_checksum_none_assert(skb); - } + /* Get receive timestamp from the skb */ + if (fep->hwts_rx_en && fep->bufdesc_ex) + fec_enet_hwtstamp(fep, ebdp->ts, + skb_hwtstamps(skb)); + + if (fep->bufdesc_ex && + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { + if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + /* don't check it */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); } + } + + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); - /* Handle received VLAN packets */ - if (vlan_packet_rcvd) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); + napi_gro_receive(&fep->napi, skb); - napi_gro_receive(&fep->napi, skb); + if (is_copybreak) { + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } else { + rxq->rx_skbuff[index] = skb_new; + fec_enet_new_rxbdp(ndev, bdp, skb_new); } - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -1341,19 +1546,56 @@ fec_enet_rx(struct net_device *ndev, int budget) } 
/* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. */ - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); } - fep->cur_rx = bdp; + rxq->cur_rx = bdp; + return pkt_received; +} +static int +fec_enet_rx(struct net_device *ndev, int budget) +{ + int pkt_received = 0; + u16 queue_id; + struct fec_enet_private *fep = netdev_priv(ndev); + + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { + clear_bit(queue_id, &fep->work_rx); + pkt_received += fec_enet_rx_queue(ndev, + budget - pkt_received, queue_id); + } return pkt_received; } +static bool +fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +{ + if (int_events == 0) + return false; + + if (int_events & FEC_ENET_RXF) + fep->work_rx |= (1 << 2); + if (int_events & FEC_ENET_RXF_1) + fep->work_rx |= (1 << 0); + if (int_events & FEC_ENET_RXF_2) + fep->work_rx |= (1 << 1); + + if (int_events & FEC_ENET_TXF) + fep->work_tx |= (1 << 2); + if (int_events & FEC_ENET_TXF_1) + fep->work_tx |= (1 << 0); + if (int_events & FEC_ENET_TXF_2) + fep->work_tx |= (1 << 1); + + return true; +} + static irqreturn_t fec_enet_interrupt(int irq, void *dev_id) { @@ -1365,6 +1607,7 @@ fec_enet_interrupt(int irq, void *dev_id) int_events = readl(fep->hwp + FEC_IEVENT); writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); + fec_enet_collect_events(fep, int_events); if (int_events & napi_mask) { ret = IRQ_HANDLED; @@ -1621,6 +1864,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } mutex_unlock(&fep->ptp_clk_mutex); } + if (fep->clk_ref) { + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; + } } else { clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -1632,9 +1880,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) fep->ptp_clk_on = false; mutex_unlock(&fep->ptp_clk_mutex); } + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); } return 0; + +failed_clk_ref: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); @@ -1674,13 +1928,13 @@ static int fec_enet_mii_probe(struct net_device *ndev) continue; if (dev_id--) continue; - strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; } if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; } @@ -2062,12 +2316,179 @@ static int fec_enet_nway_reset(struct net_device *dev) return genphy_restart_aneg(phydev); } +/* ITR clock source is enet system clock (clk_ahb). 
+ * TCTT unit is cycle_ns * 64 cycle + * So, the ICTT value = X us / (cycle_ns * 64) + */ +static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->itr_clk_rate / 64000) / 1000; +} + +/* Set threshold for interrupt coalescing */ +static void fec_enet_itr_coal_set(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int rx_itr, tx_itr; + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return; + + /* Must be greater than zero to avoid unpredictable behavior */ + if (!fep->rx_time_itr || !fep->rx_pkts_itr || + !fep->tx_time_itr || !fep->tx_pkts_itr) + return; + + /* Select enet system clock as Interrupt Coalescing + * timer Clock Source + */ + rx_itr = FEC_ITR_CLK_SEL; + tx_itr = FEC_ITR_CLK_SEL; + + /* set ICFT and ICTT */ + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + + rx_itr |= FEC_ITR_EN; + tx_itr |= FEC_ITR_EN; + + writel(tx_itr, fep->hwp + FEC_TXIC0); + writel(rx_itr, fep->hwp + FEC_RXIC0); + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); +} + +static int +fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + ec->rx_coalesce_usecs = fep->rx_time_itr; + ec->rx_max_coalesced_frames = fep->rx_pkts_itr; + + ec->tx_coalesce_usecs = fep->tx_time_itr; + ec->tx_max_coalesced_frames = fep->tx_pkts_itr; + + return 0; +} + +static int +fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + + unsigned int cycle; + + if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + if (ec->rx_max_coalesced_frames > 255) { + pr_err("Rx coalesced frames exceed hardware limitation"); + return -EINVAL; + } + + if (ec->tx_max_coalesced_frames > 255) { + pr_err("Tx coalesced frames exceed hardware limitation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Rx coalesced usec exceed hardware limitation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Tx coalesced usec exceed hardware limitation"); + return -EINVAL; + } + + fep->rx_time_itr = ec->rx_coalesce_usecs; + fep->rx_pkts_itr = ec->rx_max_coalesced_frames; + + fep->tx_time_itr = ec->tx_coalesce_usecs; + fep->tx_pkts_itr = ec->tx_max_coalesced_frames; + + fec_enet_itr_coal_set(ndev); + + return 0; +} + +static void fec_enet_itr_coal_init(struct net_device *ndev) +{ + struct ethtool_coalesce ec; + + ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + fec_enet_set_coalesce(ndev, &ec); +} + +static int fec_enet_get_tunable(struct net_device *netdev, + const
struct ethtool_tunable *tuna, + void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = fep->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int fec_enet_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + fep->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .nway_reset = fec_enet_nway_reset, .get_link = ethtool_op_get_link, + .get_coalesce = fec_enet_get_coalesce, + .set_coalesce = fec_enet_set_coalesce, #ifndef CONFIG_M5272 .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, @@ -2076,6 +2497,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_sset_count = fec_enet_get_sset_count, #endif .get_ts_info = fec_enet_get_ts_info, + .get_tunable = fec_enet_get_tunable, + .set_tunable = fec_enet_set_tunable, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -2105,55 +2528,136 @@ static void fec_enet_free_buffers(struct net_device *ndev) unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = rxq->rx_skbuff[i]; + rxq->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, + bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + } - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { - skb = fep->rx_skbuff[i]; - fep->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb = txq->tx_skbuff[i]; + txq->tx_skbuff[i] = NULL; dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep); } +} - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - kfree(fep->tx_bounce[i]); - fep->tx_bounce[i] = NULL; - skb = fep->tx_skbuff[i]; - fep->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + if (fep->rx_queue[i]) + kfree(fep->rx_queue[i]); + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i]) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < 
fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->tx_queue[i] = txq; + txq->tx_ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + txq->tx_wake_threshold = + (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } + } + + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; } -static int fec_enet_alloc_buffers(struct net_device *ndev) +static int +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_rx_q *rxq; - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { - dma_addr_t addr; - + rxq = fep->rx_queue[queue]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; - addr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, addr)) { + if (fec_enet_new_rxbdp(ndev, bdp, skb)) { dev_kfree_skb(skb); - if (net_ratelimit()) - netdev_err(ndev, "Rx DMA memory map failed\n"); goto err_alloc; } - fep->rx_skbuff[i] = skb; - bdp->cbd_bufaddr = addr; + rxq->rx_skbuff[i] = skb; bdp->cbd_sc = BD_ENET_RX_EMPTY; if (fep->bufdesc_ex) { @@ -2161,17 +2665,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; + return 0; - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); - if (!fep->tx_bounce[i]) + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!txq->tx_bounce[i]) goto err_alloc; bdp->cbd_sc = 0; @@ -2182,11 +2701,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -2196,6 +2715,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) return -ENOMEM; } +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + static int fec_enet_open(struct net_device *ndev) { @@ -2213,20 +2747,26 @@ fec_enet_open(struct net_device *ndev) ret = fec_enet_alloc_buffers(ndev); if (ret) - return ret; + goto err_enet_alloc; /* Probe and connect to PHY when open the interface */ ret = fec_enet_mii_probe(ndev); - if (ret) { - fec_enet_free_buffers(ndev); - return ret; - } + if (ret) + goto err_enet_mii_probe; fec_restart(ndev); napi_enable(&fep->napi); phy_start(fep->phy_dev); - netif_start_queue(ndev); + netif_tx_start_all_queues(ndev); + return 0; + +err_enet_mii_probe: + fec_enet_free_buffers(ndev); +err_enet_alloc: + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + return ret; } static int @@ -2399,7 +2939,7 @@ static int fec_set_features(struct net_device *netdev, /* Resume the device after updates */ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { fec_restart(netdev); - netif_wake_queue(netdev); + netif_tx_wake_all_queues(netdev); netif_tx_unlock_bh(netdev); napi_enable(&fep->napi); } @@ -2432,39 +2972,38 @@ static int fec_enet_init(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; + dma_addr_t bd_dma; int bd_size; + unsigned int i; - /* init the tx & rx ring size */ - fep->tx_ring_size = TX_RING_SIZE; - fep->rx_ring_size = RX_RING_SIZE; +#if defined(CONFIG_ARM) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif - fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; - fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; + fec_enet_alloc_queue(ndev); if (fep->bufdesc_ex) fep->bufdesc_size = sizeof(struct bufdesc_ex); else fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->tx_ring_size + fep->rx_ring_size) * + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * fep->bufdesc_size; /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, + cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, GFP_KERNEL); - if (!cbd_base) - return -ENOMEM; - - fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, - &fep->tso_hdrs_dma, GFP_KERNEL); - if (!fep->tso_hdrs) { - dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); + if (!cbd_base) { return -ENOMEM; } - memset(cbd_base, 0, PAGE_SIZE); - - fep->netdev = ndev; + memset(cbd_base, 0, bd_size); /* Get the Ethernet address */ fec_get_mac(ndev); @@ -2472,12 +3011,36 @@ static int fec_enet_init(struct net_device *ndev) fec_set_mac_address(ndev, NULL); /* Set receive and transmit descriptor base. 
*/ - fep->rx_bd_base = cbd_base; - if (fep->bufdesc_ex) - fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); - else - fep->tx_bd_base = cbd_base + fep->rx_ring_size; + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + rxq->index = i; + rxq->rx_bd_base = (struct bufdesc *)cbd_base; + rxq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; + cbd_base += rxq->rx_ring_size; + } + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + txq->index = i; + txq->tx_bd_base = (struct bufdesc *)cbd_base; + txq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; + cbd_base += txq->tx_ring_size; + } + } + /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; @@ -2500,6 +3063,11 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + ndev->hw_features = ndev->features; fec_restart(ndev); @@ -2545,6 +3113,42 @@ static void fec_reset_phy(struct platform_device *pdev) } #endif /* CONFIG_OF */ +static void +fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) +{ + struct device_node *np = pdev->dev.of_node; + int err; + + *num_tx = *num_rx = 1; + + if (!np || !of_device_is_available(np)) + return; + + /* parse the num of tx and rx queues */ + err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); + if (err) + *num_tx = 1; + + err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); + if (err) + *num_rx = 1; + + if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { + dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", + *num_tx); + *num_tx = 1; + return; + } + + if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { + dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", + *num_rx); + *num_rx = 1; + return; + } + +} + static int fec_probe(struct platform_device *pdev) { @@ -2556,13 +3160,18 @@ fec_probe(struct platform_device *pdev) const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; + int num_tx_qs; + int num_rx_qs; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; + fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); + /* Init network device */ - ndev = alloc_etherdev(sizeof(struct fec_enet_private)); + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), + num_tx_qs, num_rx_qs); if (!ndev) return -ENOMEM; @@ -2571,6 +3180,9 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + fep->num_rx_queues = num_rx_qs; + fep->num_tx_queues = num_tx_qs; + #if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && @@ -2630,6 +3242,8 @@ fec_probe(struct platform_device *pdev) goto failed_clk; } + fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); + /* enet_out is optional, depends on board */ fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); if (IS_ERR(fep->clk_enet_out)) @@ -2637,6 +3251,12 @@ 
fec_probe(struct platform_device *pdev) fep->ptp_clk_on = false; mutex_init(&fep->ptp_clk_mutex); + + /* clk_ref is optional, depends on board */ + fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) + fep->clk_ref = NULL; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; @@ -2684,6 +3304,7 @@ fec_probe(struct platform_device *pdev) goto failed_irq; } + init_completion(&fep->mdio_done); ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; @@ -2700,6 +3321,7 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); return 0; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 748fd24d3d9ec3fa26ff80b528c9f9ff19fafbb4..c92c3b7876ca9c3b238250965e7982414c00e502 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -215,139 +215,23 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) return received; } -/* non NAPI receive function */ -static int fs_enet_rx_non_napi(struct net_device *dev) +static int fs_enet_tx_napi(struct napi_struct *napi, int budget) { - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - cbd_t __iomem *bdp; - struct sk_buff *skb, *skbn, *skbt; - int received = 0; - u16 pkt_len, sc; - int curidx; - /* - * First, grab all of the stats for the incoming packet. - * These get messed up if we get called due to a busy condition. - */ - bdp = fep->cur_rx; - - while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { - - curidx = bdp - fep->rx_bd_base; - - /* - * Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((sc & BD_ENET_RX_LAST) == 0) - dev_warn(fep->dev, "rcv is not +last\n"); - - /* - * Check for errors. - */ - if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | - BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { - fep->stats.rx_errors++; - /* Frame too long or too short. */ - if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) - fep->stats.rx_length_errors++; - /* Frame alignment */ - if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) - fep->stats.rx_frame_errors++; - /* CRC Error */ - if (sc & BD_ENET_RX_CR) - fep->stats.rx_crc_errors++; - /* FIFO overrun */ - if (sc & BD_ENET_RX_OV) - fep->stats.rx_crc_errors++; - - skb = fep->rx_skbuff[curidx]; - - dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); - - skbn = skb; - - } else { - - skb = fep->rx_skbuff[curidx]; - - dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE); - - /* - * Process the incoming frame. 
- */ - fep->stats.rx_packets++; - pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ - fep->stats.rx_bytes += pkt_len + 4; - - if (pkt_len <= fpi->rx_copybreak) { - /* +2 to make IP header L1 cache aligned */ - skbn = netdev_alloc_skb(dev, pkt_len + 2); - if (skbn != NULL) { - skb_reserve(skbn, 2); /* align IP header */ - skb_copy_from_linear_data(skb, - skbn->data, pkt_len); - /* swap */ - skbt = skb; - skb = skbn; - skbn = skbt; - } - } else { - skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); - - if (skbn) - skb_align(skbn, ENET_RX_ALIGN); - } - - if (skbn != NULL) { - skb_put(skb, pkt_len); /* Make room */ - skb->protocol = eth_type_trans(skb, dev); - received++; - netif_rx(skb); - } else { - fep->stats.rx_dropped++; - skbn = skb; - } - } - - fep->rx_skbuff[curidx] = skbn; - CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), - DMA_FROM_DEVICE)); - CBDW_DATLEN(bdp, 0); - CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); - - /* - * Update BD pointer to next entry. - */ - if ((sc & BD_ENET_RX_WRAP) == 0) - bdp++; - else - bdp = fep->rx_bd_base; - - (*fep->ops->rx_bd_done)(dev); - } - - fep->cur_rx = bdp; - - return 0; -} - -static void fs_enet_tx(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, + napi_tx); + struct net_device *dev = fep->ndev; cbd_t __iomem *bdp; struct sk_buff *skb; int dirtyidx, do_wake, do_restart; u16 sc; + int has_tx_work = 0; spin_lock(&fep->tx_lock); bdp = fep->dirty_tx; + /* clear TX status bits for napi*/ + (*fep->ops->napi_clear_tx_event)(dev); + do_wake = do_restart = 0; while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { dirtyidx = bdp - fep->tx_bd_base; @@ -400,7 +284,7 @@ static void fs_enet_tx(struct net_device *dev) /* * Free the sk buffer associated with this last transmit. 
*/ - dev_kfree_skb_irq(skb); + dev_kfree_skb(skb); fep->tx_skbuff[dirtyidx] = NULL; /* @@ -417,6 +301,7 @@ static void fs_enet_tx(struct net_device *dev) */ if (!fep->tx_free++) do_wake = 1; + has_tx_work = 1; } fep->dirty_tx = bdp; @@ -424,10 +309,19 @@ static void fs_enet_tx(struct net_device *dev) if (do_restart) (*fep->ops->tx_restart)(dev); + if (!has_tx_work) { + napi_complete(napi); + (*fep->ops->napi_enable_tx)(dev); + } + spin_unlock(&fep->tx_lock); if (do_wake) netif_wake_queue(dev); + + if (has_tx_work) + return budget; + return 0; } /* @@ -453,8 +347,7 @@ fs_enet_interrupt(int irq, void *dev_id) nr++; int_clr_events = int_events; - if (fpi->use_napi) - int_clr_events &= ~fep->ev_napi_rx; + int_clr_events &= ~fep->ev_napi_rx; (*fep->ops->clear_int_events)(dev, int_clr_events); @@ -462,23 +355,28 @@ fs_enet_interrupt(int irq, void *dev_id) (*fep->ops->ev_error)(dev, int_events); if (int_events & fep->ev_rx) { - if (!fpi->use_napi) - fs_enet_rx_non_napi(dev); - else { - napi_ok = napi_schedule_prep(&fep->napi); - - (*fep->ops->napi_disable_rx)(dev); - (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); - - /* NOTE: it is possible for FCCs in NAPI mode */ - /* to submit a spurious interrupt while in poll */ - if (napi_ok) - __napi_schedule(&fep->napi); - } + napi_ok = napi_schedule_prep(&fep->napi); + + (*fep->ops->napi_disable_rx)(dev); + (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); + + /* NOTE: it is possible for FCCs in NAPI mode */ + /* to submit a spurious interrupt while in poll */ + if (napi_ok) + __napi_schedule(&fep->napi); } - if (int_events & fep->ev_tx) - fs_enet_tx(dev); + if (int_events & fep->ev_tx) { + napi_ok = napi_schedule_prep(&fep->napi_tx); + + (*fep->ops->napi_disable_tx)(dev); + (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx); + + /* NOTE: it is possible for FCCs in NAPI mode */ + /* to submit a spurious interrupt while in poll */ + if (napi_ok) + __napi_schedule(&fep->napi_tx); + } } handled = nr > 0; @@ -611,7 +509,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) cbd_t __iomem *bdp; int curidx; u16 sc; - unsigned long flags; #ifdef CONFIG_FS_ENET_MPC5121_FEC if (((unsigned long)skb->data) & 0x3) { @@ -626,7 +523,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #endif - spin_lock_irqsave(&fep->tx_lock, flags); + spin_lock(&fep->tx_lock); /* * Fill in a Tx ring entry @@ -635,7 +532,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { netif_stop_queue(dev); - spin_unlock_irqrestore(&fep->tx_lock, flags); + spin_unlock(&fep->tx_lock); /* * Ooops. All transmit buffers are full. Bail out. @@ -691,7 +588,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) (*fep->ops->tx_kickstart)(dev); - spin_unlock_irqrestore(&fep->tx_lock, flags); + spin_unlock(&fep->tx_lock); return NETDEV_TX_OK; } @@ -811,24 +708,24 @@ static int fs_enet_open(struct net_device *dev) /* not doing this, will cause a crash in fs_enet_rx_napi */ fs_init_bds(fep->ndev); - if (fep->fpi->use_napi) - napi_enable(&fep->napi); + napi_enable(&fep->napi); + napi_enable(&fep->napi_tx); /* Install our interrupt handler. 
*/ r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, "fs_enet-mac", dev); if (r != 0) { dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); - if (fep->fpi->use_napi) - napi_disable(&fep->napi); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); return -EINVAL; } err = fs_init_phy(dev); if (err) { free_irq(fep->interrupt, dev); - if (fep->fpi->use_napi) - napi_disable(&fep->napi); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); return err; } phy_start(fep->phydev); @@ -845,8 +742,8 @@ static int fs_enet_close(struct net_device *dev) netif_stop_queue(dev); netif_carrier_off(dev); - if (fep->fpi->use_napi) - napi_disable(&fep->napi); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); @@ -1022,7 +919,6 @@ static int fs_enet_probe(struct platform_device *ofdev) fpi->rx_ring = 32; fpi->tx_ring = 32; fpi->rx_copybreak = 240; - fpi->use_napi = 1; fpi->napi_weight = 17; fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) { @@ -1102,9 +998,8 @@ static int fs_enet_probe(struct platform_device *ofdev) ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; - if (fpi->use_napi) - netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, - fpi->napi_weight); + netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); + netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); ndev->ethtool_ops = &fs_ethtool_ops; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 1ece4b1a689ec170983bfb57a0eebae714299d97..3a4b49e0e717b980e09c54d4cf98f42b56b56e53 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -84,6 +84,9 @@ struct fs_ops { void (*napi_clear_rx_event)(struct net_device *dev); void (*napi_enable_rx)(struct net_device *dev); void (*napi_disable_rx)(struct net_device *dev); + void (*napi_clear_tx_event)(struct net_device *dev); + void (*napi_enable_tx)(struct net_device *dev); + void (*napi_disable_tx)(struct net_device *dev); void (*rx_bd_done)(struct net_device *dev); void (*tx_kickstart)(struct net_device *dev); u32 (*get_int_events)(struct net_device *dev); @@ -119,6 +122,7 @@ struct phy_info { struct fs_enet_private { struct napi_struct napi; + struct napi_struct napi_tx; struct device *dev; /* pointer back to the device (must be initialized first) */ struct net_device *ndev; spinlock_t lock; /* during all ops except TX pckt processing */ @@ -149,6 +153,7 @@ struct fs_enet_private { /* event masks */ u32 ev_napi_rx; /* mask of NAPI rx events */ + u32 ev_napi_tx; /* mask of NAPI tx events */ u32 ev_rx; /* rx event mask */ u32 ev_tx; /* tx event mask */ u32 ev_err; /* error event mask */ @@ -191,8 +196,8 @@ void fs_cleanup_bds(struct net_device *dev); #define DRV_MODULE_NAME "fs_enet" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "Aug 8, 2005" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "Sep 22, 2014" /***************************************************************************/ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index f5383abbf3993cc920564c07f788c79e45c0cd1e..2c578db401e89dd978e27ce3f9b575c0fd9b4cad 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -125,6
+125,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) +#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXF | FCC_ENET_TXB) #define FCC_RX_EVENT (FCC_ENET_RXF) #define FCC_TX_EVENT (FCC_ENET_TXB) #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) @@ -137,6 +138,7 @@ static int setup_data(struct net_device *dev) return -EINVAL; fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK; fep->ev_rx = FCC_RX_EVENT; fep->ev_tx = FCC_TX_EVENT; fep->ev_err = FCC_ERR_EVENT_MSK; @@ -446,6 +448,30 @@ static void napi_disable_rx(struct net_device *dev) C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); } +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK); +} + static void rx_bd_done(struct net_device *dev) { /* nothing */ @@ -572,6 +598,9 @@ const struct fs_ops fs_fcc_ops = { .napi_clear_rx_event = napi_clear_rx_event, .napi_enable_rx = napi_enable_rx, .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, .rx_bd_done = rx_bd_done, .tx_kickstart = tx_kickstart, .get_int_events = get_int_events, diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 1eedfba2ad3c048517b2e95aa1b829c583cea263..3d4e08be170970b7f981c19a23e65c35f7ae860b 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,6 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ @@ -126,6 +127,7 @@ static int setup_data(struct net_device *dev) fep->fec.htlo = 0; fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK; fep->ev_rx = FEC_RX_EVENT; fep->ev_tx = FEC_TX_EVENT; fep->ev_err = FEC_ERR_EVENT_MSK; @@ -415,6 +417,30 @@ static void napi_disable_rx(struct net_device *dev) FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK); +} + static void rx_bd_done(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); @@ -487,6 +513,9 @@ const struct fs_ops fs_fec_ops = { .napi_clear_rx_event = napi_clear_rx_event, .napi_enable_rx = 
napi_enable_rx, .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, .rx_bd_done = rx_bd_done, .tx_kickstart = tx_kickstart, .get_int_events = get_int_events, diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 90b3b19b7cd324f09421497bcad865e53267c2a8..41aa0b475ca0af009f8edfcfdd9a9cd02a19ecb1 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -116,6 +116,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) +#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXF | SCCE_ENET_TXB) #define SCC_RX_EVENT (SCCE_ENET_RXF) #define SCC_TX_EVENT (SCCE_ENET_TXB) #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) @@ -130,6 +131,7 @@ static int setup_data(struct net_device *dev) fep->scc.htlo = 0; fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK; fep->ev_rx = SCC_RX_EVENT; fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; fep->ev_err = SCC_ERR_EVENT_MSK; @@ -398,6 +400,30 @@ static void napi_disable_rx(struct net_device *dev) C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); } +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK); +} + static void rx_bd_done(struct net_device *dev) { /* nothing */ @@ -471,6 +497,9 @@ const struct fs_ops fs_scc_ops = { .napi_clear_rx_event = napi_clear_rx_event, .napi_enable_rx = napi_enable_rx, .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, .rx_bd_done = rx_bd_done, .tx_kickstart = tx_kickstart, .get_int_events = get_int_events, diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index ed7916f6fbcff0731118843123e5af2d351f5b64..76a6e0c77d69e1a01544a388b465e7e82ddbfd87 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -1627,7 +1627,7 @@ static void hp100_clean_txring(struct net_device *dev) #endif /* Conversion to new PCI API : NOP */ pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); - dev_kfree_skb_any(lp->txrhead->skb); + dev_consume_skb_any(lp->txrhead->skb); lp->txrhead->skb = NULL; lp->txrhead = lp->txrhead->next; lp->txrcommit--; @@ -1745,7 +1745,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, hp100_ints_on(); spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); #ifdef HP100_DEBUG_TX printk("hp100: %s: start_xmit: end\n", dev->name); diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index bb9f0ba9d1646be934b4b911292e776f329a5fd9..6a6d5ee51e6a04bd077882b717742ee9a6e39522 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -300,4 +300,23 @@ config I40EVF will be called 
i40evf. MSI-X interrupt support is required for this driver to work correctly. +config FM10K + tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" + default n + depends on PCI_MSI + ---help--- + This driver supports Intel(R) FM10000 Ethernet Switch Host + Interface. For more information on how to identify your adapter, + go to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called fm10k. MSI-X interrupt support is required + endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index cdbbca8a3755b723bbb484886a94c23d62c5950a..5ea764d85ec341570350c35d39f42ee2139931ed 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_FM10K) += fm10k/ diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 10a0f221b18324a76aac7d9db316642bb8bef43b..69707108d23cdeb57bda5c6dd5e3e1aa29faa2b6 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -148,16 +148,23 @@ struct e1000_adapter; /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ -struct e1000_buffer { +struct e1000_tx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; unsigned long time_stamp; u16 length; u16 next_to_watch; - unsigned int segs; + bool mapped_as_page; + unsigned short segs; unsigned int bytecount; - u16 mapped_as_page; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; }; struct e1000_tx_ring { @@ -174,7 +181,7 @@ struct e1000_tx_ring { /* next descriptor to check for DD status bit */ unsigned int next_to_clean; /* array of buffer information structs */ - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; u16 tdh; u16 tdt; @@ -195,7 +202,7 @@ struct e1000_rx_ring { /* next descriptor to check for DD status bit */ unsigned int next_to_clean; /* array of buffer information structs */ - struct e1000_buffer *buffer_info; + struct e1000_rx_buffer *buffer_info; struct sk_buff *rx_skb_top; /* cpu for rx queue */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index cca5bca44e737dd6fdd23e758bcffb8e2ce119e7..b691eb4f63766b281786623f1b533cdcb4fb3a0d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1,35 +1,30 @@ /******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS - e1000-devel Mailing List - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ + * Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2006 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ /* ethtool support for e1000 */ #include "e1000.h" -#include +#include enum {NETDEV_STATS, E1000_STATS}; @@ -42,7 +37,7 @@ struct e1000_stats { #define E1000_STAT(m) E1000_STATS, \ sizeof(((struct e1000_adapter *)0)->m), \ - offsetof(struct e1000_adapter, m) + offsetof(struct e1000_adapter, m) #define E1000_NETDEV_STAT(m) NETDEV_STATS, \ sizeof(((struct net_device *)0)->m), \ offsetof(struct net_device, m) @@ -104,6 +99,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; + #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) static int e1000_get_settings(struct net_device *netdev, @@ -113,7 +109,6 @@ static int e1000_get_settings(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; if (hw->media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | @@ -155,9 +150,8 @@ static int e1000_get_settings(struct net_device *netdev, } if (er32(STATUS) & E1000_STATUS_LU) { - e1000_get_speed_and_duplex(hw, &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_duplex); ethtool_cmd_speed_set(ecmd, adapter->link_speed); /* unfortunately FULL_DUPLEX != DUPLEX_FULL @@ -247,9 +241,9 @@ static int e1000_set_settings(struct net_device *netdev, if (netif_running(adapter->netdev)) { e1000_down(adapter); e1000_up(adapter); - } else + } else { e1000_reset(adapter); - + } clear_bit(__E1000_RESETTING, &adapter->flags); return 0; } @@ -279,11 +273,11 @@ static void e1000_get_pauseparam(struct net_device *netdev, pause->autoneg = (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); - if (hw->fc == E1000_FC_RX_PAUSE) + if (hw->fc == E1000_FC_RX_PAUSE) { pause->rx_pause = 1; - else if (hw->fc == E1000_FC_TX_PAUSE) + } else if (hw->fc == E1000_FC_TX_PAUSE) { pause->tx_pause = 1; - else if (hw->fc == E1000_FC_FULL) { + } else if (hw->fc == E1000_FC_FULL) { pause->rx_pause = 1; pause->tx_pause = 1; } @@ -316,8 +310,9 @@ static int e1000_set_pauseparam(struct net_device *netdev, if (netif_running(adapter->netdev)) { e1000_down(adapter); e1000_up(adapter); - } else + } else { e1000_reset(adapter); + } } else retval = ((hw->media_type == e1000_media_type_fiber) ? e1000_setup_link(hw) : e1000_force_mac_fc(hw)); @@ -329,12 +324,14 @@ static int e1000_set_pauseparam(struct net_device *netdev, static u32 e1000_get_msglevel(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void e1000_set_msglevel(struct net_device *netdev, u32 data) { struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -526,7 +523,7 @@ static int e1000_set_eeprom(struct net_device *netdev, * only the first byte of the word is being modified */ ret_val = e1000_read_eeprom(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); + &eeprom_buff[last_word - first_word]); } /* Device's eeprom is always little-endian, word addressable */ @@ -618,13 +615,12 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->tx_ring = txdr; adapter->rx_ring = rxdr; - rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); - rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD)); rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); - - txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); - txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD)); txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); @@ -680,8 +676,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { struct e1000_hw *hw = &adapter->hw; - static const u32 test[] = - {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; u8 __iomem *address = hw->hw_addr + reg; u32 read; int i; @@ -793,8 +790,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); value = E1000_RAR_ENTRIES; for (i = 0; i < value; i++) { - REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, - 0xFFFFFFFF); + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); } } else { REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); @@ -877,7 +874,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { - /* Interrupt to test */ mask = 1 << i; @@ -972,10 +968,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) if (rxdr->buffer_info[i].dma) dma_unmap_single(&pdev->dev, rxdr->buffer_info[i].dma, - rxdr->buffer_info[i].length, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); - if (rxdr->buffer_info[i].skb) - dev_kfree_skb(rxdr->buffer_info[i].skb); + kfree(rxdr->buffer_info[i].rxbuf.data); } } @@ -1010,7 +1005,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) if (!txdr->count) txdr->count = E1000_DEFAULT_TXD; - txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), GFP_KERNEL); if (!txdr->buffer_info) { ret_val = 1; @@ -1069,7 +1064,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) if (!rxdr->count) rxdr->count = E1000_DEFAULT_RXD; - rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), GFP_KERNEL); if (!rxdr->buffer_info) { ret_val = 5; @@ -1099,25 +1094,25 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) for (i = 0; i < rxdr->count; i++) { struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); - struct sk_buff *skb; + u8 *buf; - skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); - if (!skb) { + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { ret_val = 7; goto err_nomem; } - skb_reserve(skb, NET_IP_ALIGN); - rxdr->buffer_info[i].skb = skb; - rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; + rxdr->buffer_info[i].rxbuf.data = buf; + rxdr->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, E1000_RXBUFFER_2048, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { ret_val = 8; goto err_nomem; } rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); - memset(skb->data, 0x00, skb->len); } return 0; @@ -1149,8 +1144,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) */ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); phy_reg |= M88E1000_EPSCR_TX_CLK_25; - e1000_write_phy_reg(hw, - M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); /* In addition, because of the s/w reset above, we need to enable * CRS on TX. 
This must be set for both full and half duplex @@ -1158,8 +1152,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) */ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - e1000_write_phy_reg(hw, - M88E1000_PHY_SPEC_CTRL, phy_reg); + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); } static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) @@ -1216,7 +1209,7 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) /* Check Phy Configuration */ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); if (phy_reg != 0x4100) - return 9; + return 9; e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); if (phy_reg != 0x0070) @@ -1261,7 +1254,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_FD); /* Force Duplex to FULL */ if (hw->media_type == e1000_media_type_copper && - hw->phy_type == e1000_phy_m88) + hw->phy_type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ else { /* Set the ILOS bit on the fiber Nic is half @@ -1299,7 +1292,7 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter) * attempt this 10 times. */ while (e1000_nonintegrated_phy_loopback(adapter) && - count++ < 10); + count++ < 10); if (count < 11) return 0; } @@ -1348,8 +1341,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) ew32(RCTL, rctl); return 0; } - } else if (hw->media_type == e1000_media_type_copper) + } else if (hw->media_type == e1000_media_type_copper) { return e1000_set_phy_loopback(adapter); + } return 7; } @@ -1391,13 +1385,13 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); } -static int e1000_check_lbtest_frame(struct sk_buff *skb, +static int e1000_check_lbtest_frame(const unsigned char *data, unsigned int frame_size) { frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { return 0; } } @@ -1410,7 +1404,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) struct e1000_tx_ring *txdr = &adapter->test_tx_ring; struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; - int i, j, k, l, lc, good_cnt, ret_val=0; + int i, j, k, l, lc, good_cnt, ret_val = 0; unsigned long time; ew32(RDT, rxdr->count - 1); @@ -1429,12 +1423,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) for (j = 0; j <= lc; j++) { /* loop count loop */ for (i = 0; i < 64; i++) { /* send the packets */ e1000_create_lbtest_frame(txdr->buffer_info[i].skb, - 1024); + 1024); dma_sync_single_for_device(&pdev->dev, txdr->buffer_info[k].dma, txdr->buffer_info[k].length, DMA_TO_DEVICE); - if (unlikely(++k == txdr->count)) k = 0; + if (unlikely(++k == txdr->count)) + k = 0; } ew32(TDT, k); E1000_WRITE_FLUSH(); @@ -1444,15 +1439,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) do { /* receive the sent packets */ dma_sync_single_for_cpu(&pdev->dev, rxdr->buffer_info[l].dma, - rxdr->buffer_info[l].length, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); ret_val = e1000_check_lbtest_frame( - rxdr->buffer_info[l].skb, + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, 1024); if (!ret_val) good_cnt++; - if (unlikely(++l == rxdr->count)) l = 0; + if (unlikely(++l == 
rxdr->count)) + l = 0; /* time + 20 msecs (200 msecs on 2.4) is more than * enough time to complete the receives, if it's * exceeded, break and error off @@ -1494,6 +1491,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) *data = 0; if (hw->media_type == e1000_media_type_internal_serdes) { int i = 0; + hw->serdes_has_link = false; /* On some blade server designs, link establishment @@ -1512,9 +1510,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) if (hw->autoneg) /* if auto_neg is set wait for it */ msleep(4000); - if (!(er32(STATUS) & E1000_STATUS_LU)) { + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; - } } return *data; } @@ -1665,8 +1662,7 @@ static void e1000_get_wol(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC; + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; /* this function will set ->supported = 0 and return 1 if wol is not @@ -1819,6 +1815,7 @@ static int e1000_set_coalesce(struct net_device *netdev, static int e1000_nway_reset(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) e1000_reinit_locked(adapter); return 0; @@ -1830,22 +1827,29 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); int i; char *p = NULL; + const struct e1000_stats *stat = e1000_gstrings_stats; e1000_update_stats(adapter); for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - switch (e1000_gstrings_stats[i].type) { + switch (stat->type) { case NETDEV_STATS: - p = (char *) netdev + - e1000_gstrings_stats[i].stat_offset; + p = (char *)netdev + stat->stat_offset; break; case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; + p = (char *)adapter + stat->stat_offset; + break; + default: + WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); break; } - data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + + stat++; } /* BUG_ON(i != E1000_STATS_LEN); */ } @@ -1858,8 +1862,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - memcpy(data, *e1000_gstrings_test, - sizeof(e1000_gstrings_test)); + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 1acf5034db10b3411507023a7b25abd6011a1836..45c8c864104ee7cb8f6a856ae322772da174c088 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -4836,84 +4836,6 @@ void e1000_update_adaptive(struct e1000_hw *hw) } } -/** - * e1000_tbi_adjust_stats - * @hw: Struct containing variables accessed by shared code - * @frame_len: The length of the frame in question - * @mac_addr: The Ethernet destination address of the frame in question - * - * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT - */ -void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, - u32 frame_len, u8 *mac_addr) -{ - u64 carry_bit; - - /* First adjust the frame length. 
*/ - frame_len--; - /* We need to adjust the statistics counters, since the hardware - * counters overcount this packet as a CRC error and undercount - * the packet as a good packet - */ - /* This packet should not be counted as a CRC error. */ - stats->crcerrs--; - /* This packet does count as a Good Packet Received. */ - stats->gprc++; - - /* Adjust the Good Octets received counters */ - carry_bit = 0x80000000 & stats->gorcl; - stats->gorcl += frame_len; - /* If the high bit of Gorcl (the low 32 bits of the Good Octets - * Received Count) was one before the addition, - * AND it is zero after, then we lost the carry out, - * need to add one to Gorch (Good Octets Received Count High). - * This could be simplified if all environments supported - * 64-bit integers. - */ - if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) - stats->gorch++; - /* Is this a broadcast or multicast? Check broadcast first, - * since the test for a multicast frame will test positive on - * a broadcast frame. - */ - if (is_broadcast_ether_addr(mac_addr)) - /* Broadcast packet */ - stats->bprc++; - else if (is_multicast_ether_addr(mac_addr)) - /* Multicast packet */ - stats->mprc++; - - if (frame_len == hw->max_frame_size) { - /* In this case, the hardware has overcounted the number of - * oversize frames. - */ - if (stats->roc > 0) - stats->roc--; - } - - /* Adjust the bin counters when the extra byte put the frame in the - * wrong bin. Remember that the frame_len was adjusted above. - */ - if (frame_len == 64) { - stats->prc64++; - stats->prc127--; - } else if (frame_len == 127) { - stats->prc127++; - stats->prc255--; - } else if (frame_len == 255) { - stats->prc255++; - stats->prc511--; - } else if (frame_len == 511) { - stats->prc511++; - stats->prc1023--; - } else if (frame_len == 1023) { - stats->prc1023++; - stats->prc1522--; - } else if (frame_len == 1522) { - stats->prc1522++; - } -} - /** * e1000_get_bus_info * @hw: Struct containing variables accessed by shared code diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index 11578c8978dbc99ec95bf19d5558749ca004cd76..5cf7268cc4e13697631f75000b5194319a5af5df 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -393,8 +393,6 @@ s32 e1000_blink_led_start(struct e1000_hw *hw); /* Everything else */ void e1000_reset_adaptive(struct e1000_hw *hw); void e1000_update_adaptive(struct e1000_hw *hw); -void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, - u32 frame_len, u8 * mac_addr); void e1000_get_bus_info(struct e1000_hw *hw); void e1000_pci_set_mwi(struct e1000_hw *hw); void e1000_pci_clear_mwi(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index ad3d5d12173faea9992f9e46f74f57e21043b05d..5f6aded512f539f9a12cb692f810fd3d8e560a3f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size; - size = sizeof(struct e1000_buffer) * txdr->count; + size = sizeof(struct e1000_tx_buffer) * txdr->count; txdr->buffer_info = vzalloc(size); if (!txdr->buffer_info) return -ENOMEM; @@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size, desc_len; - size = sizeof(struct e1000_buffer) * rxdr->count; + size 
= sizeof(struct e1000_rx_buffer) * rxdr->count; rxdr->buffer_info = vzalloc(size); if (!rxdr->buffer_info) return -ENOMEM; @@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter) e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); } -static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, - struct e1000_buffer *buffer_info) +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) { struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned long size; unsigned int i; @@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, } netdev_reset_queue(adapter->netdev); - size = sizeof(struct e1000_buffer) * tx_ring->count; + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ @@ -2053,6 +2054,28 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); } +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +static void e1000_free_frag(const void *data) +{ + put_page(virt_to_head_page(data)); +} + /** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure @@ -2062,44 +2085,42 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring) { struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; + struct e1000_rx_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; - /* Free all the Rx ring sk_buffs */ + /* Free all the Rx netfrags */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma && - adapter->clean_rx == e1000_clean_rx_irq) { - dma_unmap_single(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - } else if (buffer_info->dma && - adapter->clean_rx == e1000_clean_jumbo_rx_irq) { - dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + e1000_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } } buffer_info->dma = 0; - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } } /* there also may be some cached data from a chained receive */ - if 
(rx_ring->rx_skb_top) { - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - } + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; - size = sizeof(struct e1000_buffer) * rx_ring->count; + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ @@ -2678,7 +2699,7 @@ static int e1000_tso(struct e1000_adapter *adapter, __be16 protocol) { struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; u16 ipcse = 0, tucse, mss; @@ -2750,7 +2771,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, __be16 protocol) { struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i; u8 css; u32 cmd_len = E1000_TXD_CMD_DEXT; @@ -2809,7 +2830,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, { struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int len = skb_headlen(skb); unsigned int offset = 0, size, count = 0, i; unsigned int f, bytecount, segs; @@ -2955,7 +2976,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, { struct e1000_hw *hw = &adapter->hw; struct e1000_tx_desc *tx_desc = NULL; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; unsigned int i; @@ -3373,7 +3394,7 @@ static void e1000_dump(struct e1000_adapter *adapter) for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); - struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)tx_desc; const char *type; @@ -3415,7 +3436,7 @@ static void e1000_dump(struct e1000_adapter *adapter) for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); - struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)rx_desc; const char *type; @@ -3429,7 +3450,7 @@ static void e1000_dump(struct e1000_adapter *adapter) pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", i, le64_to_cpu(u->a), le64_to_cpu(u->b), - (u64)buffer_info->dma, buffer_info->skb, type); + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); } /* for */ /* dump the descriptor caches */ @@ -3811,7 +3832,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct e1000_tx_desc *tx_desc, *eop_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; @@ -3949,12 +3970,12 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, } /** - * e1000_consume_page - helper function + * e1000_consume_page - helper function for jumbo Rx path **/ -static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, u16 length) { - bi->page = NULL; + bi->rxbuf.page = NULL; skb->len += length; skb->data_len += length; 
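The e1000_frag_len() and e1000_alloc_frag() helpers added above size each receive fragment so the hot path can later wrap the raw buffer with build_skb() instead of copying it: cache-line-aligned headroom plus the DMA buffer, followed by an aligned skb_shared_info tail. A standalone sketch of that layout arithmetic follows; the macro values and the shared-info size are illustrative stand-ins, not the kernel's actual per-architecture figures.

#include <stdio.h>

/* Stand-ins for the kernel macros; the real values vary by architecture
 * and kernel configuration, so treat these as illustrative only.
 */
#define SMP_CACHE_BYTES		64
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN_UP((x), SMP_CACHE_BYTES)
#define NET_SKB_PAD		32
#define NET_IP_ALIGN		2
#define SHARED_INFO_SIZE	320	/* assumed sizeof(struct skb_shared_info) */

/* Mirrors the shape of e1000_frag_len(): aligned headroom plus buffer,
 * followed by an aligned shared-info tail, so build_skb() later finds the
 * skb_shared_info exactly at the end of the fragment.
 */
static unsigned int frag_len(unsigned int rx_buffer_len)
{
	return SKB_DATA_ALIGN(rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(SHARED_INFO_SIZE);
}

int main(void)
{
	printf("frag_len(2048) = %u\n", frag_len(2048));	/* 2432 with these stand-ins */
	return 0;
}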
skb->truesize += PAGE_SIZE; @@ -3980,6 +4001,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, napi_gro_receive(&adapter->napi, skb); } +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + /** * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure @@ -3994,12 +4122,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do) { - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long irq_flags; + struct e1000_rx_buffer *buffer_info, *next_buffer; u32 length; unsigned int i; int cleaned_count = 0; @@ -4020,8 +4146,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; if (++i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC(*rx_ring, i); @@ -4032,7 +4156,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); @@ -4040,25 +4164,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* errors is only valid for DD + EOP descriptors */ if (unlikely((status & E1000_RXD_STAT_EOP) && (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - u8 *mapped; - u8 last_byte; - - mapped = page_address(buffer_info->page); - last_byte = *(mapped + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, - irq_flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, mapped); - spin_unlock_irqrestore(&adapter->stats_lock, - irq_flags); + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; } else { - if (netdev->features & NETIF_F_RXALL) - goto process_skb; - /* recycle both page and skb */ - buffer_info->skb = skb; /* an error means any chain goes out the window * too */ @@ -4075,16 +4189,18 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* this descriptor is only 
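e1000_tbi_adjust_stats() above keeps the Good Octets Received counter as two 32-bit halves (gorcl/gorch) and, as its comment explains, detects a lost carry by checking whether bit 31 was set before the addition and clear after it. A minimal standalone model of that fix-up, with made-up counter values rather than the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* Minimal model of the gorcl/gorch fix-up: if bit 31 of the low word was set
 * before the addition and is clear afterwards, the 32-bit low word wrapped
 * and the high word must be bumped by one to preserve the 64-bit total.
 */
static void add_to_split_counter(uint32_t *lo, uint32_t *hi, uint32_t delta)
{
	uint32_t carry_bit = *lo & 0x80000000u;

	*lo += delta;
	if (carry_bit && !(*lo & 0x80000000u))
		(*hi)++;
}

int main(void)
{
	uint32_t lo = 0xFFFFFF00u, hi = 0;

	add_to_split_counter(&lo, &hi, 0x200);	/* low word wraps */
	printf("hi=%u lo=0x%08x\n", (unsigned)hi, (unsigned)lo);	/* hi=1 lo=0x00000100 */
	return 0;
}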
the beginning (or middle) */ if (!rxtop) { /* this is the beginning of a chain */ - rxtop = skb; - skb_fill_page_desc(rxtop, 0, buffer_info->page, + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, 0, length); } else { /* this is the middle of a chain */ skb_fill_page_desc(rxtop, skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the skb, only consumed the page */ - buffer_info->skb = skb; + buffer_info->rxbuf.page, 0, length); } e1000_consume_page(buffer_info, rxtop, length); goto next_desc; @@ -4093,32 +4209,51 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* end of the chain */ skb_fill_page_desc(rxtop, skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the current skb, we only consumed the - * page - */ - buffer_info->skb = skb; + buffer_info->rxbuf.page, 0, length); skb = rxtop; rxtop = NULL; e1000_consume_page(buffer_info, skb, length); } else { + struct page *p; /* no chain, got EOP, this buf is the packet * copybreak to save the put_page/alloc_page */ - if (length <= copybreak && - skb_tailroom(skb) >= length) { + p = buffer_info->rxbuf.page; + if (length <= copybreak) { u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page); + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); memcpy(skb_tail_pointer(skb), vaddr, length); kunmap_atomic(vaddr); /* re-use the page, so don't erase - * buffer_info->page + * buffer_info->rxbuf.page */ skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; } else { - skb_fill_page_desc(skb, 0, - buffer_info->page, 0, + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, length); e1000_consume_page(buffer_info, skb, length); @@ -4137,14 +4272,14 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, pskb_trim(skb, skb->len - 4); total_rx_packets++; - /* eth type trans needs skb->data to point to something */ - if (!pskb_may_pull(skb, ETH_HLEN)) { - e_err(drv, "pskb_may_pull failed.\n"); - dev_kfree_skb(skb); - goto next_desc; + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - e1000_receive_skb(adapter, status, rx_desc->special, skb); + napi_gro_frags(&adapter->napi); next_desc: rx_desc->status = 0; @@ -4175,25 +4310,25 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* this should improve performance for small packets with large amounts * of reassembly being done in the stack */ -static void e1000_check_copybreak(struct net_device *netdev, - struct e1000_buffer *buffer_info, - u32 length, struct sk_buff **skb) +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) { - struct sk_buff *new_skb; + struct sk_buff *skb; if (length > copybreak) - return; + return NULL; - new_skb = netdev_alloc_skb_ip_align(netdev, length); - if (!new_skb) - return; + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, 
buffer_info->dma, + length, DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, - (*skb)->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); - /* save the skb in buffer_info as good */ - buffer_info->skb = *skb; - *skb = new_skb; + memcpy(skb_put(skb, length), data, length); + + return skb; } /** @@ -4207,12 +4342,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do) { - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long flags; + struct e1000_rx_buffer *buffer_info, *next_buffer; u32 length; unsigned int i; int cleaned_count = 0; @@ -4225,6 +4358,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, while (rx_desc->status & E1000_RXD_STAT_DD) { struct sk_buff *skb; + u8 *data; u8 status; if (*work_done >= work_to_do) @@ -4233,10 +4367,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } - prefetch(skb->data - NET_IP_ALIGN); + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } if (++i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC(*rx_ring, i); @@ -4246,11 +4397,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; - dma_unmap_single(&pdev->dev, buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); - buffer_info->dma = 0; - length = le16_to_cpu(rx_desc->length); /* !EOP means multiple descriptors were used to store a single * packet, if thats the case we need to toss it. 
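The copybreak rework above trades one short memcpy for keeping the receive buffer: frames at or below the copybreak threshold are copied into a tightly sized skb so the original fragment can be reused at once, while larger frames stay on the zero-copy path. A rough standalone model of that decision, with an assumed threshold value:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256	/* assumed threshold; in the driver it is a module parameter */

/* Small frames are copied into a fresh, tightly sized buffer so the large
 * receive buffer can be recycled immediately; big frames keep the zero-copy
 * path and hand the original buffer up the stack.
 */
static bool deliver(const unsigned char *rx_buf, size_t len)
{
	if (len <= COPYBREAK) {
		unsigned char *copy = malloc(len);

		if (!copy)
			return false;
		memcpy(copy, rx_buf, len);
		/* rx_buf may be handed straight back to the hardware here */
		free(copy);		/* stands in for passing the copy up the stack */
		return true;
	}
	/* pass rx_buf itself upward and allocate a replacement later */
	return true;
}

int main(void)
{
	unsigned char frame[64] = { 0 };

	printf("%d\n", deliver(frame, sizeof(frame)));
	return 0;
}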
In fact, we * to toss every packet with the EOP bit clear and the next @@ -4262,29 +4409,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ - e_dbg("Receive packet consumed multiple buffers\n"); - /* recycle */ - buffer_info->skb = skb; + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); if (status & E1000_RXD_STAT_EOP) adapter->discarding = false; goto next_desc; } if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { - u8 last_byte = *(skb->data + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); - spin_unlock_irqrestore(&adapter->stats_lock, - flags); + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; } else { - if (netdev->features & NETIF_F_RXALL) - goto process_skb; - /* recycle */ - buffer_info->skb = skb; + dev_kfree_skb(skb); goto next_desc; } } @@ -4299,9 +4439,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, */ length -= 4; - e1000_check_copybreak(netdev, buffer_info, length, &skb); - - skb_put(skb, length); + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); /* Receive Checksum Offload */ e1000_rx_checksum(adapter, @@ -4347,38 +4488,19 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; + struct e1000_rx_buffer *buffer_info; unsigned int i; - unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto check_page; - } - - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -check_page: /* allocate a new page if necessary */ - if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); - if (unlikely(!buffer_info->page)) { + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { adapter->alloc_rx_buff_failed++; break; } @@ -4386,17 +4508,15 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - buffer_info->length, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - put_page(buffer_info->page); - dev_kfree_skb(skb); - buffer_info->page = NULL; - buffer_info->skb = NULL; + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } } @@ -4432,11 +4552,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, int cleaned_count) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; 
struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; + struct e1000_rx_buffer *buffer_info; unsigned int i; unsigned int bufsz = adapter->rx_buffer_len; @@ -4444,57 +4562,52 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto map_skb; - } + void *data; - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - struct sk_buff *oldskb = skb; + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; e_err(rx_err, "skb align check failed: %u bytes at " - "%p\n", bufsz, skb->data); + "%p\n", bufsz, data); /* Try again, without freeing the previous */ - skb = netdev_alloc_skb_ip_align(netdev, bufsz); + data = e1000_alloc_frag(adapter); /* Failed allocation, critical failure */ - if (!skb) { - dev_kfree_skb(oldskb); + if (!data) { + e1000_free_frag(olddata); adapter->alloc_rx_buff_failed++; break; } - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + if (!e1000_check_64k_bound(adapter, data, bufsz)) { /* give up */ - dev_kfree_skb(skb); - dev_kfree_skb(oldskb); + e1000_free_frag(data); + e1000_free_frag(olddata); adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } /* Use new allocation */ - dev_kfree_skb(oldskb); + e1000_free_frag(olddata); } - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -map_skb: buffer_info->dma = dma_map_single(&pdev->dev, - skb->data, - buffer_info->length, + data, + adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_kfree_skb(skb); - buffer_info->skb = NULL; + e1000_free_frag(data); buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } /* XXX if it was allocated cleanly it will never map to a @@ -4508,17 +4621,20 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, e_err(rx_err, "dma align check failed: %u bytes at " "%p\n", adapter->rx_buffer_len, (void *)(unsigned long)buffer_info->dma); - dev_kfree_skb(skb); - buffer_info->skb = NULL; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); + + e1000_free_frag(data); + buffer_info->rxbuf.data = NULL; buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } + buffer_info->rxbuf.data = data; + skip: rx_desc = E1000_RX_DESC(*rx_ring, i); rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..08859dd220a8b69981ecbdb744e8c40f0a07d9e2 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -0,0 +1,33 @@ +################################################################################ +# +# Intel Ethernet Switch Host Interface Driver +# Copyright(c) 2013 - 2014 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
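e1000_alloc_rx_buffers() above retries an allocation whenever e1000_check_64k_bound() rejects it, because errata 23 forbids a receive buffer from straddling a 64 KiB boundary. One plausible form of such a check, not necessarily the driver's exact implementation, simply compares the 64 KiB region of the first and last byte:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + len) stays inside a single 64 KiB region. */
static bool fits_in_64k(uintptr_t addr, size_t len)
{
	uintptr_t first = addr >> 16;
	uintptr_t last = (addr + len - 1) >> 16;

	return len != 0 && first == last;
}

int main(void)
{
	printf("%d\n", fits_in_64k(0x0000F000, 0x1000));	/* 1: ends at 0xFFFF */
	printf("%d\n", fits_in_64k(0x0000F001, 0x1000));	/* 0: crosses 0x10000 */
	return 0;
}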
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver +# + +obj-$(CONFIG_FM10K) += fm10k.o + +fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \ + fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \ + fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \ + fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h new file mode 100644 index 0000000000000000000000000000000000000000..42eb4344a9dc077c52bfa97723d99af2bbcbe06b --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -0,0 +1,530 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_H_ +#define _FM10K_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ + +#define MAX_QUEUES FM10K_MAX_QUEUES_PF + +#define FM10K_MIN_RXD 128 +#define FM10K_MAX_RXD 4096 +#define FM10K_DEFAULT_RXD 256 + +#define FM10K_MIN_TXD 128 +#define FM10K_MAX_TXD 4096 +#define FM10K_DEFAULT_TXD 256 +#define FM10K_DEFAULT_TX_WORK 256 + +#define FM10K_RXBUFFER_256 256 +#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256 +#define FM10K_RXBUFFER_2048 2048 +#define FM10K_RX_BUFSZ FM10K_RXBUFFER_2048 + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define FM10K_MAX_STATIONS 63 +struct fm10k_l2_accel { + int size; + u16 count; + u16 dglort; + struct rcu_head rcu; + struct net_device *macvlan[0]; +}; + +enum fm10k_ring_state_t { + __FM10K_TX_DETECT_HANG, + __FM10K_HANG_CHECK_ARMED, +}; + +#define check_for_tx_hang(ring) \ + test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) + +struct fm10k_tx_buffer { + struct fm10k_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + u16 tx_flags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct fm10k_rx_buffer { + dma_addr_t dma; + struct page *page; + u32 page_offset; +}; + +struct fm10k_queue_stats { + u64 packets; + u64 bytes; +}; + +struct fm10k_tx_queue_stats { + u64 restart_queue; + u64 csum_err; + u64 tx_busy; + u64 tx_done_old; +}; + +struct fm10k_rx_queue_stats { + u64 alloc_failed; + u64 csum_err; + u64 errors; +}; + +struct fm10k_ring { + struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct fm10k_l2_accel __rcu *l2_accel; /* L2 acceleration list */ + void *desc; /* descriptor ring memory */ + union { + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_rx_buffer *rx_buffer; + }; + u32 __iomem *tail; + unsigned long state; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u8 queue_index; /* needed for queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u8 qos_pc; /* priority class of queue */ + u16 vid; /* default vlan ID of queue */ + u16 count; /* amount of descriptors */ + + u16 next_to_alloc; + u16 next_to_use; + u16 next_to_clean; + + struct fm10k_queue_stats stats; + struct u64_stats_sync syncp; + union { + /* Tx */ + struct fm10k_tx_queue_stats tx_stats; + /* Rx */ + struct { + struct fm10k_rx_queue_stats rx_stats; + struct sk_buff *skb; + }; + }; +} ____cacheline_internodealigned_in_smp; + +struct fm10k_ring_container { + struct fm10k_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u16 itr; /* interrupt throttle rate value */ + u8 count; /* total number of rings in vector */ +}; + +#define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ +#define FM10K_ITR_10K 100 /* 100us */ +#define FM10K_ITR_20K 50 /* 50us */ +#define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ + +#define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) + +static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) +{ + return &ring->netdev->_tx[ring->queue_index]; +} + +/* iterator for handling rings in ring container */ +#define fm10k_for_each_ring(pos, head) \ + for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;) + +#define MAX_Q_VECTORS 256 +#define MIN_Q_VECTORS 1 +enum fm10k_non_q_vectors { + FM10K_MBX_VECTOR, +#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF + NON_Q_VECTORS_PF +}; + +#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) 
? \ + NON_Q_VECTORS_PF : \ + NON_Q_VECTORS_VF) +#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw)) + +struct fm10k_q_vector { + struct fm10k_intfc *interface; + u32 __iomem *itr; /* pointer to ITR register for this vector */ + u16 v_idx; /* index of q_vector within interface array */ + struct fm10k_ring_container rx, tx; + + struct napi_struct napi; + char name[IFNAMSIZ + 9]; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbg_q_vector; +#endif /* CONFIG_DEBUG_FS */ + struct rcu_head rcu; /* to avoid race with update stats on free */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum fm10k_ring_f_enum { + RING_F_RSS, + RING_F_QOS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +struct fm10k_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +struct fm10k_iov_data { + unsigned int num_vfs; + unsigned int next_vf_mbx; + struct rcu_head rcu; + struct fm10k_vf_info vf_info[0]; +}; + +#define fm10k_vxlan_port_for_each(vp, intfc) \ + list_for_each_entry(vp, &(intfc)->vxlan_port, list) +struct fm10k_vxlan_port { + struct list_head list; + sa_family_t sa_family; + __be16 port; +}; + +struct fm10k_intfc { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *netdev; + struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */ + struct pci_dev *pdev; + unsigned long state; + + u32 flags; +#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0) +#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1) +#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) +#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) + int xcast_mode; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr; + + /* TX */ + struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 tx_busy; + u64 tx_csum_errors; + u64 alloc_failed; + u64 rx_csum_errors; + u64 rx_errors; + + u64 tx_bytes_nic; + u64 tx_packets_nic; + u64 rx_bytes_nic; + u64 rx_packets_nic; + u64 rx_drops_nic; + u64 rx_overrun_pf; + u64 rx_overrun_vf; + u32 tx_timeout_count; + + /* RX */ + struct fm10k_ring *rx_ring[MAX_QUEUES]; + + /* Queueing vectors */ + struct fm10k_q_vector *q_vector[MAX_Q_VECTORS]; + struct msix_entry *msix_entries; + int num_q_vectors; /* current number of q_vectors for device */ + struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + + /* SR-IOV information management structure */ + struct fm10k_iov_data *iov_data; + + struct fm10k_hw_stats stats; + struct fm10k_hw hw; + u32 __iomem *uc_addr; + u32 __iomem *sw_addr; + u16 msg_enable; + u16 tx_ring_count; + u16 rx_ring_count; + struct timer_list service_timer; + struct work_struct service_task; + unsigned long next_stats_update; + unsigned long next_tx_hang_check; + unsigned long last_reset; + unsigned long link_down_event; + bool host_ready; + + u32 reta[FM10K_RETA_SIZE]; + u32 rssrk[FM10K_RSSRK_SIZE]; + + /* VXLAN port tracking information */ + struct list_head vxlan_port; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbg_intfc; + +#endif /* CONFIG_DEBUG_FS */ + struct ptp_clock_info ptp_caps; + struct ptp_clock *ptp_clock; + + struct sk_buff_head ts_tx_skb_queue; + u32 tx_hwtstamp_timeouts; + + struct hwtstamp_config ts_config; + /* We 
are unable to actually adjust the clock beyond the frequency + * value. Once the clock is started there is no resetting it. As + * such we maintain a separate offset from the actual hardware clock + * to allow for offset adjustment. + */ + s64 ptp_adjust; + rwlock_t systime_lock; +#ifdef CONFIG_DCB + u8 pfc_en; +#endif + u8 rx_pause; + + /* GLORT resources in use by PF */ + u16 glort; + u16 glort_count; + + /* VLAN ID for updating multicast/unicast lists */ + u16 vid; +}; + +enum fm10k_state_t { + __FM10K_RESETTING, + __FM10K_DOWN, + __FM10K_SERVICE_SCHED, + __FM10K_SERVICE_DISABLE, + __FM10K_MBX_LOCK, + __FM10K_LINK_DOWN, +}; + +static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) +{ + /* busy loop if we cannot obtain the lock as some calls + * such as ndo_set_rx_mode may be made in atomic context + */ + while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state)) + udelay(20); +} + +static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface) +{ + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__FM10K_MBX_LOCK, &interface->state); +} + +static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface) +{ + return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state); +} + +/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */ +static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->d.staterr & cpu_to_le32(stat_err_bits); +} + +/* fm10k_desc_unused - calculate if we have unused descriptors */ +static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) +{ + s16 unused = ring->next_to_clean - ring->next_to_use - 1; + + return likely(unused < 0) ? unused + ring->count : unused; +} + +#define FM10K_TX_DESC(R, i) \ + (&(((struct fm10k_tx_desc *)((R)->desc))[i])) +#define FM10K_RX_DESC(R, i) \ + (&(((union fm10k_rx_desc *)((R)->desc))[i])) + +#define FM10K_MAX_TXD_PWR 14 +#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum fm10k_tx_flags { + /* Tx offload flags */ + FM10K_TX_FLAGS_CSUM = 0x01, +}; + +/* This structure is stored as little endian values as that is the native + * format of the Rx descriptor. 
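fm10k_desc_unused() above computes the number of free descriptors with a signed subtraction so the common case needs no explicit wrap handling: next_to_clean - next_to_use - 1 goes negative whenever next_to_use is at or ahead of next_to_clean, and adding the ring size folds it back into range. A standalone model with worked values (ring size chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as fm10k_desc_unused(): the signed 16-bit result is
 * negative in the common case where next_to_use is at or ahead of
 * next_to_clean, and adding the ring count maps it back to the number of
 * free descriptors.
 */
static uint16_t desc_unused(uint16_t count, uint16_t next_to_clean,
			    uint16_t next_to_use)
{
	int16_t unused = next_to_clean - next_to_use - 1;

	return unused < 0 ? unused + count : unused;
}

int main(void)
{
	/* 256-entry ring: clean at 10, use at 200 -> 10 - 200 - 1 + 256 = 65 free */
	printf("%u\n", (unsigned)desc_unused(256, 10, 200));
	/* use just behind clean -> ring is full: 10 - 9 - 1 = 0 free */
	printf("%u\n", (unsigned)desc_unused(256, 10, 9));
	return 0;
}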
The ordering of these fields is reversed + * from the actual ftag header to allow for a single bswap to take care + * of placing all of the values in network order + */ +union fm10k_ftag_info { + __le64 ftag; + struct { + /* dglort and sglort combined into a single 32bit desc read */ + __le32 glort; + /* upper 16 bits of vlan are reserved 0 for swpri_type_user */ + __le32 vlan; + } d; + struct { + __le16 dglort; + __le16 sglort; + __le16 vlan; + __le16 swpri_type_user; + } w; +}; + +struct fm10k_cb { + union { + __le64 tstamp; + unsigned long ts_tx_timeout; + }; + union fm10k_ftag_info fi; +}; + +#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb) + +/* main */ +extern char fm10k_driver_name[]; +extern const char fm10k_driver_version[]; +int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); +void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, + struct fm10k_ring *tx_ring); +void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); +bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring); +void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); + +/* PCI */ +void fm10k_mbx_free_irq(struct fm10k_intfc *); +int fm10k_mbx_request_irq(struct fm10k_intfc *); +void fm10k_qv_free_irq(struct fm10k_intfc *interface); +int fm10k_qv_request_irq(struct fm10k_intfc *interface); +int fm10k_register_pci_driver(void); +void fm10k_unregister_pci_driver(void); +void fm10k_up(struct fm10k_intfc *interface); +void fm10k_down(struct fm10k_intfc *interface); +void fm10k_update_stats(struct fm10k_intfc *interface); +void fm10k_service_event_schedule(struct fm10k_intfc *interface); +void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); + +/* Netdev */ +struct net_device *fm10k_alloc_netdev(void); +int fm10k_setup_rx_resources(struct fm10k_ring *); +int fm10k_setup_tx_resources(struct fm10k_ring *); +void fm10k_free_rx_resources(struct fm10k_ring *); +void fm10k_free_tx_resources(struct fm10k_ring *); +void fm10k_clean_all_rx_rings(struct fm10k_intfc *); +void fm10k_clean_all_tx_rings(struct fm10k_intfc *); +void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *, + struct fm10k_tx_buffer *); +void fm10k_restore_rx_state(struct fm10k_intfc *); +void fm10k_reset_rx_state(struct fm10k_intfc *); +int fm10k_setup_tc(struct net_device *dev, u8 tc); +int fm10k_open(struct net_device *netdev); +int fm10k_close(struct net_device *netdev); + +/* Ethtool */ +void fm10k_set_ethtool_ops(struct net_device *dev); + +/* IOV */ +s32 fm10k_iov_event(struct fm10k_intfc *interface); +s32 fm10k_iov_mbx(struct fm10k_intfc *interface); +void fm10k_iov_suspend(struct pci_dev *pdev); +int fm10k_iov_resume(struct pci_dev *pdev); +void fm10k_iov_disable(struct pci_dev *pdev); +int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs); +s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); +int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); +int fm10k_ndo_set_vf_vlan(struct net_device *netdev, + int vf_idx, u16 vid, u8 qos); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, + int unused); +int fm10k_ndo_get_vf_config(struct net_device *netdev, + int vf_idx, struct ifla_vf_info *ivi); + +/* DebugFS */ +#ifdef CONFIG_DEBUG_FS +void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector); +void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector); +void fm10k_dbg_intfc_init(struct fm10k_intfc *interface); +void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface); 
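The fm10k_ftag_info comment above relies on a general trick: if the header fields are stored in reverse order as host little-endian values, one 64-bit byte swap emits them in the original order in network byte order. A toy two-field demonstration, with made-up field values and the gcc/clang bswap builtin standing in for the kernel's helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Wire order is field A then field B, each in network byte order.
	 * Store them reversed (B in the low half, A in the high half) as a
	 * single host-endian 64-bit value...
	 */
	uint32_t a = 0x11223344, b = 0xAABBCCDD;
	uint64_t reversed = ((uint64_t)a << 32) | b;

	/* ...then a single byte swap lays the bytes out as A followed by B,
	 * each big-endian, which is exactly the on-wire header.
	 */
	uint64_t wire = __builtin_bswap64(reversed);	/* gcc/clang builtin */
	const unsigned char *p = (const unsigned char *)&wire;
	int i;

	for (i = 0; i < 8; i++)
		printf("%02x ", p[i]);
	printf("\n");	/* 11 22 33 44 aa bb cc dd on a little-endian host */
	return 0;
}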
+void fm10k_dbg_init(void); +void fm10k_dbg_exit(void); +#else +static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {} +static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {} +static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {} +static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {} +static inline void fm10k_dbg_init(void) {} +static inline void fm10k_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS */ + +/* Time Stamping */ +void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, + struct skb_shared_hwtstamps *hwtstamp, + u64 systime); +void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb); +void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, + u64 systime); +void fm10k_ts_reset(struct fm10k_intfc *interface); +void fm10k_ts_init(struct fm10k_intfc *interface); +void fm10k_ts_tx_subtask(struct fm10k_intfc *interface); +void fm10k_ptp_register(struct fm10k_intfc *interface); +void fm10k_ptp_unregister(struct fm10k_intfc *interface); +int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); + +/* DCB */ +void fm10k_dcbnl_set_ops(struct net_device *dev); +#endif /* _FM10K_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c new file mode 100644 index 0000000000000000000000000000000000000000..bf19dccd4288d8693197520d65b2f01251d891c0 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -0,0 +1,534 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_common.h" + +/** + * fm10k_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the fm10k_hw structure. 
+ **/ +s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw) +{ + u16 link_cap, link_status, device_cap, device_control; + + /* Get the maximum link width and speed from PCIe config space */ + link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP); + + switch (link_cap & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus_caps.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus_caps.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus_caps.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus_caps.width = fm10k_bus_width_unknown; + break; + } + + switch (link_cap & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus_caps.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus_caps.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus_caps.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus_caps.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the PCIe maximum payload size for the PCIe function */ + device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP); + + switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) { + case FM10K_PCIE_DEV_CAP_PAYLOAD_128: + hw->bus_caps.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_256: + hw->bus_caps.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_512: + hw->bus_caps.payload = fm10k_bus_payload_512; + break; + default: + hw->bus_caps.payload = fm10k_bus_payload_unknown; + break; + } + + /* Get the negotiated link width and speed from PCIe config space */ + link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS); + + switch (link_status & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus.width = fm10k_bus_width_unknown; + break; + } + + switch (link_status & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the negotiated PCIe maximum payload size for the PCIe function */ + device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL); + + switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) { + case FM10K_PCIE_DEV_CTRL_PAYLOAD_128: + hw->bus.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_256: + hw->bus.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_512: + hw->bus.payload = fm10k_bus_payload_512; + break; + default: + hw->bus.payload = fm10k_bus_payload_unknown; + break; + } + + return 0; +} + +static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw) +{ + u16 msix_count; + + /* read in value from MSI-X capability register */ + msix_count = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL); + msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > 
FM10K_MAX_MSIX_VECTORS) + msix_count = FM10K_MAX_MSIX_VECTORS; + + return msix_count; +} + +/** + * fm10k_get_invariants_generic - Inits constant values + * @hw: pointer to the hardware structure + * + * Initialize the common invariants for the device. + **/ +s32 fm10k_get_invariants_generic(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + /* initialize GLORT state to avoid any false hits */ + mac->dglort_map = FM10K_DGLORTMAP_NONE; + + /* record maximum number of MSI-X vectors */ + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * fm10k_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * This function sets the Tx ready flag to indicate that the Tx path has + * been initialized. + **/ +s32 fm10k_start_hw_generic(struct fm10k_hw *hw) +{ + /* set flag indicating we are beginning Tx */ + hw->mac.tx_ready = true; + + return 0; +} + +/** + * fm10k_disable_queues_generic - Stop Tx/Rx queues + * @hw: pointer to hardware structure + * @q_cnt: number of queues to be disabled + * + **/ +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt) +{ + u32 reg; + u16 i, time; + + /* clear tx_ready to prevent any false hits for reset */ + hw->mac.tx_ready = false; + + /* clear the enable bit for all rings */ + for (i = 0; i < q_cnt; i++) { + reg = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + fm10k_write_reg(hw, FM10K_TXDCTL(i), + reg & ~FM10K_TXDCTL_ENABLE); + reg = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + fm10k_write_reg(hw, FM10K_RXQCTL(i), + reg & ~FM10K_RXQCTL_ENABLE); + } + + fm10k_write_flush(hw); + udelay(1); + + /* loop through all queues to verify that they are all disabled */ + for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) { + /* if we are at end of rings all rings are disabled */ + if (i == q_cnt) + return 0; + + /* if queue enables cleared, then move to next ring pair */ + reg = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) { + reg = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) { + i++; + continue; + } + } + + /* decrement time and wait 1 usec */ + time--; + if (time) + udelay(1); + } + + return FM10K_ERR_REQUESTS_PENDING; +} + +/** + * fm10k_stop_hw_generic - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw) +{ + return fm10k_disable_queues_generic(hw, hw->mac.max_queues); +} + +/** + * fm10k_read_hw_stats_32b - Reads value of 32-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing a 32-bit value + * + * Function reads the content of the register and returns the delta + * between the base and the current value. + * **/ +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 delta = fm10k_read_reg(hw, addr) - stat->base_l; + + if (FM10K_REMOVED(hw->hw_addr)) + stat->base_h = 0; + + return delta; +} + +/** + * fm10k_read_hw_stats_48b - Reads value of 48-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing the lower 32-bit value + * + * Function reads the content of 2 registers, combined to represent a 48-bit + * statistical value. Extra processing is required to handle overflowing. + * Finally, a delta value is returned representing the difference between the + * values stored in registers and values stored in the statistic counters. 
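fm10k_disable_queues_generic() above shares a single timeout budget across every queue it polls and treats an all-ones register read (!~reg) as hardware that is no longer present, so a surprise-removed device does not stall the loop. A simplified standalone model of that polling pattern, tracking one simulated register per queue instead of the TXDCTL/RXQCTL pair:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES	4
#define ENABLE_BIT	0x1u
#define DISABLE_TIMEOUT	100	/* polling budget shared by all queues */

/* Simulated TXDCTL-style registers; a value of 0xFFFFFFFF would model a
 * surprise-removed device, where every read returns all ones.
 */
static uint32_t txdctl[NUM_QUEUES] = { 0, 0, ENABLE_BIT, 0 };

static uint32_t read_txdctl(int q)
{
	static int polls;

	/* queue 2 drains and clears its enable bit after a few polls */
	if (q == 2 && ++polls > 3)
		txdctl[2] &= ~ENABLE_BIT;
	return txdctl[q];
}

/* Same shape as fm10k_disable_queues_generic(): one time budget covers all
 * queues, and an all-ones read means there is nothing left to wait for.
 */
static bool wait_queues_disabled(void)
{
	int q = 0, time = DISABLE_TIMEOUT;

	while (time) {
		uint32_t reg;

		if (q == NUM_QUEUES)
			return true;

		reg = read_txdctl(q);
		if (!~reg || !(reg & ENABLE_BIT)) {
			q++;
			continue;
		}
		time--;	/* the driver also udelay()s here */
	}
	return false;	/* stands in for FM10K_ERR_REQUESTS_PENDING */
}

int main(void)
{
	printf("all disabled: %d\n", wait_queues_disabled());
	return 0;
}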
+ * **/ +static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 count_l; + u32 count_h; + u32 count_tmp; + u64 delta; + + count_h = fm10k_read_reg(hw, addr + 1); + + /* Check for overflow */ + do { + count_tmp = count_h; + count_l = fm10k_read_reg(hw, addr); + count_h = fm10k_read_reg(hw, addr + 1); + } while (count_h != count_tmp); + + delta = ((u64)(count_h - stat->base_h) << 32) + count_l; + delta -= stat->base_l; + + return delta & FM10K_48_BIT_MASK; +} + +/** + * fm10k_update_hw_base_48b - Updates 48-bit statistic base value + * @stat: pointer to the hardware statistic structure + * @delta: value to be updated into the hardware statistic structure + * + * Function receives a value and determines if an update is required based on + * a delta calculation. Only the base value will be updated. + **/ +static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta) +{ + if (!delta) + return; + + /* update lower 32 bits */ + delta += stat->base_l; + stat->base_l = (u32)delta; + + /* update upper 32 bits */ + stat->base_h += (u32)(delta >> 32); +} + +/** + * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the TX queue statistics counters that are related to the + * hardware. + **/ +static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_tx, id_tx_prev, tx_packets; + u64 tx_bytes = 0; + + /* Retrieve TX Owner Data */ + id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx)); + + /* Process TX Ring */ + do { + tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx), + &q->tx_packets); + + if (tx_packets) + tx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBTC_L(idx), + &q->tx_bytes); + + /* Re-Check Owner Data */ + id_tx_prev = id_tx; + id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx)); + } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_tx &= FM10K_TXQCTL_ID_MASK; + id_tx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->tx_stats_idx == id_tx) { + q->tx_packets.count += tx_packets; + q->tx_bytes.count += tx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); + fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); + + q->tx_stats_idx = id_tx; +} + +/** + * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the RX queue statistics counters that are related to the + * hardware. 
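fm10k_read_hw_stats_48b() above has to assemble a 48-bit counter from two 32-bit registers while the hardware keeps counting, so it re-reads the high word until two consecutive reads agree, which guarantees the low word was sampled under a stable high word. A standalone model of that consistent split read, with a simulated register pair that advances on every access:

#include <stdint.h>
#include <stdio.h>

/* Simulated free-running 48-bit hardware counter, exposed as two 32-bit
 * registers. Each read advances it to mimic traffic arriving while the
 * driver samples it, starting just below a low-word rollover.
 */
static uint64_t hw_counter = 0x0000FFFFFFFFULL - 2;

static uint32_t read_reg(int high)
{
	uint64_t snap = hw_counter;

	hw_counter = (hw_counter + 3) & 0xFFFFFFFFFFFFULL;	/* keeps moving */
	return high ? (uint32_t)(snap >> 32) : (uint32_t)snap;
}

/* Same pattern as fm10k_read_hw_stats_48b(): re-read the high word until it
 * is stable, so the low word is known to belong to that high word.
 */
static uint64_t read_48b(void)
{
	uint32_t lo, hi, hi_prev;

	hi = read_reg(1);
	do {
		hi_prev = hi;
		lo = read_reg(0);
		hi = read_reg(1);
	} while (hi != hi_prev);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* prints a value just past the rollover instead of a torn read */
	printf("0x%012llx\n", (unsigned long long)read_48b());
	return 0;
}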
+ **/ +static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_rx, id_rx_prev, rx_packets, rx_drops; + u64 rx_bytes = 0; + + /* Retrieve RX Owner Data */ + id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); + + /* Process RX Ring*/ + do { + rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), + &q->rx_drops); + + rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx), + &q->rx_packets); + + if (rx_packets) + rx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBRC_L(idx), + &q->rx_bytes); + + /* Re-Check Owner Data */ + id_rx_prev = id_rx; + id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); + } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_rx &= FM10K_RXQCTL_ID_MASK; + id_rx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->rx_stats_idx == id_rx) { + q->rx_drops.count += rx_drops; + q->rx_packets.count += rx_packets; + q->rx_bytes.count += rx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->rx_drops, rx_drops); + fm10k_update_hw_base_32b(&q->rx_packets, rx_packets); + fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes); + + q->rx_stats_idx = id_rx; +} + +/** + * fm10k_update_hw_stats_q - Updates queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function updates the queue statistics counters that are related to the + * hardware. + **/ +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + fm10k_update_hw_stats_tx_q(hw, q, idx); + fm10k_update_hw_stats_rx_q(hw, q, idx); + } +} + +/** + * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function invalidates the index values for the queues so any updates that + * may have happened are ignored and the base for the queue stats is reset. + **/ + +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + q->rx_stats_idx = 0; + q->tx_stats_idx = 0; + } +} + +/** + * fm10k_get_host_state_generic - Returns the state of the host + * @hw: pointer to hardware structure + * @host_ready: pointer to boolean value that will record host state + * + * This function will check the health of the mailbox and Tx queue 0 + * in order to determine if we should report that the link is up or not. 
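The per-queue stat readers above guard against the switch re-assigning a queue mid-read: they sample the TXQCTL/RXQCTL owner ID before and after reading the counters, retry while it changed, and only then credit the numbers to the recorded owner. A small standalone model of that retry, with a simulated register that changes owner once to force a second pass:

#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0xFFu

/* Simulated queue-control register: the low bits identify the current queue
 * owner, and the switch re-assigns the queue between the first two reads.
 */
static uint32_t read_qctl(void)
{
	static int reads;

	return ++reads < 2 ? 0x05 : 0x07;
}

static uint32_t read_packets(void)
{
	return 100;	/* stand-in for the QPTC/QPRC packet counter */
}

/* Same idea as the queue-stat readers: sample the owner ID before and after
 * reading the counters and retry while it changed, so a sample taken across
 * an ownership change is never charged to the wrong owner.
 */
static uint32_t read_stable_packets(uint32_t *owner)
{
	uint32_t id = read_qctl(), id_prev, packets;

	do {
		packets = read_packets();
		id_prev = id;
		id = read_qctl();
	} while ((id ^ id_prev) & ID_MASK);

	*owner = id & ID_MASK;
	return packets;
}

int main(void)
{
	uint32_t owner;
	uint32_t packets = read_stable_packets(&owner);

	printf("owner %u saw %u packets\n", (unsigned)owner, (unsigned)packets);
	return 0;
}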
+ **/ +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0)); + + /* process upstream mailbox in case interrupts were disabled */ + mbx->ops.process(hw, mbx); + + /* If Tx is no longer enabled link should come down */ + if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE)) + mac->get_host_state = true; + + /* exit if not checking for link, or link cannot be changed */ + if (!mac->get_host_state || !(~txdctl)) + goto out; + + /* if we somehow dropped the Tx enable we should reset */ + if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* if Mailbox timed out we should request reset */ + if (!mbx->timeout) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* verify Mailbox is still valid */ + if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU)) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (mac->dglort_map == FM10K_DGLORTMAP_NONE) + goto out; + + /* if we passed all the tests above then the switch is ready and we no + * longer need to check for link + */ + mac->get_host_state = false; + +out: + *host_ready = !mac->get_host_state; + return ret_val; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h new file mode 100644 index 0000000000000000000000000000000000000000..45e4e5b1f20a83e809560e8b868657df064f3f48 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -0,0 +1,65 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_COMMON_H_ +#define _FM10K_COMMON_H_ + +#include "fm10k_type.h" + +#define FM10K_REMOVED(hw_addr) unlikely(!(hw_addr)) + +/* PCI configuration read */ +u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg); + +/* read operations, indexed using DWORDS */ +u32 fm10k_read_reg(struct fm10k_hw *hw, int reg); + +/* write operations, indexed using DWORDS */ +#define fm10k_write_reg(hw, reg, val) \ +do { \ + u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + if (!FM10K_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +/* Switch register write operations, index using DWORDS */ +#define fm10k_write_sw_reg(hw, reg, val) \ +do { \ + u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \ + if (!FM10K_REMOVED(sw_addr)) \ + writel((val), &sw_addr[(reg)]); \ +} while (0) + +/* read ctrl register which has no clear on read fields as PCIe flush */ +#define fm10k_write_flush(hw) fm10k_read_reg((hw), FM10K_CTRL) +s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw); +s32 fm10k_get_invariants_generic(struct fm10k_hw *hw); +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt); +s32 fm10k_start_hw_generic(struct fm10k_hw *hw); +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw); +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat); +#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta)) +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count); +#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0) +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count); +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready); +#endif /* _FM10K_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c new file mode 100644 index 0000000000000000000000000000000000000000..212a92dad222f846a83b091243fa85012d759132 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -0,0 +1,174 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" + +#ifdef CONFIG_DCB +/** + * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device + * @dev: netdev interface for the device + * @ets: ETS structure to push configuration to + **/ +static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) +{ + int i; + + /* we support 8 TCs in all modes */ + ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + ets->cbs = 0; + + /* we only support strict priority and cannot do traffic shaping */ + memset(ets->tc_tx_bw, 0, sizeof(ets->tc_tx_bw)); + memset(ets->tc_rx_bw, 0, sizeof(ets->tc_rx_bw)); + memset(ets->tc_tsa, IEEE_8021QAZ_TSA_STRICT, sizeof(ets->tc_tsa)); + + /* populate the prio map based on the netdev */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + ets->prio_tc[i] = netdev_get_prio_tc_map(dev, i); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_setets - set the ETS configuration for the device + * @dev: netdev interface for the device + * @ets: ETS structure to pull configuration from + **/ +static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + u8 num_tc = 0; + int i, err; + + /* verify type and determine num_tcs needed */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tx_bw[i] || ets->tc_rx_bw[i]) + return -EINVAL; + if (ets->tc_tsa[i] != IEEE_8021QAZ_TSA_STRICT) + return -EINVAL; + if (ets->prio_tc[i] > num_tc) + num_tc = ets->prio_tc[i]; + } + + /* if requested TC is greater than 0 then num_tcs is max + 1 */ + if (num_tc) + num_tc++; + + if (num_tc > IEEE_8021QAZ_MAX_TCS) + return -EINVAL; + + /* update TC hardware mapping if necessary */ + if (num_tc != netdev_get_num_tc(dev)) { + err = fm10k_setup_tc(dev, num_tc); + if (err) + return err; + } + + /* update priority mapping */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_getpfc - get the PFC configuration for the device + * @dev: netdev interface for the device + * @pfc: PFC structure to push configuration to + **/ +static int fm10k_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record flow control max count and state of TCs */ + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = interface->pfc_en; + + return 0; +} + +/** + * fm10k_dcbnl_ieee_setpfc - set the PFC configuration for the device + * @dev: netdev interface for the device + * @pfc: PFC structure to pull configuration from + **/ +static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record PFC configuration to interface */ + interface->pfc_en = pfc->pfc_en; + + /* if we are running update the drop_en state for all queues */ + if (netif_running(dev)) + fm10k_update_rx_drop_en(interface); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device + * @dev: netdev interface for the device + * + * Returns that we support only IEEE DCB for this interface + **/ +static u8 fm10k_dcbnl_getdcbx(struct net_device *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +/** + * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device + * @dev: netdev interface for the device + * @mode: new mode for this device + * + * Returns error on attempt to enable anything but IEEE DCB for this interface + **/ +static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + return (mode != 
(DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; +} + +static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { + .ieee_getets = fm10k_dcbnl_ieee_getets, + .ieee_setets = fm10k_dcbnl_ieee_setets, + .ieee_getpfc = fm10k_dcbnl_ieee_getpfc, + .ieee_setpfc = fm10k_dcbnl_ieee_setpfc, + + .getdcbx = fm10k_dcbnl_getdcbx, + .setdcbx = fm10k_dcbnl_setdcbx, +}; + +#endif /* CONFIG_DCB */ +/** + * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev + * @dev: netdev interface for the device + * + * Enables PF for DCB by assigning DCBNL ops pointer. + **/ +void fm10k_dcbnl_set_ops(struct net_device *dev) +{ +#ifdef CONFIG_DCB + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + if (hw->mac.type == fm10k_mac_pf) + dev->dcbnl_ops = &fm10k_dcbnl_ops; +#endif /* CONFIG_DCB */ +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..4327f86218b90558bff565e10125925735b21f36 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -0,0 +1,259 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifdef CONFIG_DEBUG_FS + +#include "fm10k.h" + +#include +#include + +static struct dentry *dbg_root; + +/* Descriptor Seq Functions */ + +static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) +{ + struct fm10k_ring *ring = s->private; + + return (*pos < ring->count) ? pos : NULL; +} + +static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct fm10k_ring *ring = s->private; + + return (++(*pos) < ring->count) ? pos : NULL; +} + +static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v) +{ + /* Do nothing. 
*/ +} + +static void fm10k_dbg_desc_break(struct seq_file *s, int i) +{ + while (i--) + seq_puts(s, "-"); + + seq_puts(s, "\n"); +} + +static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v) +{ + struct fm10k_ring *ring = s->private; + int i = *(loff_t *)v; + static const char tx_desc_hdr[] = + "DES BUFFER_ADDRESS LENGTH VLAN MSS HDRLEN FLAGS\n"; + + /* Generate header */ + if (!i) { + seq_printf(s, tx_desc_hdr); + fm10k_dbg_desc_break(s, sizeof(tx_desc_hdr) - 1); + } + + /* Validate descriptor allocation */ + if (!ring->desc) { + seq_printf(s, "%03X Descriptor ring not allocated.\n", i); + } else { + struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i); + + seq_printf(s, "%03X %#018llx %#06x %#06x %#06x %#06x %#04x\n", + i, txd->buffer_addr, txd->buflen, txd->vlan, + txd->mss, txd->hdrlen, txd->flags); + } + + return 0; +} + +static int fm10k_dbg_rx_desc_seq_show(struct seq_file *s, void *v) +{ + struct fm10k_ring *ring = s->private; + int i = *(loff_t *)v; + static const char rx_desc_hdr[] = + "DES DATA RSS STATERR LENGTH VLAN DGLORT SGLORT TIMESTAMP\n"; + + /* Generate header */ + if (!i) { + seq_printf(s, rx_desc_hdr); + fm10k_dbg_desc_break(s, sizeof(rx_desc_hdr) - 1); + } + + /* Validate descriptor allocation */ + if (!ring->desc) { + seq_printf(s, "%03X Descriptor ring not allocated.\n", i); + } else { + union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i); + + seq_printf(s, + "%03X %#010x %#010x %#010x %#06x %#06x %#06x %#06x %#018llx\n", + i, rxd->d.data, rxd->d.rss, rxd->d.staterr, + rxd->w.length, rxd->w.vlan, rxd->w.dglort, + rxd->w.sglort, rxd->q.timestamp); + } + + return 0; +} + +static const struct seq_operations fm10k_dbg_tx_desc_seq_ops = { + .start = fm10k_dbg_desc_seq_start, + .next = fm10k_dbg_desc_seq_next, + .stop = fm10k_dbg_desc_seq_stop, + .show = fm10k_dbg_tx_desc_seq_show, +}; + +static const struct seq_operations fm10k_dbg_rx_desc_seq_ops = { + .start = fm10k_dbg_desc_seq_start, + .next = fm10k_dbg_desc_seq_next, + .stop = fm10k_dbg_desc_seq_stop, + .show = fm10k_dbg_rx_desc_seq_show, +}; + +static int fm10k_dbg_desc_open(struct inode *inode, struct file *filep) +{ + struct fm10k_ring *ring = inode->i_private; + struct fm10k_q_vector *q_vector = ring->q_vector; + const struct seq_operations *desc_seq_ops; + int err; + + if (ring < q_vector->rx.ring) + desc_seq_ops = &fm10k_dbg_tx_desc_seq_ops; + else + desc_seq_ops = &fm10k_dbg_rx_desc_seq_ops; + + err = seq_open(filep, desc_seq_ops); + if (err) + return err; + + ((struct seq_file *)filep->private_data)->private = ring; + + return 0; +} + +static const struct file_operations fm10k_dbg_desc_fops = { + .owner = THIS_MODULE, + .open = fm10k_dbg_desc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/** + * fm10k_dbg_q_vector_init - setup debugfs for the q_vectors + * @q_vector: q_vector to allocate directories for + * + * A folder is created for each q_vector found. In each q_vector + * folder, a debugfs file is created for each tx and rx ring + * allocated to the q_vector. 
+ **/ +void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) +{ + struct fm10k_intfc *interface = q_vector->interface; + char name[16]; + int i; + + if (!interface->dbg_intfc) + return; + + /* Generate a folder for each q_vector */ + sprintf(name, "q_vector.%03d", q_vector->v_idx); + + q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); + if (!q_vector->dbg_q_vector) + return; + + /* Generate a file for each rx ring in the q_vector */ + for (i = 0; i < q_vector->tx.count; i++) { + struct fm10k_ring *ring = &q_vector->tx.ring[i]; + + sprintf(name, "tx_ring.%03d", ring->queue_index); + + debugfs_create_file(name, 0600, + q_vector->dbg_q_vector, ring, + &fm10k_dbg_desc_fops); + } + + /* Generate a file for each rx ring in the q_vector */ + for (i = 0; i < q_vector->rx.count; i++) { + struct fm10k_ring *ring = &q_vector->rx.ring[i]; + + sprintf(name, "rx_ring.%03d", ring->queue_index); + + debugfs_create_file(name, 0600, + q_vector->dbg_q_vector, ring, + &fm10k_dbg_desc_fops); + } +} + +/** + * fm10k_dbg_free_q_vector_dir - setup debugfs for the q_vectors + * @q_vector: q_vector to allocate directories for + **/ +void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) +{ + struct fm10k_intfc *interface = q_vector->interface; + + if (interface->dbg_intfc) + debugfs_remove_recursive(q_vector->dbg_q_vector); + q_vector->dbg_q_vector = NULL; +} + +/** + * fm10k_dbg_intfc_init - setup the debugfs directory for the intferface + * @interface: the interface that is starting up + **/ + +void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) +{ + const char *name = pci_name(interface->pdev); + + if (dbg_root) + interface->dbg_intfc = debugfs_create_dir(name, dbg_root); +} + +/** + * fm10k_dbg_intfc_exit - clean out the interface's debugfs entries + * @interface: the interface that is stopping + **/ +void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) +{ + if (dbg_root) + debugfs_remove_recursive(interface->dbg_intfc); + interface->dbg_intfc = NULL; +} + +/** + * fm10k_dbg_init - start up debugfs for the driver + **/ +void fm10k_dbg_init(void) +{ + dbg_root = debugfs_create_dir(fm10k_driver_name, NULL); +} + +/** + * fm10k_dbg_exit - clean out the driver's debugfs entries + **/ +void fm10k_dbg_exit(void) +{ + debugfs_remove_recursive(dbg_root); + dbg_root = NULL; +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..2d04464e6aa32f0bed0f491c621376a4f52afd80 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -0,0 +1,1071 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include + +#include "fm10k.h" + +struct fm10k_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define FM10K_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_net_stats[] = { + FM10K_NETDEV_STAT(tx_packets), + FM10K_NETDEV_STAT(tx_bytes), + FM10K_NETDEV_STAT(tx_errors), + FM10K_NETDEV_STAT(rx_packets), + FM10K_NETDEV_STAT(rx_bytes), + FM10K_NETDEV_STAT(rx_errors), + FM10K_NETDEV_STAT(rx_dropped), + + /* detailed Rx errors */ + FM10K_NETDEV_STAT(rx_length_errors), + FM10K_NETDEV_STAT(rx_crc_errors), + FM10K_NETDEV_STAT(rx_fifo_errors), +}; + +#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats) + +#define FM10K_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \ + .stat_offset = offsetof(struct fm10k_intfc, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_stats[] = { + FM10K_STAT("tx_restart_queue", restart_queue), + FM10K_STAT("tx_busy", tx_busy), + FM10K_STAT("tx_csum_errors", tx_csum_errors), + FM10K_STAT("rx_alloc_failed", alloc_failed), + FM10K_STAT("rx_csum_errors", rx_csum_errors), + FM10K_STAT("rx_errors", rx_errors), + + FM10K_STAT("tx_packets_nic", tx_packets_nic), + FM10K_STAT("tx_bytes_nic", tx_bytes_nic), + FM10K_STAT("rx_packets_nic", rx_packets_nic), + FM10K_STAT("rx_bytes_nic", rx_bytes_nic), + FM10K_STAT("rx_drops_nic", rx_drops_nic), + FM10K_STAT("rx_overrun_pf", rx_overrun_pf), + FM10K_STAT("rx_overrun_vf", rx_overrun_vf), + + FM10K_STAT("timeout", stats.timeout.count), + FM10K_STAT("ur", stats.ur.count), + FM10K_STAT("ca", stats.ca.count), + FM10K_STAT("um", stats.um.count), + FM10K_STAT("xec", stats.xec.count), + FM10K_STAT("vlan_drop", stats.vlan_drop.count), + FM10K_STAT("loopback_drop", stats.loopback_drop.count), + FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), + + FM10K_STAT("swapi_status", hw.swapi.status), + FM10K_STAT("mac_rules_used", hw.swapi.mac.used), + FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), + + FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), + FM10K_STAT("mbx_tx_dropped", hw.mbx.tx_dropped), + FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), + FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), + FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), + FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), + FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), + + FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), +}; + +#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_stats) + +#define FM10K_QUEUE_STATS_LEN \ + (MAX_QUEUES * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) + +#define FM10K_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ + FM10K_NETDEV_STATS_LEN + \ + FM10K_QUEUE_STATS_LEN) + +static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { + "Mailbox test (on/offline)" +}; + +#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN) + +enum fm10k_self_test_types { + FM10K_TEST_MBX, + FM10K_TEST_MAX = FM10K_TEST_LEN +}; + +static void fm10k_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *fm10k_gstrings_test, + FM10K_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + memcpy(p, 
fm10k_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MAX_QUEUES; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int fm10k_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return FM10K_TEST_LEN; + case ETH_SS_STATS: + return FM10K_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void fm10k_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); + struct fm10k_intfc *interface = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + char *p; + int i, j; + + fm10k_update_stats(interface); + + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + p = (char *)interface + fm10k_gstrings_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < MAX_QUEUES; i++) { + struct fm10k_ring *ring; + u64 *queue_stat; + + ring = interface->tx_ring[i]; + if (ring) + queue_stat = (u64 *)&ring->stats; + for (j = 0; j < stat_count; j++) + *(data++) = ring ? queue_stat[j] : 0; + + ring = interface->rx_ring[i]; + if (ring) + queue_stat = (u64 *)&ring->stats; + for (j = 0; j < stat_count; j++) + *(data++) = ring ? 
queue_stat[j] : 0; + } +} + +/* If function below adds more registers this define needs to be updated */ +#define FM10K_REGS_LEN_Q 29 + +static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i) +{ + int idx = 0; + + buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i)); + + BUG_ON(idx != FM10K_REGS_LEN_Q); +} + +/* If function above adds more registers this define needs to be updated */ +#define FM10K_REGS_LEN_VSI 43 + +static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i) +{ + int idx = 0, j; + + buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i)); + for (j = 0; j < 10; j++) + buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j)); + for (j = 0; j < 32; j++) + buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j)); + + BUG_ON(idx != FM10K_REGS_LEN_VSI); +} + +static void fm10k_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u32 *buff = p; + u16 i; + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + switch (hw->mac.type) { + case fm10k_mac_pf: + /* General PF Registers */ + *(buff++) = fm10k_read_reg(hw, FM10K_CTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT); + *(buff++) = fm10k_read_reg(hw, FM10K_GCR); + *(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT); + + for (i = 0; i < 8; i++) { + *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i)); + *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i)); + } + + for (i = 0; i < 65; i++) { + fm10k_get_reg_vsi(hw, buff, i); + buff += FM10K_REGS_LEN_VSI; + } + + *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + + for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) { + fm10k_get_reg_q(hw, buff, i); + buff += FM10K_REGS_LEN_Q; + } + + *(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL); + + for (i = 0; i < 8; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i)); + + /* Interrupt Throttling Registers */ + for (i = 0; i < 130; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_ITR(i)); + + break; + case fm10k_mac_vf: + /* 
General VF registers */ + *(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP); + *(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME); + + /* Interrupt Throttling Registers */ + for (i = 0; i < 8; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i)); + + fm10k_get_reg_vsi(hw, buff, 0); + buff += FM10K_REGS_LEN_VSI; + + for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) { + if (i < hw->mac.max_queues) + fm10k_get_reg_q(hw, buff, i); + else + memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q); + buff += FM10K_REGS_LEN_Q; + } + + break; + default: + return; + } +} + +/* If function above adds more registers these define need to be updated */ +#define FM10K_REGS_LEN_PF \ +(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q)) +#define FM10K_REGS_LEN_VF \ +(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q)) + +static int fm10k_get_regs_len(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + + switch (hw->mac.type) { + case fm10k_mac_pf: + return FM10K_REGS_LEN_PF * sizeof(u32); + case fm10k_mac_vf: + return FM10K_REGS_LEN_VF * sizeof(u32); + default: + return 0; + } +} + +static void fm10k_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + strncpy(info->driver, fm10k_driver_name, + sizeof(info->driver) - 1); + strncpy(info->version, fm10k_driver_version, + sizeof(info->version) - 1); + strncpy(info->bus_info, pci_name(interface->pdev), + sizeof(info->bus_info) - 1); + + info->n_stats = FM10K_STATS_LEN; + + info->regdump_len = fm10k_get_regs_len(dev); +} + +static void fm10k_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record fixed values for autoneg and tx pause */ + pause->autoneg = 0; + pause->tx_pause = 1; + + pause->rx_pause = interface->rx_pause ? 1 : 0; +} + +static int fm10k_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + if (pause->autoneg || !pause->tx_pause) + return -EINVAL; + + /* we can only support pause on the PF to avoid head-of-line blocking */ + if (hw->mac.type == fm10k_mac_pf) + interface->rx_pause = pause->rx_pause ? 
~0 : 0; + else if (pause->rx_pause) + return -EINVAL; + + if (netif_running(dev)) + fm10k_update_rx_drop_en(interface); + + return 0; +} + +static u32 fm10k_get_msglevel(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + return interface->msg_enable; +} + +static void fm10k_set_msglevel(struct net_device *netdev, u32 data) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + interface->msg_enable = data; +} + +static void fm10k_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + ring->rx_max_pending = FM10K_MAX_RXD; + ring->tx_max_pending = FM10K_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = interface->rx_ring_count; + ring->tx_pending = interface->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int fm10k_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + FM10K_MIN_TXD, FM10K_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + FM10K_MIN_RXD, FM10K_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == interface->tx_ring_count) && + (new_rx_count == interface->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + usleep_range(1000, 2000); + + if (!netif_running(interface->netdev)) { + for (i = 0; i < interface->num_tx_queues; i++) + interface->tx_ring[i]->count = new_tx_count; + for (i = 0; i < interface->num_rx_queues; i++) + interface->rx_ring[i]->count = new_rx_count; + interface->tx_ring_count = new_tx_count; + interface->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, interface->num_tx_queues, interface->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct fm10k_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + fm10k_down(interface); + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != interface->tx_ring_count) { + for (i = 0; i < interface->num_tx_queues; i++) { + memcpy(&temp_ring[i], interface->tx_ring[i], + sizeof(struct fm10k_ring)); + + temp_ring[i].count = new_tx_count; + err = fm10k_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + fm10k_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < interface->num_tx_queues; i++) { + fm10k_free_tx_resources(interface->tx_ring[i]); + + memcpy(interface->tx_ring[i], &temp_ring[i], + sizeof(struct fm10k_ring)); + } + + interface->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != interface->rx_ring_count) { + for (i = 0; i < interface->num_rx_queues; i++) { + memcpy(&temp_ring[i], interface->rx_ring[i], + sizeof(struct fm10k_ring)); + + temp_ring[i].count = new_rx_count; + err = fm10k_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + fm10k_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < interface->num_rx_queues; i++) { + fm10k_free_rx_resources(interface->rx_ring[i]); + + memcpy(interface->rx_ring[i], &temp_ring[i], + sizeof(struct fm10k_ring)); + } + + interface->rx_ring_count = new_rx_count; + } + +err_setup: + fm10k_up(interface); + vfree(temp_ring); +clear_reset: + clear_bit(__FM10K_RESETTING, &interface->state); + return err; +} + +static int fm10k_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + ec->use_adaptive_tx_coalesce = + !!(interface->tx_itr & FM10K_ITR_ADAPTIVE); + ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE; + + ec->use_adaptive_rx_coalesce = + !!(interface->rx_itr & FM10K_ITR_ADAPTIVE); + ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE; + + return 0; +} + +static int fm10k_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_q_vector *qv; + u16 tx_itr, rx_itr; + int i; + + /* verify limits */ + if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) || + (ec->tx_coalesce_usecs > FM10K_ITR_MAX)) + return -EINVAL; + + /* record settings */ + tx_itr = ec->tx_coalesce_usecs; + rx_itr = ec->rx_coalesce_usecs; + + /* set initial values for adaptive ITR */ + if (ec->use_adaptive_tx_coalesce) + tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K; + + if (ec->use_adaptive_rx_coalesce) + rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + + /* update interface */ + interface->tx_itr = tx_itr; + interface->rx_itr = rx_itr; + + /* update q_vectors */ + for (i = 0; i < interface->num_q_vectors; i++) { + qv = interface->q_vector[i]; + qv->tx.itr = tx_itr; + qv->rx.itr = rx_itr; + } + + return 0; +} + +static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on fm10k */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V4_FLOW: + case AH_V6_FLOW: + case ESP_V4_FLOW: + case ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + 
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = interface->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = fm10k_get_rss_hash_opts(interface, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \ + FM10K_FLAG_RSS_FIELD_IPV6_UDP) +static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, + struct ethtool_rxnfc *nfc) +{ + u32 flags = interface->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != interface->flags) { + struct fm10k_hw *hw = &interface->hw; + u32 mrqc; + + if ((flags & UDP_RSS_FLAGS) && + !(interface->flags & UDP_RSS_FLAGS)) + netif_warn(interface, drv, interface->netdev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + interface->flags = flags; + + /* Perform hash on these packet types */ + mrqc = FM10K_MRQC_IPV4 | + FM10K_MRQC_TCP_IPV4 | + FM10K_MRQC_IPV6 | + FM10K_MRQC_TCP_IPV6; + + if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= FM10K_MRQC_UDP_IPV4; + if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= FM10K_MRQC_UDP_IPV6; + + fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); + } + + return 0; +} + +static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = fm10k_set_rss_hash_opt(interface, cmd); + break; + default: + break; + } + + return ret; +} + +static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 attr_flag, test_msg[6]; + unsigned long timeout; + int err; + + /* For now this is a 
VF only feature */ + if (hw->mac.type != fm10k_mac_vf) + return 0; + + /* loop through both nested and unnested attribute types */ + for (attr_flag = (1 << FM10K_TEST_MSG_UNSET); + attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED)); + attr_flag += attr_flag) { + /* generate message to be tested */ + fm10k_tlv_msg_test_create(test_msg, attr_flag); + + fm10k_mbx_lock(interface); + mbx->test_result = FM10K_NOT_IMPLEMENTED; + err = mbx->ops.enqueue_tx(hw, mbx, test_msg); + fm10k_mbx_unlock(interface); + + /* wait up to 1 second for response */ + timeout = jiffies + HZ; + do { + if (err < 0) + goto err_out; + + usleep_range(500, 1000); + + fm10k_mbx_lock(interface); + mbx->ops.process(hw, mbx); + fm10k_mbx_unlock(interface); + + err = mbx->test_result; + if (!err) + break; + } while (time_is_after_jiffies(timeout)); + + /* reporting errors */ + if (err) + goto err_out; + } + +err_out: + *data = err < 0 ? (attr_flag) : (err > 0); + return err; +} + +static void fm10k_self_test(struct net_device *dev, + struct ethtool_test *eth_test, u64 *data) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); + + if (FM10K_REMOVED(hw)) { + netif_err(interface, drv, dev, + "Interface removed - test blocked\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX])) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + +static u32 fm10k_get_reta_size(struct net_device *netdev) +{ + return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; +} + +static int fm10k_get_reta(struct net_device *netdev, u32 *indir) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i; + + if (!indir) + return 0; + + for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { + u32 reta = interface->reta[i]; + + indir[0] = (reta << 24) >> 24; + indir[1] = (reta << 16) >> 24; + indir[2] = (reta << 8) >> 24; + indir[3] = (reta) >> 24; + } + + return 0; +} + +static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + int i; + u16 rss_i; + + if (!indir) + return 0; + + /* Verify user input. 
*/ + rss_i = interface->ring_feature[RING_F_RSS].indices; + for (i = fm10k_get_reta_size(netdev); i--;) { + if (indir[i] < rss_i) + continue; + return -EINVAL; + } + + /* record entries to reta table */ + for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { + u32 reta = indir[0] | + (indir[1] << 8) | + (indir[2] << 16) | + (indir[3] << 24); + + if (interface->reta[i] == reta) + continue; + + interface->reta[i] = reta; + fm10k_write_reg(hw, FM10K_RETA(0, i), reta); + } + + return 0; +} + +static u32 fm10k_get_rssrk_size(struct net_device *netdev) +{ + return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; +} + +static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i, err; + + err = fm10k_get_reta(netdev, indir); + if (err || !key) + return err; + + for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) + *(__le32 *)key = cpu_to_le32(interface->rssrk[i]); + + return 0; +} + +static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir, + const u8 *key) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + int i, err; + + err = fm10k_set_reta(netdev, indir); + if (err || !key) + return err; + + for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) { + u32 rssrk = le32_to_cpu(*(__le32 *)key); + + if (interface->rssrk[i] == rssrk) + continue; + + interface->rssrk[i] = rssrk; + fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk); + } + + return 0; +} + +static unsigned int fm10k_max_channels(struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int max_combined = interface->hw.mac.max_queues; + u8 tcs = netdev_get_num_tc(dev); + + /* For QoS report channels per traffic class */ + if (tcs > 1) + max_combined = 1 << (fls(max_combined / tcs) - 1); + + return max_combined; +} + +static void fm10k_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + /* report maximum channels */ + ch->max_combined = fm10k_max_channels(dev); + + /* report info for other vector */ + ch->max_other = NON_Q_VECTORS(hw); + ch->other_count = ch->max_other; + + /* record RSS queues */ + ch->combined_count = interface->ring_feature[RING_F_RSS].indices; +} + +static int fm10k_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int count = ch->combined_count; + struct fm10k_hw *hw = &interface->hw; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS(hw)) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > fm10k_max_channels(dev)) + return -EINVAL; + + interface->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return fm10k_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int fm10k_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (interface->ptp_clock) + info->phc_index = ptp_clock_index(interface->ptp_clock); + else + 
info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops fm10k_ethtool_ops = { + .get_strings = fm10k_get_strings, + .get_sset_count = fm10k_get_sset_count, + .get_ethtool_stats = fm10k_get_ethtool_stats, + .get_drvinfo = fm10k_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_pauseparam = fm10k_get_pauseparam, + .set_pauseparam = fm10k_set_pauseparam, + .get_msglevel = fm10k_get_msglevel, + .set_msglevel = fm10k_set_msglevel, + .get_ringparam = fm10k_get_ringparam, + .set_ringparam = fm10k_set_ringparam, + .get_coalesce = fm10k_get_coalesce, + .set_coalesce = fm10k_set_coalesce, + .get_rxnfc = fm10k_get_rxnfc, + .set_rxnfc = fm10k_set_rxnfc, + .get_regs = fm10k_get_regs, + .get_regs_len = fm10k_get_regs_len, + .self_test = fm10k_self_test, + .get_rxfh_indir_size = fm10k_get_reta_size, + .get_rxfh_key_size = fm10k_get_rssrk_size, + .get_rxfh = fm10k_get_rssh, + .set_rxfh = fm10k_set_rssh, + .get_channels = fm10k_get_channels, + .set_channels = fm10k_set_channels, + .get_ts_info = fm10k_get_ts_info, +}; + +void fm10k_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &fm10k_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c new file mode 100644 index 0000000000000000000000000000000000000000..0601908642389077b1df46546af52e4a734c8db0 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -0,0 +1,536 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" +#include "fm10k_vf.h" +#include "fm10k_pf.h" + +static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + struct fm10k_intfc *interface = hw->back; + struct pci_dev *pdev = interface->pdev; + + dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n", + **results & FM10K_TLV_ID_MASK, vf_info->vf_idx); + + return fm10k_tlv_msg_error(hw, results, mbx); +} + +static const struct fm10k_msg_data iov_mbx_data[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error), +}; + +s32 fm10k_iov_event(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data; + s64 mbicr, vflre; + int i; + + /* if there is no iov_data then there is no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return 0; + + rcu_read_lock(); + + iov_data = interface->iov_data; + + /* check again now that we are in the RCU block */ + if (!iov_data) + goto read_unlock; + + if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR)) + goto process_mbx; + + /* read VFLRE to determine if any VFs have been reset */ + do { + vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + vflre <<= 32; + vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1)); + vflre = (vflre << 32) | (vflre >> 32); + vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + + i = iov_data->num_vfs; + + for (vflre <<= 64 - i; vflre && i--; vflre += vflre) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + if (vflre >= 0) + continue; + + hw->iov.ops.reset_resources(hw, vf_info); + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } + } while (i != iov_data->num_vfs); + +process_mbx: + /* read MBICR to determine which VFs require attention */ + mbicr = fm10k_read_reg(hw, FM10K_MBICR(1)); + mbicr <<= 32; + mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0)); + + i = iov_data->next_vf_mbx ? : iov_data->num_vfs; + + for (mbicr <<= 64 - i; i--; mbicr += mbicr) { + struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx; + + if (mbicr >= 0) + continue; + + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) + break; + + mbx->ops.process(hw, mbx); + } + + if (i >= 0) { + iov_data->next_vf_mbx = i + 1; + } else if (iov_data->next_vf_mbx) { + iov_data->next_vf_mbx = 0; + goto process_mbx; + } +read_unlock: + rcu_read_unlock(); + + return 0; +} + +s32 fm10k_iov_mbx(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data; + int i; + + /* if there is no iov_data then there is no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return 0; + + rcu_read_lock(); + + iov_data = interface->iov_data; + + /* check again now that we are in the RCU block */ + if (!iov_data) + goto read_unlock; + + /* lock the mailbox for transmit and receive */ + fm10k_mbx_lock(interface); + +process_mbx: + for (i = iov_data->next_vf_mbx ? 
: iov_data->num_vfs; i--;) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + struct fm10k_mbx_info *mbx = &vf_info->mbx; + u16 glort = vf_info->glort; + + /* verify port mapping is valid, if not reset port */ + if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) + hw->iov.ops.reset_lport(hw, vf_info); + + /* reset VFs that have mailbox timed out */ + if (!mbx->timeout) { + hw->iov.ops.reset_resources(hw, vf_info); + mbx->ops.connect(hw, mbx); + } + + /* no work pending, then just continue */ + if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx)) + continue; + + /* guarantee we have free space in the SM mailbox */ + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) + break; + + /* cleanup mailbox and process received messages */ + mbx->ops.process(hw, mbx); + } + + if (i >= 0) { + iov_data->next_vf_mbx = i + 1; + } else if (iov_data->next_vf_mbx) { + iov_data->next_vf_mbx = 0; + goto process_mbx; + } + + /* free the lock */ + fm10k_mbx_unlock(interface); + +read_unlock: + rcu_read_unlock(); + + return 0; +} + +void fm10k_iov_suspend(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + int num_vfs, i; + + /* pull out num_vfs from iov_data */ + num_vfs = iov_data ? iov_data->num_vfs : 0; + + /* shut down queue mapping for VFs */ + fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss), + FM10K_DGLORTMAP_NONE); + + /* Stop any active VFs and reset their resources */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + hw->iov.ops.reset_resources(hw, vf_info); + hw->iov.ops.reset_lport(hw, vf_info); + } +} + +int fm10k_iov_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int num_vfs, i; + + /* pull out num_vfs from iov_data */ + num_vfs = iov_data ? 
iov_data->num_vfs : 0; + + /* return error if iov_data is not already populated */ + if (!iov_data) + return -ENOMEM; + + /* allocate hardware resources for the VFs */ + hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); + + /* configure DGLORT mapping for RSS */ + dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; + dglort.idx = fm10k_dglort_vf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1); + dglort.queue_b = fm10k_vf_queue_index(hw, 0); + dglort.vsi_l = fls(hw->iov.total_vfs - 1); + dglort.vsi_b = 1; + + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* assign resources to the device */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + /* allocate all but the last GLORT to the VFs */ + if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT)) + break; + + /* assign GLORT to VF, and restrict it to multicast */ + hw->iov.ops.set_lport(hw, vf_info, i, + FM10K_VF_FLAG_MULTI_CAPABLE); + + /* assign our default vid to the VF following reset */ + vf_info->sw_vid = hw->mac.default_vid; + + /* mailbox is disconnected so we don't send a message */ + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + /* now we are ready so we can connect */ + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } + + return 0; +} + +s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) +{ + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE; + + /* no IOV support, not our message to process */ + if (!iov_data) + return FM10K_ERR_PARAM; + + /* glort outside our range, not our message to process */ + if (vf_idx >= iov_data->num_vfs) + return FM10K_ERR_PARAM; + + /* determine if an update has occured and if so notify the VF */ + vf_info = &iov_data->vf_info[vf_idx]; + if (vf_info->sw_vid != pvid) { + vf_info->sw_vid = pvid; + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + } + + return 0; +} + +static void fm10k_iov_free_data(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + + if (!interface->iov_data) + return; + + /* reclaim hardware resources */ + fm10k_iov_suspend(pdev); + + /* drop iov_data from interface */ + kfree_rcu(interface->iov_data, rcu); + interface->iov_data = NULL; +} + +static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + size_t size; + int i, err; + + /* return error if iov_data is already populated */ + if (iov_data) + return -EBUSY; + + /* The PF should always be able to assign resources */ + if (!hw->iov.ops.assign_resources) + return -ENODEV; + + /* nothing to do if no VFs are requested */ + if (!num_vfs) + return 0; + + /* allocate memory for VF storage */ + size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]); + iov_data = kzalloc(size, GFP_KERNEL); + if (!iov_data) + return -ENOMEM; + + /* record number of VFs */ + iov_data->num_vfs = num_vfs; + + /* loop through vf_info structures initializing each entry */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + /* Record VF VSI value */ + vf_info->vsi = i + 1; + vf_info->vf_idx = i; + + /* initialize mailbox memory */ + err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i); + if (err) { + dev_err(&pdev->dev, + "Unable to 
initialize SR-IOV mailbox\n"); + kfree(iov_data); + return err; + } + } + + /* assign iov_data to interface */ + interface->iov_data = iov_data; + + /* allocate hardware resources for the VFs */ + fm10k_iov_resume(pdev); + + return 0; +} + +void fm10k_iov_disable(struct pci_dev *pdev) +{ + if (pci_num_vf(pdev) && pci_vfs_assigned(pdev)) + dev_err(&pdev->dev, + "Cannot disable SR-IOV while VFs are assigned\n"); + else + pci_disable_sriov(pdev); + + fm10k_iov_free_data(pdev); +} + +static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) +{ + u32 err_sev; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); + err_sev &= ~PCI_ERR_UNC_COMP_ABORT; + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); +} + +int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) +{ + int current_vfs = pci_num_vf(pdev); + int err = 0; + + if (current_vfs && pci_vfs_assigned(pdev)) { + dev_err(&pdev->dev, + "Cannot modify SR-IOV while VFs are assigned\n"); + num_vfs = current_vfs; + } else { + pci_disable_sriov(pdev); + fm10k_iov_free_data(pdev); + } + + /* allocate resources for the VFs */ + err = fm10k_iov_alloc_data(pdev, num_vfs); + if (err) + return err; + + /* allocate VFs if not already allocated */ + if (num_vfs && (num_vfs != current_vfs)) { + /* Disable completer abort error reporting as + * the VFs can trigger this any time they read a queue + * that they don't own. + */ + fm10k_disable_aer_comp_abort(pdev); + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, + "Enable PCI SR-IOV failed: %d\n", err); + return err; + } + } + + return num_vfs; +} + +int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* verify MAC addr is valid */ + if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac)) + return -EINVAL; + + /* record new MAC address */ + vf_info = &iov_data->vf_info[vf_idx]; + ether_addr_copy(vf_info->mac, mac); + + /* assigning the MAC will send a mailbox message so lock is needed */ + fm10k_mbx_lock(interface); + + /* assign MAC address to VF */ + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + fm10k_mbx_unlock(interface); + + return 0; +} + +int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, + u8 qos) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* QOS is unsupported and VLAN IDs accepted range 0-4094 */ + if (qos || (vid > (VLAN_VID_MASK - 1))) + return -EINVAL; + + vf_info = &iov_data->vf_info[vf_idx]; + + /* exit if there is nothing to do */ + if (vf_info->pf_vid == vid) + return 0; + + /* record default VLAN ID for VF */ + vf_info->pf_vid = vid; + + /* assigning the VLAN will send a mailbox message so lock is needed */ + fm10k_mbx_lock(interface); + + /* Clear the VLAN table for the VF */ + hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false); + + /* Update VF assignment and trigger reset */ + 
hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + fm10k_mbx_unlock(interface); + + return 0; +} + +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused, + int rate) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* rate limit cannot be less than 10 Mb/s or greater than link speed */ + if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + return -EINVAL; + + /* store values */ + iov_data->vf_info[vf_idx].rate = rate; + + /* update hardware configuration */ + hw->iov.ops.configure_tc(hw, vf_idx, rate); + + return 0; +} + +int fm10k_ndo_get_vf_config(struct net_device *netdev, + int vf_idx, struct ifla_vf_info *ivi) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + vf_info = &iov_data->vf_info[vf_idx]; + + ivi->vf = vf_idx; + ivi->max_tx_rate = vf_info->rate; + ivi->min_tx_rate = 0; + ether_addr_copy(ivi->mac, vf_info->mac); + ivi->vlan = vf_info->pf_vid; + ivi->qos = 0; + + return 0; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c new file mode 100644 index 0000000000000000000000000000000000000000..6c800a330d66306f1fb2a83435e9373f19ecd857 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -0,0 +1,1979 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <net/ipv6.h> +#include <net/ip.h> +#include <net/tcp.h> +#include <linux/if_macvlan.h> +#include <linux/prefetch.h> + +#include "fm10k.h" + +#define DRV_VERSION "0.12.2-k" +const char fm10k_driver_version[] = DRV_VERSION; +char fm10k_driver_name[] = "fm10k"; +static const char fm10k_driver_string[] = + "Intel(R) Ethernet Switch Host Interface Driver"; +static const char fm10k_copyright[] = + "Copyright (c) 2013 Intel Corporation."; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * fm10k_init_module - Driver Registration Routine + * + * fm10k_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem.
+ **/ +static int __init fm10k_init_module(void) +{ + pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); + pr_info("%s\n", fm10k_copyright); + + fm10k_dbg_init(); + + return fm10k_register_pci_driver(); +} +module_init(fm10k_init_module); + +/** + * fm10k_exit_module - Driver Exit Cleanup Routine + * + * fm10k_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit fm10k_exit_module(void) +{ + fm10k_unregister_pci_driver(); + + fm10k_dbg_exit(); +} +module_exit(fm10k_exit_module); + +static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, + struct fm10k_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* Only page will be NULL if buffer was consumed */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + bi->page = NULL; + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * fm10k_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) +{ + union fm10k_rx_desc *rx_desc; + struct fm10k_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = FM10K_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer[i]; + i -= rx_ring->count; + + do { + if (!fm10k_alloc_mapped_page(rx_ring, bi)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = FM10K_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->q.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* notify hardware of new descriptors */ + writel(i, rx_ring->tail); + } +} + +/** + * fm10k_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the interface + **/ +static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, + struct fm10k_rx_buffer *old_buff) +{ + struct fm10k_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + /* transfer page from old buffer to new buffer */ + memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer)); + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, + old_buff->page_offset, + FM10K_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, + struct page *page, + unsigned int truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_mem_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= FM10K_RX_BUFSZ; + + /* since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unnecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + +/** + * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the interface. 
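+ *
+ * Purely as an illustration of the flow described above: a frame shorter than
+ * FM10K_RX_HDR_LEN is memcpy'd straight into the linear skb data, so its page
+ * can be recycled right away (provided it is node local), while a full
+ * 1514 byte frame is attached as a page fragment and reuse is decided by
+ * fm10k_can_reuse_rx_page() above.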
+ **/ +static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring, + struct fm10k_rx_buffer *rx_buffer, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->w.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = FM10K_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_mem_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); +} + +static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct fm10k_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; + + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + FM10K_RX_HDR_LEN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + FM10K_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + fm10k_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + + return skb; +} + +static inline void fm10k_rx_checksum(struct fm10k_ring *ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (fm10k_test_staterr(rx_desc, + FM10K_RXD_STATUS_L4E | + FM10K_RXD_STATUS_L4E2 | + FM10K_RXD_STATUS_IPE | + FM10K_RXD_STATUS_IPE2)) { + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2)) + skb->encapsulation = true; + else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) + return; + + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +#define FM10K_RSS_L4_TYPES_MASK \ + ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ + (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ + (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ + (1ul << FM10K_RSSTYPE_IPV6_UDP)) + +static inline void fm10k_rx_hash(struct fm10k_ring *ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; 
+ + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK; + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), + (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct fm10k_intfc *interface = rx_ring->q_vector->interface; + + FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; + + if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) + fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), + le64_to_cpu(rx_desc->q.timestamp)); +} + +static void fm10k_type_trans(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); + + /* check to see if DGLORT belongs to a MACVLAN */ + if (l2_accel) { + u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1; + + idx -= l2_accel->dglort; + if (idx < l2_accel->size && l2_accel->macvlan[idx]) + dev = l2_accel->macvlan[idx]; + else + l2_accel = NULL; + } + + skb->protocol = eth_type_trans(skb, dev); + + if (!l2_accel) + return; + + /* update MACVLAN statistics */ + macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, + !!(rx_desc->w.hdr_info & + cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); +} + +/** + * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + unsigned int len = skb->len; + + fm10k_rx_hash(rx_ring, rx_desc, skb); + + fm10k_rx_checksum(rx_ring, rx_desc, skb); + + fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); + + FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; + + skb_record_rx_queue(skb, rx_ring->queue_index); + + FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; + + if (rx_desc->w.vlan) { + u16 vid = le16_to_cpu(rx_desc->w.vlan); + + if (vid != rx_ring->vid) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + fm10k_type_trans(rx_ring, rx_desc, skb); + + return len; +} + +/** + * fm10k_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(FM10K_RX_DESC(rx_ring, ntc)); + + if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) + return false; + + return true; +} + +/** + * fm10k_pull_tail - fm10k specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being adjusted + * + * This function is an fm10k specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void fm10k_pull_tail(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * fm10k_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
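+ *
+ * For example, a 42 byte ARP frame is padded out to the 60 byte minimum
+ * before being handed up the stack, while a frame flagged with
+ * FM10K_RXD_STATUS_RXE is dropped and counted in rx_stats.errors.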
+ **/ +static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely((fm10k_test_staterr(rx_desc, + FM10K_RXD_STATUS_RXE)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.errors++; + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + fm10k_pull_tail(rx_ring, rx_desc, skb); + + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; + + if (skb_pad(skb, pad_len)) + return true; + __skb_put(skb, pad_len); + } + + return false; +} + +/** + * fm10k_receive_skb - helper function to handle rx indications + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *rx_ring, + int budget) +{ + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = fm10k_desc_unused(rx_ring); + + do { + union fm10k_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= FM10K_RX_BUFFER_WRITE) { + fm10k_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STATUS_DD bit is set + */ + rmb(); + + /* retrieve a buffer from the ring */ + skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (fm10k_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* populate checksum, timestamp, VLAN, and protocol */ + total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); + + fm10k_receive_skb(q_vector, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } while (likely(total_packets < budget)); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_packets; + rx_ring->stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + return total_packets < budget; +} + +#define VXLAN_HLEN (sizeof(struct udphdr) + 8) +static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) +{ + struct fm10k_intfc *interface = netdev_priv(skb->dev); + struct fm10k_vxlan_port *vxlan_port; + + /* we can only offload a vxlan if we recognize it as such */ + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + + if (!vxlan_port) + return NULL; + if (vxlan_port->port != udp_hdr(skb)->dest) + return NULL; + + /* return offset of udp_hdr plus 8 bytes for VXLAN header */ + return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN); +} + +#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF) +#define NVGRE_TNI htons(0x2000) +struct fm10k_nvgre_hdr { + __be16 flags; + __be16 
proto; + __be32 tni; +}; + +static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) +{ + struct fm10k_nvgre_hdr *nvgre_hdr; + int hlen = ip_hdrlen(skb); + + /* currently only IPv4 is supported due to hlen above */ + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + return NULL; + + /* our transport header should be NVGRE */ + nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen); + + /* verify all reserved flags are 0 */ + if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) + return NULL; + + /* verify protocol is transparent Ethernet bridging */ + if (nvgre_hdr->proto != htons(ETH_P_TEB)) + return NULL; + + /* report start of ethernet header */ + if (nvgre_hdr->flags & NVGRE_TNI) + return (struct ethhdr *)(nvgre_hdr + 1); + + return (struct ethhdr *)(&nvgre_hdr->tni); +} + +static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + u8 l4_hdr = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + switch (l4_hdr) { + case IPPROTO_UDP: + eth_hdr = fm10k_port_is_vxlan(skb); + break; + case IPPROTO_GRE: + eth_hdr = fm10k_gre_is_nvgre(skb); + break; + default: + return 0; + } + + if (!eth_hdr) + return 0; + + switch (eth_hdr->h_proto) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + break; + default: + return 0; + } + + return eth_hdr->h_proto; +} + +static int fm10k_tso(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_desc *tx_desc; + unsigned char *th; + u8 hdrlen; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + /* compute header lengths */ + if (skb->encapsulation) { + if (!fm10k_tx_encap_offload(skb)) + goto err_vxlan; + th = skb_inner_transport_header(skb); + } else { + th = skb_transport_header(skb); + } + + /* compute offset from SOF to transport header and add header len */ + hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2); + + first->tx_flags |= FM10K_TX_FLAGS_CSUM; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + /* populate Tx descriptor header size and mss */ + tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->hdrlen = hdrlen; + tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + + return 1; +err_vxlan: + tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + if (!net_ratelimit()) + netdev_err(tx_ring->netdev, + "TSO requested for unsupported tunnel, disabling offload\n"); + return -1; +} + +static void fm10k_tx_csum(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_desc *tx_desc; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + __be16 protocol; + u8 l4_hdr = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto no_csum; + + if (skb->encapsulation) { + protocol = fm10k_tx_encap_offload(skb); + if (!protocol) { + if (skb_checksum_help(skb)) { + dev_warn(tx_ring->dev, + "failed to offload encap csum!\n"); + tx_ring->tx_stats.csum_err++; + } + goto no_csum; + } + network_hdr.raw = skb_inner_network_header(skb); + } else { + protocol = vlan_get_protocol(skb); + network_hdr.raw = skb_network_header(skb); + } + + switch (protocol) { + case htons(ETH_P_IP): + l4_hdr = network_hdr.ipv4->protocol; + break; + case 
htons(ETH_P_IPV6): + l4_hdr = network_hdr.ipv6->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but ip version=%x!\n", + protocol); + } + tx_ring->tx_stats.csum_err++; + goto no_csum; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + case IPPROTO_GRE: + if (skb->encapsulation) + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } + tx_ring->tx_stats.csum_err++; + goto no_csum; + } + + /* update TX checksum flag */ + first->tx_flags |= FM10K_TX_FLAGS_CSUM; + +no_csum: + /* populate Tx descriptor header size and mss */ + tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->hdrlen = 0; + tx_desc->mss = 0; +} + +#define FM10K_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 desc_flags = 0; + + /* set timestamping bits */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + desc_flags |= FM10K_TXD_FLAG_TIME; + + /* set checksum offload bits */ + desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, + FM10K_TXD_FLAG_CSUM); + + return desc_flags; +} + +static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, + struct fm10k_tx_desc *tx_desc, u16 i, + dma_addr_t dma, unsigned int size, u8 desc_flags) +{ + /* set RS and INT for last frame in a cache line */ + if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0) + desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT; + + /* record values to descriptor */ + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->flags = desc_flags; + tx_desc->buflen = cpu_to_le16(size); + + /* return true if we just wrapped the ring */ + return i == tx_ring->count; +} + +static void fm10k_tx_map(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_tx_desc *tx_desc; + struct skb_frag_struct *frag; + unsigned char *data; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u16 i = tx_ring->next_to_use; + u8 flags = fm10k_tx_desc_flags(skb, tx_flags); + + tx_desc = FM10K_TX_DESC(tx_ring, i); + + /* add HW VLAN tag */ + if (vlan_tx_tag_present(skb)) + tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); + else + tx_desc->vlan = 0; + + size = skb_headlen(skb); + data = skb->data; + + dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + + data_len = skb->data_len; + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) { + if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, + FM10K_MAX_DATA_PER_TXD, flags)) { + tx_desc = FM10K_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += FM10K_MAX_DATA_PER_TXD; + size -= FM10K_MAX_DATA_PER_TXD; + } + + if (likely(!data_len)) + break; + + if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, + dma, size, flags)) { + tx_desc = FM10K_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = 
skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer[i]; + } + + /* write last descriptor with LAST bit set */ + flags |= FM10K_TXD_FLAG_LAST; + + if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) + i = 0; + + /* record bytecount for BQL */ + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* record SW timestamp if HW timestamp is not available */ + skb_tx_timestamp(first->skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(fm10k_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + if (likely(fm10k_desc_unused(tx_ring) >= size)) + return 0; + return __fm10k_maybe_stop_tx(tx_ring, size); +} + +netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, + struct fm10k_ring *tx_ring) +{ + struct fm10k_tx_buffer *first; + int tso; + u32 tx_flags = 0; +#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD + unsigned short f; +#endif + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + + /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head + * otherwise try next time + */ +#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->gso_segs = 1; + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + + tso = fm10k_tso(tx_ring, first); + if (tso < 0) + goto out_drop; + else if (!tso) + fm10k_tx_csum(tx_ring, first); + + fm10k_tx_map(tx_ring, first); + + fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) +{ + return ring->stats.packets; +} + +static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) +{ + /* use SW head and tail until we have real hardware */ + u32 head = ring->next_to_clean; + u32 tail = ring->next_to_use; + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) +{ + u32 tx_done = fm10k_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = fm10k_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. By + * requiring this to fail twice we avoid races with + * clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
+ */ + if (!tx_pending || (tx_done_old != tx_done)) { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); + + return false; + } + + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); +} + +/** + * fm10k_tx_timeout_reset - initiate reset due to Tx timeout + * @interface: driver private struct + **/ +void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__FM10K_DOWN, &interface->state)) { + netdev_err(interface->netdev, "Reset interface\n"); + interface->tx_timeout_count++; + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + fm10k_service_event_schedule(interface); + } +} + +/** + * fm10k_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *tx_ring) +{ + struct fm10k_intfc *interface = q_vector->interface; + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__FM10K_DOWN, &interface->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer[i]; + tx_desc = FM10K_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer; + tx_desc = FM10K_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer; + tx_desc = FM10K_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + 
q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct fm10k_hw *hw = &interface->hw; + + netif_err(interface, drv, tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->queue_index, + fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), + fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i); + + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + netif_info(interface, probe, tx_ring->netdev, + "tx hang %d detected on queue %d, resetting interface\n", + interface->tx_timeout_count + 1, + tx_ring->queue_index); + + fm10k_tx_timeout_reset(interface); + + /* the netdev is about to reset, no point in enabling stuff */ + return true; + } + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__FM10K_DOWN, &interface->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * fm10k_update_itr - update the dynamic ITR value based on packet size + * + * Stores a new ITR value based on strictly on packet size. The + * divisors and thresholds used by this function were determined based + * on theoretical maximum wire speed and testing data, in order to + * minimize response time while increasing bulk throughput. 
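+ *
+ * Worked example of the heuristic below: 60 byte frames average out to
+ * 60 + 24 = 84, which takes the divide-by-two case for an interval value
+ * of 42; 600 byte frames hit the mid-size boost for (600 + 24) / 3 = 208;
+ * and 1514 byte frames give (1514 + 24) / 2 = 769.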
+ * + * @ring_container: Container for rings to have ITR updated + **/ +static void fm10k_update_itr(struct fm10k_ring_container *ring_container) +{ + unsigned int avg_wire_size, packets; + + /* Only update ITR if we are using adaptive setting */ + if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) + goto clear_counts; + + packets = ring_container->total_packets; + if (!packets) + goto clear_counts; + + avg_wire_size = ring_container->total_bytes / packets; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + if (avg_wire_size > 3000) + avg_wire_size = 3000; + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + avg_wire_size /= 3; + else + avg_wire_size /= 2; + + /* write back value and retain adaptive flag */ + ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; + +clear_counts: + ring_container->total_bytes = 0; + ring_container->total_packets = 0; +} + +static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) +{ + /* Enable auto-mask and clear the current mask */ + u32 itr = FM10K_ITR_ENABLE; + + /* Update Tx ITR */ + fm10k_update_itr(&q_vector->tx); + + /* Update Rx ITR */ + fm10k_update_itr(&q_vector->rx); + + /* Store Tx itr in timer slot 0 */ + itr |= (q_vector->tx.itr & FM10K_ITR_MAX); + + /* Shift Rx itr to timer slot 1 */ + itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; + + /* Write the final value to the ITR register */ + writel(itr, q_vector->itr); +} + +static int fm10k_poll(struct napi_struct *napi, int budget) +{ + struct fm10k_q_vector *q_vector = + container_of(napi, struct fm10k_q_vector, napi); + struct fm10k_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + fm10k_for_each_ring(ring, q_vector->tx) + clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + + /* attempt to distribute budget to each queue fairly, but don't + * allow the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + fm10k_for_each_ring(ring, q_vector->rx) + clean_complete &= fm10k_clean_rx_irq(q_vector, ring, + per_ring_budget); + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + + /* re-enable the q_vector */ + fm10k_qv_enable(q_vector); + + return 0; +} + +/** + * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device + * @interface: board private structure to initialize + * + * When QoS (Quality of Service) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort QoS + * initialization. + * + * This function handles all combinations of Qos and RSS. 
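+ *
+ * For instance (illustrative numbers only): with 8 traffic classes and 128
+ * hardware queues, rss_i works out to 16 (assuming the RSS limit does not
+ * cap it lower), so all 128 Tx/Rx queues are used and traffic class i maps
+ * to the 16 queues starting at 16 * i.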
+ * + **/ +static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + struct fm10k_ring_feature *f; + int rss_i, i; + int pcs; + + /* Map queue offset and counts onto allocated tx queues */ + pcs = netdev_get_num_tc(dev); + + if (pcs <= 1) + return false; + + /* set QoS mask and indices */ + f = &interface->ring_feature[RING_F_QOS]; + f->indices = pcs; + f->mask = (1 << fls(pcs - 1)) - 1; + + /* determine the upper limit for our current DCB mode */ + rss_i = interface->hw.mac.max_queues / pcs; + rss_i = 1 << (fls(rss_i) - 1); + + /* set RSS mask and indices */ + f = &interface->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = (1 << fls(rss_i - 1)) - 1; + + /* configure pause class to queue mapping */ + for (i = 0; i < pcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + interface->num_rx_queues = rss_i * pcs; + interface->num_tx_queues = rss_i * pcs; + + return true; +} + +/** + * fm10k_set_rss_queues: Allocate queues for RSS + * @interface: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) +{ + struct fm10k_ring_feature *f; + u16 rss_i; + + f = &interface->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); + + /* record indices and power of 2 mask for RSS */ + f->indices = rss_i; + f->mask = (1 << fls(rss_i - 1)) - 1; + + interface->num_rx_queues = rss_i; + interface->num_tx_queues = rss_i; + + return true; +} + +/** + * fm10k_set_num_queues: Allocate queues for device, feature dependent + * @interface: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void fm10k_set_num_queues(struct fm10k_intfc *interface) +{ + /* Start with base case */ + interface->num_rx_queues = 1; + interface->num_tx_queues = 1; + + if (fm10k_set_qos_queues(interface)) + return; + + fm10k_set_rss_queues(interface); +} + +/** + * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector + * @interface: board private structure to initialize + * @v_count: q_vectors allocated on interface, used for ring interleaving + * @v_idx: index of vector in interface struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
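+ *
+ * Rings are interleaved across vectors by stepping the ring index by
+ * v_count; for example, with v_count = 4 the vector at v_idx 1 that owns
+ * two Tx rings picks up Tx rings 1 and 5.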
+ **/ +static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct fm10k_q_vector *q_vector; + struct fm10k_ring *ring; + int ring_count, size; + + ring_count = txr_count + rxr_count; + size = sizeof(struct fm10k_q_vector) + + (sizeof(struct fm10k_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(interface->netdev, &q_vector->napi, + fm10k_poll, NAPI_POLL_WEIGHT); + + /* tie q_vector and interface together */ + interface->q_vector[v_idx] = q_vector; + q_vector->interface = interface; + q_vector->v_idx = v_idx; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* save Tx ring container info */ + q_vector->tx.ring = ring; + q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; + q_vector->tx.itr = interface->tx_itr; + q_vector->tx.count = txr_count; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &interface->pdev->dev; + ring->netdev = interface->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* apply Tx specific ring traits */ + ring->count = interface->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to interface */ + interface->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + /* save Rx ring container info */ + q_vector->rx.ring = ring; + q_vector->rx.itr = interface->rx_itr; + q_vector->rx.count = rxr_count; + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &interface->pdev->dev; + ring->netdev = interface->netdev; + rcu_assign_pointer(ring->l2_accel, interface->l2_accel); + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* apply Rx specific ring traits */ + ring->count = interface->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to interface */ + interface->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + fm10k_dbg_q_vector_init(q_vector); + + return 0; +} + +/** + * fm10k_free_q_vector - Free memory allocated for specific interrupt vector + * @interface: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) +{ + struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; + struct fm10k_ring *ring; + + fm10k_dbg_q_vector_exit(q_vector); + + fm10k_for_each_ring(ring, q_vector->tx) + interface->tx_ring[ring->queue_index] = NULL; + + fm10k_for_each_ring(ring, q_vector->rx) + interface->rx_ring[ring->queue_index] = NULL; + + interface->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors + * @interface: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
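+ *
+ * For example, with 16 Rx and 16 Tx queues but only 8 q_vectors available,
+ * each vector ends up servicing two Rx and two Tx rings (DIV_ROUND_UP of
+ * the remaining rings over the remaining vectors).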
+ **/ +static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) +{ + unsigned int q_vectors = interface->num_q_vectors; + unsigned int rxr_remaining = interface->num_rx_queues; + unsigned int txr_remaining = interface->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; + + while (v_idx--) + fm10k_free_q_vector(interface, v_idx); + + return -ENOMEM; +} + +/** + * fm10k_free_q_vectors - Free memory allocated for interrupt vectors + * @interface: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void fm10k_free_q_vectors(struct fm10k_intfc *interface) +{ + int v_idx = interface->num_q_vectors; + + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; + + while (v_idx--) + fm10k_free_q_vector(interface, v_idx); +} + +/** + * f10k_reset_msix_capability - reset MSI-X capability + * @interface: board private structure to initialize + * + * Reset the MSI-X capability back to its starting state + **/ +static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) +{ + pci_disable_msix(interface->pdev); + kfree(interface->msix_entries); + interface->msix_entries = NULL; +} + +/** + * f10k_init_msix_capability - configure MSI-X capability + * @interface: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int fm10k_init_msix_capability(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + int v_budget, vector; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * the default is to use pairs of vectors + */ + v_budget = max(interface->num_rx_queues, interface->num_tx_queues); + v_budget = min_t(u16, v_budget, num_online_cpus()); + + /* account for vectors not related to queues */ + v_budget += NON_Q_VECTORS(hw); + + /* At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation is fatal. 
*/ + interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), + GFP_KERNEL); + if (!interface->msix_entries) + return -ENOMEM; + + /* populate entry values */ + for (vector = 0; vector < v_budget; vector++) + interface->msix_entries[vector].entry = vector; + + /* Attempt to enable MSI-X with requested value */ + v_budget = pci_enable_msix_range(interface->pdev, + interface->msix_entries, + MIN_MSIX_COUNT(hw), + v_budget); + if (v_budget < 0) { + kfree(interface->msix_entries); + interface->msix_entries = NULL; + return -ENOMEM; + } + + /* record the number of queues available for q_vectors */ + interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); + + return 0; +} + +/** + * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS + * @interface: Interface structure containing rings and devices + * + * Cache the descriptor ring offsets for QoS + **/ +static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + int pc, offset, rss_i, i, q_idx; + u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; + u8 num_pcs = netdev_get_num_tc(dev); + + if (num_pcs <= 1) + return false; + + rss_i = interface->ring_feature[RING_F_RSS].indices; + + for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { + q_idx = pc; + for (i = 0; i < rss_i; i++) { + interface->tx_ring[offset + i]->reg_idx = q_idx; + interface->tx_ring[offset + i]->qos_pc = pc; + interface->rx_ring[offset + i]->reg_idx = q_idx; + interface->rx_ring[offset + i]->qos_pc = pc; + q_idx += pc_stride; + } + } + + return true; +} + +/** + * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS + * @interface: Interface structure containing rings and devices + * + * Cache the descriptor ring offsets for RSS + **/ +static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_rx_queues; i++) + interface->rx_ring[i]->reg_idx = i; + + for (i = 0; i < interface->num_tx_queues; i++) + interface->tx_ring[i]->reg_idx = i; +} + +/** + * fm10k_assign_rings - Map rings to network devices + * @interface: Interface structure containing rings and devices + * + * This function is meant to go through and configure both the network + * devices so that they contain rings, and configure the rings so that + * they function with their network devices. + **/ +static void fm10k_assign_rings(struct fm10k_intfc *interface) +{ + if (fm10k_cache_ring_qos(interface)) + return; + + fm10k_cache_ring_rss(interface); +} + +static void fm10k_init_reta(struct fm10k_intfc *interface) +{ + u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; + u32 reta, base; + + /* If the netdev is initialized we have to maintain the table if possible */ + if (interface->netdev->reg_state) { + for (i = FM10K_RETA_SIZE; i--;) { + reta = interface->reta[i]; + if ((((reta << 24) >> 24) < rss_i) && + (((reta << 16) >> 24) < rss_i) && + (((reta << 8) >> 24) < rss_i) && + (((reta) >> 24) < rss_i)) + continue; + goto repopulate_reta; + } + + /* do nothing if all of the elements are in bounds */ + return; + } + +repopulate_reta: + /* Populate the redirection table 4 entries at a time. To do this + * we are generating the results for n and n+2 and then interleaving + * those with the results for n+1 and n+3.
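+ *
+ * Illustration (assuming the usual table of FM10K_RETA_SIZE registers
+ * holding four one-byte entries each): entry n ends up as (n * rss_i) >> 7,
+ * so with rss_i = 4 and 128 entries, entries 0-31 select queue 0, 32-63
+ * queue 1, 64-95 queue 2 and 96-127 queue 3.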
+ */ + for (i = FM10K_RETA_SIZE; i--;) { + /* first pass generates n and n+2 */ + base = ((i * 0x00040004) + 0x00020000) * rss_i; + reta = (base & 0x3F803F80) >> 7; + + /* second pass generates n+1 and n+3 */ + base += 0x00010001 * rss_i; + reta |= (base & 0x3F803F80) << 1; + + interface->reta[i] = reta; + } +} + +/** + * fm10k_init_queueing_scheme - Determine proper queueing scheme + * @interface: board private structure to initialize + * + * We determine which queueing scheme to use based on... + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) +{ + int err; + + /* Number of supported queues */ + fm10k_set_num_queues(interface); + + /* Configure MSI-X capability */ + err = fm10k_init_msix_capability(interface); + if (err) { + dev_err(&interface->pdev->dev, + "Unable to initialize MSI-X capability\n"); + return err; + } + + /* Allocate memory for queues */ + err = fm10k_alloc_q_vectors(interface); + if (err) + return err; + + /* Map rings to devices, and map devices to physical queues */ + fm10k_assign_rings(interface); + + /* Initialize RSS redirection table */ + fm10k_init_reta(interface); + + return 0; +} + +/** + * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings + * @interface: board private structure to clear queueing scheme on + * + * We go through and clear queueing specific resources and reset the structure + * to pre-load conditions + **/ +void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) +{ + fm10k_free_q_vectors(interface); + fm10k_reset_msix_capability(interface); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c new file mode 100644 index 0000000000000000000000000000000000000000..14a4ea795c01c58e9d123e816c85ea38fbb0fe60 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -0,0 +1,2125 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_common.h" + +/** + * fm10k_fifo_init - Initialize a message FIFO + * @fifo: pointer to FIFO + * @buffer: pointer to memory to be used to store FIFO + * @size: maximum message size to store in FIFO, must be 2^n - 1 + **/ +static void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size) +{ + fifo->buffer = buffer; + fifo->size = size; + fifo->head = 0; + fifo->tail = 0; +} + +/** + * fm10k_fifo_used - Retrieve used space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of DWORDs used in the FIFO + **/ +static u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo) +{ + return fifo->tail - fifo->head; +} + +/** + * fm10k_fifo_unused - Retrieve unused space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of unused DWORDs in the FIFO + **/ +static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) +{ + return fifo->size + fifo->head - fifo->tail; +} + +/** + * fm10k_fifo_empty - Test to verify if fifo is empty + * @fifo: pointer to FIFO + * + * This function returns true if the FIFO is empty, else false + **/ +static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) +{ + return fifo->head == fifo->tail; +} + +/** + * fm10k_fifo_head_offset - returns indices of head with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to head + * + * This function returns the indicies into the fifo based on head + offset + **/ +static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->head + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_tail_offset - returns indices of tail with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to tail + * + * This function returns the indicies into the fifo based on tail + offset + **/ +static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->tail + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_head_len - Retrieve length of first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the first message in the FIFO + **/ +static u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo) +{ + u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); + + /* verify there is at least 1 DWORD in the fifo so *head is valid */ + if (fm10k_fifo_empty(fifo)) + return 0; + + /* retieve the message length */ + return FM10K_TLV_DWORD_LEN(*head); +} + +/** + * fm10k_fifo_head_drop - Drop the first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the message dropped from the FIFO + **/ +static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) +{ + u16 len = fm10k_fifo_head_len(fifo); + + /* update head so it is at the start of next frame */ + fifo->head += len; + + return len; +} + +/** + * fm10k_mbx_index_len - Convert a head/tail index into a length value + * @mbx: pointer to mailbox + * @head: head index + * @tail: head index + * + * This function takes the head and tail index and determines the length + * of the data indicated by this pair. 
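+ *
+ * As a worked trace of the arithmetic below, assume a hypothetical
+ * mbmem_len of 16, so indices live in 0..31 with index 0 and the
+ * all-ones index 31 never used. For head = 30 and tail = 2:
+ *
+ *    len = 2 - 30        -> wrapped, so subtract 2 more
+ *    len = (2 - 30) - 2  -> masked with 31 gives 2
+ *
+ * i.e. two DWORDs sit between head and tail, matching a walk from 30
+ * to 2 that skips the two reserved indices 31 and 0.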
+ **/ +static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) +{ + u16 len = tail - head; + + /* we wrapped so subtract 2, one for index 0, one for all 1s index */ + if (len > tail) + len -= 2; + + return len & ((mbx->mbmem_len << 1) - 1); +} + +/** + * fm10k_mbx_tail_add - Determine new tail value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (tail > mbx->tail) ? --tail : ++tail; +} + +/** + * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (tail < mbx->tail) ? ++tail : --tail; +} + +/** + * fm10k_mbx_head_add - Determine new head value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (head > mbx->head) ? --head : ++head; +} + +/** + * fm10k_mbx_head_sub - Determine new head value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (head < mbx->head) ? ++head : --head; +} + +/** + * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed + * @mbx: pointer to mailbox + * + * This function will return the length of the message currently being + * pushed onto the tail of the Rx queue. + **/ +static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) +{ + u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); + + /* pushed tail is only valid if pushed is set */ + if (!mbx->pushed) + return 0; + + return FM10K_TLV_DWORD_LEN(*tail); +} + +/** + * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo + * @fifo: pointer to FIFO + * @msg: message array to populate + * @tail_offset: additional offset to add to tail pointer + * @len: length of FIFO to copy into message header + * + * This function will take a message and copy it into a section of the + * FIFO. In order to get something into a location other than just + * the tail you can use tail_offset to adjust the pointer. 
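+ *
+ * A worked example of the wrap-around handling below (sizes are
+ * illustrative): with fifo->size = 8, a tail offset of 6 and len = 4,
+ * only 2 DWORDs fit before the end of the ring, so msg[2] and msg[3]
+ * are copied to buffer[0] and buffer[1] first, and msg[0] and msg[1]
+ * are then copied to buffer[6] and buffer[7].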
+ **/ +static void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo, + const u32 *msg, u16 tail_offset, u16 len) +{ + u16 end = fm10k_fifo_tail_offset(fifo, tail_offset); + u32 *tail = fifo->buffer + end; + + /* track when we should cross the end of the FIFO */ + end = fifo->size - end; + + /* copy end of message before start of message */ + if (end < len) + memcpy(fifo->buffer, msg + end, (len - end) << 2); + else + end = len; + + /* Copy remaining message into Tx FIFO */ + memcpy(tail, msg, end << 2); +} + +/** + * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO + * @fifo: pointer to FIFO + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. + **/ +static s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg) +{ + u16 len = FM10K_TLV_DWORD_LEN(*msg); + + /* verify parameters */ + if (len > fifo->size) + return FM10K_MBX_ERR_SIZE; + + /* verify there is room for the message */ + if (len > fm10k_fifo_unused(fifo)) + return FM10K_MBX_ERR_NO_SPACE; + + /* Copy message into FIFO */ + fm10k_fifo_write_copy(fifo, msg, 0, len); + + /* memory barrier to guarantee FIFO is written before tail update */ + wmb(); + + /* Update Tx FIFO tail */ + fifo->tail += len; + + return 0; +} + +/** + * fm10k_mbx_validate_msg_size - Validate incoming message based on size + * @mbx: pointer to mailbox + * @len: length of data pushed onto buffer + * + * This function analyzes the frame and will return a non-zero value when + * the start of a message larger than the mailbox is detected. + **/ +static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 total_len = 0, msg_len; + u32 *msg; + + /* length should include previous amounts pushed */ + len += mbx->pushed; + + /* offset in message is based off of current message size */ + do { + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); + msg_len = FM10K_TLV_DWORD_LEN(*msg); + total_len += msg_len; + } while (total_len < len); + + /* message extends out of pushed section, but fits in FIFO */ + if ((len < total_len) && (msg_len <= mbx->rx.size)) + return 0; + + /* return length of invalid section */ + return (len < total_len) ? len : (len - total_len); +} + +/** + * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @mbx: pointer to mailbox + * + * This function will take a seciton of the Rx FIFO and copy it into the + mbx->tail--; + * mailbox memory. The offset in mbmem is based on the lower bits of the + * tail and len determines the length to copy. 
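+ *
+ * Note that word 0 of the mbmem window is reserved for the message
+ * header (fm10k_mbx_write stores mbx_hdr there), which is why the copy
+ * loop below forces a zero index back to 1. With a hypothetical window
+ * of 16 DWORDs the data index therefore cycles 1, 2, ..., 15, 1, ...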
+ **/ +static void fm10k_mbx_write_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u32 mbmem = mbx->mbmem_reg; + u32 *head = fifo->buffer; + u16 end, len, tail, mask; + + if (!mbx->tail_len) + return; + + /* determine data length and mbmem tail index */ + mask = mbx->mbmem_len - 1; + len = mbx->tail_len; + tail = fm10k_mbx_tail_sub(mbx, len); + if (tail > mask) + tail++; + + /* determine offset in the ring */ + end = fm10k_fifo_head_offset(fifo, mbx->pulled); + head += end; + + /* memory barrier to guarantee data is ready to be read */ + rmb(); + + /* Copy message from Tx FIFO */ + for (end = fifo->size - end; len; head = fifo->buffer) { + do { + /* adjust tail to match offset for FIFO */ + tail &= mask; + if (!tail) + tail++; + + /* write message to hardware FIFO */ + fm10k_write_reg(hw, mbmem + tail++, *(head++)); + } while (--len && --end); + } +} + +/** + * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number last received + * + * This function will push the tail index forward based on the remote + * head index. It will then pull up to mbmem_len DWORDs off of the + * head of the FIFO and will place it in the MBMEM registers + * associated with the mailbox. + **/ +static void fm10k_mbx_pull_head(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + + /* update number of bytes pulled and update bytes in transit */ + mbx->pulled += mbx->tail_len - ack; + + /* determine length of data to pull, reserve space for mbmem header */ + mbmem_len = mbx->mbmem_len - 1; + len = fm10k_fifo_used(fifo) - mbx->pulled; + if (len > mbmem_len) + len = mbmem_len; + + /* update tail and record number of bytes in transit */ + mbx->tail = fm10k_mbx_tail_add(mbx, len - ack); + mbx->tail_len = len; + + /* drop pulled messages from the FIFO */ + for (len = fm10k_fifo_head_len(fifo); + len && (mbx->pulled >= len); + len = fm10k_fifo_head_len(fifo)) { + mbx->pulled -= fm10k_fifo_head_drop(fifo); + mbx->tx_messages++; + mbx->tx_dwords += len; + } + + /* Copy message out from the Tx FIFO */ + fm10k_mbx_write_copy(hw, mbx); +} + +/** + * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a seciton of the mailbox memory and copy it + * into the Rx FIFO. The offset is based on the lower bits of the + * head and len determines the length to copy. 
+ **/ +static void fm10k_mbx_read_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len; + u32 *tail = fifo->buffer; + u16 end, len, head; + + /* determine data length and mbmem head index */ + len = mbx->head_len; + head = fm10k_mbx_head_sub(mbx, len); + if (head >= mbx->mbmem_len) + head++; + + /* determine offset in the ring */ + end = fm10k_fifo_tail_offset(fifo, mbx->pushed); + tail += end; + + /* Copy message into Rx FIFO */ + for (end = fifo->size - end; len; tail = fifo->buffer) { + do { + /* adjust head to match offset for FIFO */ + head &= mbx->mbmem_len - 1; + if (!head) + head++; + + /* read message from hardware FIFO */ + *(tail++) = fm10k_read_reg(hw, mbmem + head++); + } while (--len && --end); + } + + /* memory barrier to guarantee FIFO is written before tail update */ + wmb(); +} + +/** + * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will first validate the tail index and size for the + * incoming message. It then updates the acknowlegment number and + * copies the data into the FIFO. It will return the number of messages + * dequeued on success and a negative value on error. + **/ +static s32 fm10k_mbx_push_tail(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); + + /* determine length of data to push */ + len = fm10k_fifo_unused(fifo) - mbx->pushed; + if (len > seq) + len = seq; + + /* update head and record bytes received */ + mbx->head = fm10k_mbx_head_add(mbx, len); + mbx->head_len = len; + + /* nothing to do if there is no data */ + if (!len) + return 0; + + /* Copy msg into Rx FIFO */ + fm10k_mbx_read_copy(hw, mbx); + + /* determine if there are any invalid lengths in message */ + if (fm10k_mbx_validate_msg_size(mbx, len)) + return FM10K_MBX_ERR_SIZE; + + /* Update pushed */ + mbx->pushed += len; + + /* flush any completed messages */ + for (len = fm10k_mbx_pushed_tail_len(mbx); + len && (mbx->pushed >= len); + len = fm10k_mbx_pushed_tail_len(mbx)) { + fifo->tail += len; + mbx->pushed -= len; + mbx->rx_messages++; + mbx->rx_dwords += len; + } + + return 0; +} + +/* pre-generated data for generating the CRC based on the poly 0xAC9A. 
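+ *
+ * The values below form the standard byte-at-a-time lookup table for a
+ * right-shifting (LSB-first) 16 bit CRC with polynomial 0xAC9A. A
+ * sketch of how such a table can be generated (illustrative only, the
+ * driver ships the precomputed constants):
+ *
+ *    u16 crc, table[256];
+ *    unsigned int i, bit;
+ *
+ *    for (i = 0; i < 256; i++) {
+ *        crc = i;
+ *        for (bit = 0; bit < 8; bit++)
+ *            crc = (crc & 1) ? (crc >> 1) ^ 0xAC9A : (crc >> 1);
+ *        table[i] = crc;
+ *    }
+ *
+ * For example table[1] works out to 0x7956, matching the second entry.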
*/ +static const u16 fm10k_crc_16b_table[256] = { + 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797, + 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678, + 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449, + 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6, + 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B, + 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4, + 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5, + 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A, + 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA, + 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035, + 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204, + 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB, + 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666, + 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789, + 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8, + 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457, + 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D, + 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2, + 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3, + 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C, + 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1, + 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E, + 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F, + 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80, + 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40, + 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF, + 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E, + 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71, + 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC, + 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13, + 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922, + 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD }; + +/** + * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data + * @data: pointer to data to process + * @seed: seed value for CRC + * @len: length measured in 16 bits words + * + * This function will generate a CRC based on the polynomial 0xAC9A and + * whatever value is stored in the seed variable. Note that this + * value inverts the local seed and the result in order to capture all + * leading and trailing zeros. 
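+ *
+ * Note that len counts 16 bit words rather than DWORDs, which is why
+ * fm10k_fifo_crc below multiplies its DWORD lengths by two. An
+ * illustrative call hashing len_dw DWORDs with the mailbox seed:
+ *
+ *    crc = fm10k_crc_16b(msg, FM10K_MBX_CRC_SEED, len_dw * 2);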
+ */ +static u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len) +{ + u32 result = seed; + + while (len--) { + result ^= *(data++); + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + + if (!(len--)) + break; + + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + } + + return (u16)result; +} + +/** + * fm10k_fifo_crc - generate a CRC based off of FIFO data + * @fifo: pointer to FIFO + * @offset: offset point for start of FIFO + * @len: number of DWORDS words to process + * @seed: seed value for CRC + * + * This function generates a CRC for some region of the FIFO + **/ +static u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset, + u16 len, u16 seed) +{ + u32 *data = fifo->buffer + offset; + + /* track when we should cross the end of the FIFO */ + offset = fifo->size - offset; + + /* if we are in 2 blocks process the end of the FIFO first */ + if (offset < len) { + seed = fm10k_crc_16b(data, seed, offset * 2); + data = fifo->buffer; + len -= offset; + } + + /* process any remaining bits */ + return fm10k_crc_16b(data, seed, len * 2); +} + +/** + * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data + * @mbx: pointer to mailbox + * @head: head index provided by remote mailbox + * + * This function will generate the CRC for all data from the end of the + * last head update to the current one. It uses the result of the + * previous CRC as the seed for this update. The result is stored in + * mbx->local. + **/ +static void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) +{ + u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); + + /* determine the offset for the start of the region to be pulled */ + head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); + + /* update local CRC to include all of the pulled data */ + mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); +} + +/** + * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data + * @mbx: pointer to mailbox + * + * This function will take all data that has been provided from the remote + * end and generate a CRC for it. This is stored in mbx->remote. The + * CRC for the header is then computed and if the result is non-zero this + * is an error and we signal an error dropping all data and resetting the + * connection. + */ +static s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len = mbx->head_len; + u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len; + u16 crc; + + /* update the remote CRC if new data has been received */ + if (len) + mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote); + + /* process the full header as we have to validate the CRC */ + crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1); + + /* notify other end if we have a problem */ + return crc ? FM10K_MBX_ERR_CRC : 0; +} + +/** + * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO + * @mbx: pointer to mailbox + * + * This function returns true if there is a message in the Rx FIFO to dequeue. 
+ **/ +static bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx) +{ + u16 msg_size = fm10k_fifo_head_len(&mbx->rx); + + return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size); +} + +/** + * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx + * @mbx: pointer to mailbox + * @len: verify free space is >= this value + * + * This function returns true if the mailbox is in a state ready to transmit. + **/ +static bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len) +{ + u16 fifo_unused = fm10k_fifo_unused(&mbx->tx); + + return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len); +} + +/** + * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied + * @mbx: pointer to mailbox + * + * This function returns true if the Tx FIFO is empty. + **/ +static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) +{ + return fm10k_fifo_empty(&mbx->tx); +} + +/** + * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function dequeues messages and hands them off to the tlv parser. + * It will return the number of messages processed when called. + **/ +static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + s32 err; + u16 cnt; + + /* parse Rx messages out of the Rx FIFO to empty it */ + for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) { + err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, + mbx, mbx->msg_data); + if (err < 0) + mbx->rx_parse_err++; + + fm10k_fifo_head_drop(fifo); + } + + /* shift remaining bytes back to start of FIFO */ + memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2); + + /* shift head and tail based on the memory we moved */ + fifo->tail -= fifo->head; + fifo->head = 0; + + return cnt; +} + +/** + * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. 
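+ *
+ * Callers normally reach this routine through the ops table filled in
+ * by the init functions at the end of this file. A hedged usage sketch
+ * (msg is a hypothetical, already TLV-formatted buffer whose first
+ * DWORD encodes its length):
+ *
+ *    if (mbx->ops.tx_ready(mbx, FM10K_TLV_DWORD_LEN(*msg)))
+ *        err = mbx->ops.enqueue_tx(hw, mbx, msg);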
+ **/ +static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, const u32 *msg) +{ + u32 countdown = mbx->timeout; + s32 err; + + switch (mbx->state) { + case FM10K_STATE_CLOSED: + case FM10K_STATE_DISCONNECT: + return FM10K_MBX_ERR_NO_MBX; + default: + break; + } + + /* enqueue the message on the Tx FIFO */ + err = fm10k_fifo_enqueue(&mbx->tx, msg); + + /* if it failed give the FIFO a chance to drain */ + while (err && countdown) { + countdown--; + udelay(mbx->udelay); + mbx->ops.process(hw, mbx); + err = fm10k_fifo_enqueue(&mbx->tx, msg); + } + + /* if we failed trhead the error */ + if (err) { + mbx->timeout = 0; + mbx->tx_busy++; + } + + /* begin processing message, ignore errors as this is just meant + * to start the mailbox flow so we are not concerned if there + * is a bad error, or the mailbox is already busy with a request + */ + if (!mbx->tail_len) + mbx->ops.process(hw, mbx); + + return 0; +} + +/** + * fm10k_mbx_read - Copies the mbmem to local message buffer + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the mbmem to the message array + **/ +static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* only allow one reader in here at a time */ + if (mbx->mbx_hdr) + return FM10K_MBX_ERR_BUSY; + + /* read to capture initial interrupt bits */ + if (fm10k_read_reg(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT) + mbx->mbx_lock = FM10K_MBX_ACK; + + /* write back interrupt bits to clear */ + fm10k_write_reg(hw, mbx->mbx_reg, + FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT); + + /* read remote header */ + mbx->mbx_hdr = fm10k_read_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len); + + return 0; +} + +/** + * fm10k_mbx_write - Copies the local message buffer to mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the the message array to mbmem + **/ +static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + u32 mbmem = mbx->mbmem_reg; + + /* write new msg header to notify recepient of change */ + fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); + + /* write mailbox to sent interrupt */ + if (mbx->mbx_lock) + fm10k_write_reg(hw, mbx->mbx_reg, mbx->mbx_lock); + + /* we no longer are using the header so free it */ + mbx->mbx_hdr = 0; + mbx->mbx_lock = 0; +} + +/** + * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +static void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx) +{ + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | + FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE); +} + +/** + * fm10k_mbx_create_data_hdr - Generate a data mailbox header + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +static void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u16 crc; + + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + /* generate CRC for data in flight and header */ + crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled), + mbx->tail_len, mbx->local); + crc = fm10k_crc_16b(&hdr, crc, 1); + + /* load header to memory to be 
written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a disconnect mailbox header + **/ +static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_error_msg - Generate a error message + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may shift the message by 1 DWORD and then place an error header + * at the start of the message. + **/ +static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_TYPE: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + case FM10K_MBX_ERR_CRC: + break; + default: + return; + } + + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) | + FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); +} + +/** + * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized type, invalid route, or a malformed message. 
+ **/ +static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) +{ + u16 type, rsvd0, head, tail, size; + const u32 *hdr = &mbx->mbx_hdr; + + type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE); + rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + + if (rsvd0) + return FM10K_MBX_ERR_RSVD0; + + switch (type) { + case FM10K_MSG_DISCONNECT: + /* validate that all data has been received */ + if (tail != mbx->head) + return FM10K_MBX_ERR_TAIL; + + /* fall through */ + case FM10K_MSG_DATA: + /* validate that head is moving correctly */ + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + + /* validate that tail is moving correctly */ + if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL))) + return FM10K_MBX_ERR_TAIL; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + + return FM10K_MBX_ERR_TAIL; + case FM10K_MSG_CONNECT: + /* validate size is in range and is power of 2 mask */ + if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1))) + return FM10K_MBX_ERR_SIZE; + + /* fall through */ + case FM10K_MSG_ERROR: + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + /* neither create nor error include a tail offset */ + if (tail) + return FM10K_MBX_ERR_TAIL; + + break; + default: + return FM10K_MBX_ERR_TYPE; + } + + return 0; +} + +/** + * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote fifo head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* update our checksum for the outgoing data */ + fm10k_mbx_update_local_crc(mbx, head); + + /* as long as other end recognizes us keep sending data */ + fm10k_mbx_pull_head(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) + fm10k_mbx_create_data_hdr(mbx); + else + fm10k_mbx_create_disconnect_hdr(mbx); + break; + case FM10K_STATE_CONNECT: + /* send disconnect even if we aren't connected */ + fm10k_mbx_create_connect_hdr(mbx); + break; + case FM10K_STATE_CLOSED: + /* generate new header based on data */ + fm10k_mbx_create_disconnect_hdr(mbx); + default: + break; + } + + return 0; +} + +/** + * fm10k_mbx_reset_work- Reset internal pointers for any pending work + * @mbx: pointer to mailbox + * + * This function will reset all internal pointers so any work in progress + * is dropped. This call should occur every time we transition from the + * open state to the connect state. 
+ **/ +static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) +{ + /* reset our outgoing max size back to Rx limits */ + mbx->max_size = mbx->rx.size - 1; + + /* just do a quick resysnc to start of message */ + mbx->pushed = 0; + mbx->pulled = 0; + mbx->tail_len = 0; + mbx->head_len = 0; + mbx->rx.tail = 0; + mbx->rx.head = 0; +} + +/** + * fm10k_mbx_update_max_size - Update the max_size and drop any large messages + * @mbx: pointer to mailbox + * @size: new value for max_size + * + * This function will update the max_size value and drop any outgoing messages + * from the head of the Tx FIFO that are larger than max_size. + **/ +static void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) +{ + u16 len; + + mbx->max_size = size; + + /* flush any oversized messages from the queue */ + for (len = fm10k_fifo_head_len(&mbx->tx); + len > size; + len = fm10k_fifo_head_len(&mbx->tx)) { + fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + } +} + +/** + * fm10k_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to either a disconnected state + * or a connect state depending on the current mailbox state + **/ +static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* just do a quick resysnc to start of frame */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* we cannot exit connect until the size is good */ + if (mbx->state == FM10K_STATE_OPEN) + mbx->state = FM10K_STATE_CONNECT; + else + mbx->state = FM10K_STATE_CLOSED; +} + +/** + * fm10k_mbx_process_connect - Process connect header + * @mbx: pointer to mailbox + * @msg: message array to process + * + * This function will read an incoming connect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 size, head; + + /* we will need to pull all of the fields for verification */ + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* we cannot exit connect until the size is good */ + if (size > mbx->rx.size) { + mbx->max_size = mbx->rx.size - 1; + } else { + /* record the remote system requesting connection */ + mbx->state = FM10K_STATE_OPEN; + + fm10k_mbx_update_max_size(mbx, size); + } + break; + default: + break; + } + + /* align our tail index to remote head index */ + mbx->tail = head; + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_data - Process data header + * @mbx: pointer to mailbox + * + * This function will read an incoming data header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
+ **/ +static s32 fm10k_mbx_process_data(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 err; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + + /* if we are in connect just update our data and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + mbx->tail = head; + mbx->state = FM10K_STATE_OPEN; + } + + /* abort on message size errors */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* verify the checksum on the incoming data */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_disconnect - Process disconnect header + * @mbx: pointer to mailbox + * + * This function will read an incoming disconnect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 err; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + + /* We should not be receiving disconnect if Rx is incomplete */ + if (mbx->pushed) + return FM10K_MBX_ERR_TAIL; + + /* we have already verified mbx->head == tail so we know this is 0 */ + mbx->head_len = 0; + + /* verify the checksum on the incoming header is correct */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* state doesn't change if we still have work to do */ + if (!fm10k_mbx_tx_complete(mbx)) + break; + + /* verify the head indicates we completed all transmits */ + if (head != mbx->tail) + return FM10K_MBX_ERR_HEAD; + + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_error - Process error header + * @mbx: pointer to mailbox + * + * This function will read an incoming error header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
+ **/ +static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + s32 err_no; + u16 head; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + /* we only have lower 10 bits of error number os add upper bits */ + err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); + err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); + + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* reset tail index and size to prepare for reconnect */ + mbx->tail = head; + + /* if open then reset max_size and go back to connect */ + if (mbx->state == FM10K_STATE_OPEN) { + mbx->state = FM10K_STATE_CONNECT; + break; + } + + /* send a connect message to get data flowing again */ + fm10k_mbx_create_connect_hdr(mbx); + return 0; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_mbx_process - Process mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. + **/ +static s32 fm10k_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return 0; + + /* copy data from mailbox */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + /* validate type, source, and destination */ + err = fm10k_mbx_validate_msg_hdr(mbx); + if (err < 0) + goto msg_err; + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) { + case FM10K_MSG_CONNECT: + err = fm10k_mbx_process_connect(hw, mbx); + break; + case FM10K_MSG_DATA: + err = fm10k_mbx_process_data(hw, mbx); + break; + case FM10K_MSG_DISCONNECT: + err = fm10k_mbx_process_disconnect(hw, mbx); + break; + case FM10K_MSG_ERROR: + err = fm10k_mbx_process_error(hw, mbx); + break; + default: + err = FM10K_MBX_ERR_TYPE; + break; + } + +msg_err: + /* notify partner of errors on our end */ + if (err < 0) + fm10k_mbx_create_error_msg(mbx, err); + + /* copy data from mailbox */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +static void fm10k_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? 
FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + fm10k_write_reg(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + udelay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close just force the mailbox into shutdown */ + fm10k_mbx_connect_reset(mbx); + fm10k_mbx_update_max_size(mbx, 0); + + fm10k_write_reg(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_connect - Start mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection. It will populate the + * mailbox with a broadcast connect message and then initialize the lock. + * This is safe since the connect message is a single DWORD so the mailbox + * transaction is guaranteed to be atomic. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + + /* initialize header of remote mailbox */ + fm10k_mbx_create_disconnect_hdr(mbx); + fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_mbx_create_connect_hdr(mbx); + fm10k_mbx_write(hw, mbx); + + return 0; +} + +/** + * fm10k_mbx_validate_handlers - Validate layout of message parsing data + * @msg_data: handlers for mailbox events + * + * This function validates the layout of the message parsing data. This + * should be mostly static, but it is important to catch any errors that + * are made when constructing the parsers. 
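+ *
+ * The rules enforced here imply a handler table shaped roughly like the
+ * following (all names are illustrative, not part of the driver):
+ *
+ *    static const struct fm10k_msg_data example_msg_data[] = {
+ *        { .id = 1,               .attr = first_msg_attr, .func = first_msg_handler },
+ *        { .id = 2,               .attr = NULL,           .func = second_msg_handler },
+ *        { .id = FM10K_TLV_ERROR,                         .func = error_handler },
+ *    };
+ *
+ * IDs must be strictly increasing, every entry needs a handler, and the
+ * FM10K_TLV_ERROR terminator must provide a handler of its own.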
+ **/ +static s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data) +{ + const struct fm10k_tlv_attr *attr; + unsigned int id; + + /* Allow NULL mailboxes that transmit but don't receive */ + if (!msg_data) + return 0; + + while (msg_data->id != FM10K_TLV_ERROR) { + /* all messages should have a function handler */ + if (!msg_data->func) + return FM10K_ERR_PARAM; + + /* parser is optional */ + attr = msg_data->attr; + if (attr) { + while (attr->id != FM10K_TLV_ERROR) { + id = attr->id; + attr++; + /* ID should always be increasing */ + if (id >= attr->id) + return FM10K_ERR_PARAM; + /* ID should fit in results array */ + if (id >= FM10K_TLV_RESULTS_MAX) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if (attr->id != FM10K_TLV_ERROR) + return FM10K_ERR_PARAM; + } + + id = msg_data->id; + msg_data++; + /* ID should always be increasing */ + if (id >= msg_data->id) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func) + return FM10K_ERR_PARAM; + + return 0; +} + +/** + * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function associates a set of message handling ops with a mailbox. + **/ +static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + return 0; +} + +/** + * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes + * + * This function initializes the mailbox for use. It will split the + * buffer provided an use that th populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. 
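+ *
+ * A hedged usage sketch, where mbx points at the interface's
+ * fm10k_mbx_info and vf_msg_data is a handler table as validated above
+ * (both names are illustrative):
+ *
+ *    err = fm10k_pfvf_mbx_init(hw, mbx, vf_msg_data, 0);
+ *    if (!err)
+ *        err = mbx->ops.connect(hw, mbx);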
+ **/ +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data, u8 id) +{ + /* initialize registers */ + switch (hw->mac.type) { + case fm10k_mac_vf: + mbx->mbx_reg = FM10K_VFMBX; + mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR); + break; + case fm10k_mac_pf: + /* there are only 64 VF <-> PF mailboxes */ + if (id < 64) { + mbx->mbx_reg = FM10K_MBX(id); + mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); + break; + } + /* fallthough */ + default: + return FM10K_MBX_ERR_NO_MBX; + } + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = FM10K_MBX_INIT_DELAY; + + /* initalize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* initialize CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_mbx_connect; + mbx->ops.disconnect = fm10k_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return 0; +} + +/** + * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); +} + +/** + * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * @err: error flags to report if any + * + * This function returns a connection mailbox header + **/ +static void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err) +{ + if (mbx->local) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | + FM10K_MSG_HDR_FIELD_SET(err, SM_ERR); +} + +/** + * fm10k_sm_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to a just connected state + **/ +static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* set local version to max and remote version to 0 */ + mbx->local = FM10K_SM_MBX_VERSION; + mbx->remote = 0; + + /* initalize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* reset state back to connect */ + mbx->state = FM10K_STATE_CONNECT; +} + +/** + * 
fm10k_sm_mbx_connect - Start switch manager mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection with the switch + * manager. To do this it will first disconnect the mailbox, and then + * reconnect it in order to complete a reset of the mailbox. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + + /* reset interface back to connect */ + fm10k_sm_mbx_connect_reset(mbx); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + fm10k_mbx_write(hw, mbx); + + /* enable interrupt and notify other party of new message */ + + return 0; +} + +/** + * fm10k_sm_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + fm10k_write_reg(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + udelay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close just force the mailbox into shutdown */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + fm10k_mbx_reset_work(mbx); + fm10k_mbx_update_max_size(mbx, 0); + + fm10k_write_reg(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized offsets or version numbers. 
+ **/ +static s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 tail, head, ver; + + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + switch (ver) { + case 0: + break; + case FM10K_SM_MBX_VERSION: + if (!head || head > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_HEAD; + if (!tail || tail > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_TAIL; + if (mbx->tail < head) + head += mbx->mbmem_len - 1; + if (tail < mbx->head) + tail += mbx->mbmem_len - 1; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + return FM10K_MBX_ERR_TAIL; + default: + return FM10K_MBX_ERR_SRC; + } + + return 0; +} + +/** + * fm10k_sm_mbx_process_error - Process header with error flag set + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the error flag + * is set. As a result we will terminate a connection if one is present + * and fall back into the reset state with a connection header of version + * 0 (RESET). + **/ +static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* if there is an error just disconnect */ + mbx->remote = 0; + break; + case FM10K_STATE_OPEN: + /* flush any uncompleted work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* try connnecting at lower version */ + if (mbx->remote) { + while (mbx->local > 1) + mbx->local--; + mbx->remote = 0; + } + break; + default: + break; + } + + fm10k_sm_mbx_create_connect_hdr(mbx, 0); +} + +/** + * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may set the error bit in the local message header + **/ +static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_SRC: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + break; + default: + return; + } + + /* process it as though we received an error, and send error reply */ + fm10k_sm_mbx_process_error(mbx); + fm10k_sm_mbx_create_connect_hdr(mbx, 1); +} + +/** + * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will dequeue one message from the Rx switch manager mailbox + * FIFO and place it in the Rx mailbox FIFO for processing by software. 
+ **/ +static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + s32 err; + + /* push tail in front of head */ + if (tail < mbx->head) + tail += mbmem_len; + + /* copy data to the Rx FIFO */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + /* guarantee head aligns with the end of the last message */ + mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); + mbx->pushed = 0; + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->head > mbmem_len) + mbx->head -= mbmem_len; + + return err; +} + +/** + * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will dequeue one message from the Tx mailbox FIFO and place + * it in the Tx switch manager mailbox FIFO for processing by hardware. + **/ +static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + u16 tail_len, len = 0; + u32 *msg; + + /* push head behind tail */ + if (mbx->tail < head) + head += mbmem_len; + + fm10k_mbx_pull_head(hw, mbx, head); + + /* determine msg aligned offset for end of buffer */ + do { + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); + tail_len = len; + len += FM10K_TLV_DWORD_LEN(*msg); + } while ((len <= mbx->tail_len) && (len < mbmem_len)); + + /* guarantee we stop on a message boundary */ + if (mbx->tail_len > tail_len) { + mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len); + mbx->tail_len = tail_len; + } + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->tail > mbmem_len) + mbx->tail -= mbmem_len; +} + +/** + * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote fifo head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush out Tx data */ + fm10k_sm_mbx_transmit(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) { + fm10k_sm_mbx_create_data_hdr(mbx); + } else { + mbx->remote = 0; + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + } + break; + case FM10K_STATE_CONNECT: + case FM10K_STATE_CLOSED: + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + break; + default: + break; + } +} + +/** + * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET) + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the version data + * is set to 0. As such we will either terminate the connection or go + * into the connect state in order to re-establish the connection. This + * function can also be used to respond to an error as the connection + * resetting would also be a means of dealing with errors. 
+ **/ +static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* drop remote connections and disconnect */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + mbx->local = 0; + break; + case FM10K_STATE_OPEN: + /* flush any incomplete work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* Update remote value to match local value */ + mbx->remote = mbx->local; + default: + break; + } + + fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_sm_mbx_process_version_1 - Process header with version == 1 + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to process messages received when the remote + * mailbox is active. + **/ +static s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 len; + + /* pull all fields needed for verification */ + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + /* if we are in connect and wanting version 1 then start up and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + if (!mbx->remote) + goto send_reply; + if (mbx->remote != 1) + return FM10K_MBX_ERR_SRC; + + mbx->state = FM10K_STATE_OPEN; + } + + do { + /* abort on message size errors */ + len = fm10k_sm_mbx_receive(hw, mbx, tail); + if (len < 0) + return len; + + /* continue until we have flushed the Rx FIFO */ + } while (len); + +send_reply: + fm10k_sm_mbx_create_reply(hw, mbx, head); + + return 0; +} + +/** + * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. 
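+ * + * A minimal caller sketch (assumed here, since the actual interrupt handler lives outside this file): the mailbox owner is expected to reach this routine through the ops table that fm10k_sm_mbx_init() fills in rather than call it directly, roughly err = mbx->ops.process(hw, mbx); issued under the driver's mailbox lock (see the fm10k_mbx_lock() uses elsewhere in this series).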
+ **/ +static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return 0; + + /* retrieve data from switch manager */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + err = fm10k_sm_mbx_validate_fifo_hdr(mbx); + if (err < 0) + goto fifo_err; + + if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) { + fm10k_sm_mbx_process_error(mbx); + goto fifo_err; + } + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { + case 0: + fm10k_sm_mbx_process_reset(hw, mbx); + break; + case FM10K_SM_MBX_VERSION: + err = fm10k_sm_mbx_process_version_1(hw, mbx); + break; + } + +fifo_err: + if (err < 0) + fm10k_sm_mbx_create_error_msg(mbx, err); + + /* report data to switch manager */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function for now is used to stub out the PF/SM mailbox + **/ +s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + mbx->mbx_reg = FM10K_GMBX; + mbx->mbmem_reg = FM10K_MBMEM_PF(0); + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = FM10K_MBX_INIT_DELAY; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_MBMEM_PF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_sm_mbx_connect; + mbx->ops.disconnect = fm10k_sm_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_sm_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return 0; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h new file mode 100644 index 0000000000000000000000000000000000000000..0419a7f0035e574d1bb8cec0ed69099ba8fffc3b --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -0,0 +1,307 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_MBX_H_ +#define _FM10K_MBX_H_ + +/* forward declaration */ +struct fm10k_mbx_info; + +#include "fm10k_type.h" +#include "fm10k_tlv.h" + +/* PF Mailbox Registers */ +#define FM10K_MBMEM(_n) ((_n) + 0x18000) +#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000) +#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400) +#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600) +/* XOR provides means of switching from Tx to Rx FIFO */ +#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0)) +#define FM10K_MBX(_n) ((_n) + 0x18800) +#define FM10K_MBX_REQ 0x00000002 +#define FM10K_MBX_ACK 0x00000004 +#define FM10K_MBX_REQ_INTERRUPT 0x00000008 +#define FM10K_MBX_ACK_INTERRUPT 0x00000010 +#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020 +#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040 +#define FM10K_MBICR(_n) ((_n) + 0x18840) +#define FM10K_GMBX 0x18842 + +/* VF Mailbox Registers */ +#define FM10K_VFMBX 0x00010 +#define FM10K_VFMBMEM(_n) ((_n) + 0x00020) +#define FM10K_VFMBMEM_LEN 16 +#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2) + +/* Delays/timeouts */ +#define FM10K_MBX_DISCONNECT_TIMEOUT 500 +#define FM10K_MBX_POLL_DELAY 19 +#define FM10K_MBX_INT_DELAY 20 + +/* PF/VF Mailbox state machine + * + * +----------+ connect() +----------+ + * | CLOSED | --------------> | CONNECT | + * +----------+ +----------+ + * ^ ^ | + * | rcv: rcv: | | rcv: + * | Connect Disconnect | | Connect + * | Disconnect Error | | Data + * | | | + * | | V + * +----------+ disconnect() +----------+ + * |DISCONNECT| <-------------- | OPEN | + * +----------+ +----------+ + * + * The diagram above describes the PF/VF mailbox state machine. There + * are four main states to this machine. + * Closed: This state represents a mailbox that is in a standby state + * with interrupts disabled. In this state the mailbox should not + * read the mailbox or write any data. The only means of exiting + * this state is for the system to make the connect() call for the + * mailbox, it will then transition to the connect state. + * Connect: In this state the mailbox is seeking a connection. It will + * post a connect message with no specified destination and will + * wait for a reply from the other side of the mailbox. This state + * is exited when either a connect with the local mailbox as the + * destination is received or when a data message is received with + * a valid sequence number. + * Open: In this state the mailbox is able to transfer data between the local + * entity and the remote. It will fall back to connect in the event of + * receiving either an error message, or a disconnect message. It will + * transition to disconnect on a call to disconnect(); + * Disconnect: In this state the mailbox is attempting to gracefully terminate + * the connection. It will do so at the first point where it knows + * that the remote endpoint is either done sending, or when the + * remote endpoint has fallen back into connect. 
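+ * + * A minimal usage sketch, assuming only the ops pointers assigned at init time: mbx->ops.connect(hw, mbx) moves CLOSED to CONNECT, repeated mbx->ops.process(hw, mbx) calls move CONNECT to OPEN once a valid connect or data header is received, and mbx->ops.disconnect(hw, mbx) moves OPEN to DISCONNECT and, once the remote side is done, back to CLOSED.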
+ */ +enum fm10k_mbx_state { + FM10K_STATE_CLOSED, + FM10K_STATE_CONNECT, + FM10K_STATE_OPEN, + FM10K_STATE_DISCONNECT, +}; + +/* PF/VF Mailbox header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The layout above describes the format for the header used in the PF/VF + * mailbox. The header is broken out into the following fields: + * Type: There are 4 supported message types + * 0x8: Data header - used to transport message data + * 0xC: Connect header - used to establish connection + * 0xD: Disconnect header - used to tear down a connection + * 0xE: Error header - used to address message exceptions + * Tail: Tail index for local FIFO + * Tail index actually consists of two parts. The MSB of + * the head is a loop tracker, it is 0 on an even numbered + * loop through the FIFO, and 1 on the odd numbered loops. + * To get the actual mailbox offset based on the tail it + * is necessary to add bit 3 to bit 0 and clear bit 3. This + * gives us a valid range of 0x1 - 0xE. + * Head: Head index for remote FIFO + * Head index follows the same format as the tail index. + * Rsvd0: Reserved 0 portion of the mailbox header + * CRC: Running CRC for all data since connect plus current message header + * Size: Maximum message size - Applies only to connect headers + * The maximum message size is provided during connect to avoid + * jamming the mailbox with messages that do not fit. + * Err_no: Error number - Applies only to error headers + * The error number provides a indication of the type of error + * experienced. + */ + +/* macros for retriving and setting header values */ +#define FM10K_MSG_HDR_MASK(name) \ + ((0x1u << FM10K_MSG_##name##_SIZE) - 1) +#define FM10K_MSG_HDR_FIELD_SET(value, name) \ + (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT) +#define FM10K_MSG_HDR_FIELD_GET(value, name) \ + ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name)) + +/* offsets shared between all headers */ +#define FM10K_MSG_TYPE_SHIFT 0 +#define FM10K_MSG_TYPE_SIZE 4 +#define FM10K_MSG_TAIL_SHIFT 4 +#define FM10K_MSG_TAIL_SIZE 4 +#define FM10K_MSG_HEAD_SHIFT 8 +#define FM10K_MSG_HEAD_SIZE 4 +#define FM10K_MSG_RSVD0_SHIFT 12 +#define FM10K_MSG_RSVD0_SIZE 4 + +/* offsets for data/disconnect headers */ +#define FM10K_MSG_CRC_SHIFT 16 +#define FM10K_MSG_CRC_SIZE 16 + +/* offsets for connect headers */ +#define FM10K_MSG_CONNECT_SIZE_SHIFT 16 +#define FM10K_MSG_CONNECT_SIZE_SIZE 16 + +/* offsets for error headers */ +#define FM10K_MSG_ERR_NO_SHIFT 16 +#define FM10K_MSG_ERR_NO_SIZE 16 + +enum fm10k_msg_type { + FM10K_MSG_DATA = 0x8, + FM10K_MSG_CONNECT = 0xC, + FM10K_MSG_DISCONNECT = 0xD, + FM10K_MSG_ERROR = 0xE, +}; + +/* HNI/SM Mailbox FIFO format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------+-----------------------+-------+-----------------------+ + * | Error | Remote Head |Version| Local Tail | + * +-------+-----------------------+-------+-----------------------+ + * | | + * . Local FIFO Data . + * . . + * +-------+-----------------------+-------+-----------------------+ + * + * The layout above describes the format for the FIFOs used by the host + * network interface and the switch manager to communicate messages back + * and forth. Both the HNI and the switch maintain one such FIFO. 
The + * layout in memory has the switch manager FIFO followed immediately by + * the HNI FIFO. For this reason I am using just the pointer to the + * HNI FIFO in the mailbox ops as the offset between the two is fixed. + * + * The header for the FIFO is broken out into the following fields: + * Local Tail: Offset into FIFO region for next DWORD to write. + * Version: Version info for mailbox, only values of 0/1 are supported. + * Remote Head: Offset into remote FIFO to indicate how much we have read. + * Error: Error indication, values TBD. + */ + +/* version number for switch manager mailboxes */ +#define FM10K_SM_MBX_VERSION 1 +#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1) + +/* offsets shared between all SM FIFO headers */ +#define FM10K_MSG_SM_TAIL_SHIFT 0 +#define FM10K_MSG_SM_TAIL_SIZE 12 +#define FM10K_MSG_SM_VER_SHIFT 12 +#define FM10K_MSG_SM_VER_SIZE 4 +#define FM10K_MSG_SM_HEAD_SHIFT 16 +#define FM10K_MSG_SM_HEAD_SIZE 12 +#define FM10K_MSG_SM_ERR_SHIFT 28 +#define FM10K_MSG_SM_ERR_SIZE 4 + +/* All error messages returned by mailbox functions + * The value -511 is 0xFE01 in hex. The idea is to order the errors + * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox + * messages. This also helps to avoid error number collisions as Linux + * doesn't appear to use error numbers 256 - 511. + */ +#define FM10K_MBX_ERR(_n) ((_n) - 512) +#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01) +#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03) +#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05) +#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06) +#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08) +#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09) +#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B) +#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C) +#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E) +#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F) + +#define FM10K_MBX_CRC_SEED 0xFFFF + +struct fm10k_mbx_ops { + s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *); + void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *); + bool (*rx_ready)(struct fm10k_mbx_info *); + bool (*tx_ready)(struct fm10k_mbx_info *, u16); + bool (*tx_complete)(struct fm10k_mbx_info *); + s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *, + const u32 *); + s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *); + s32 (*register_handlers)(struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +}; + +struct fm10k_mbx_fifo { + u32 *buffer; + u16 head; + u16 tail; + u16 size; +}; + +/* size of buffer to be stored in mailbox for FIFOs */ +#define FM10K_MBX_TX_BUFFER_SIZE 512 +#define FM10K_MBX_RX_BUFFER_SIZE 128 +#define FM10K_MBX_BUFFER_SIZE \ + (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE) + +/* minimum and maximum message size in dwords */ +#define FM10K_MBX_MSG_MAX_SIZE \ + ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1)) +#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1) + +#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */ + +struct fm10k_mbx_info { + /* function pointers for mailbox operations */ + struct fm10k_mbx_ops ops; + const struct fm10k_msg_data *msg_data; + + /* message FIFOs */ + struct fm10k_mbx_fifo rx; + struct fm10k_mbx_fifo tx; + + /* delay for handling timeouts */ + u32 timeout; + u32 udelay; + + /* mailbox state info */ + u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr; + u16 max_size, mbmem_len; + u16 tail, tail_len, pulled; + u16 head, head_len, pushed; + 
u16 local, remote; + enum fm10k_mbx_state state; + + /* result of last mailbox test */ + s32 test_result; + + /* statistics */ + u64 tx_busy; + u64 tx_dropped; + u64 tx_messages; + u64 tx_dwords; + u64 rx_messages; + u64 rx_dwords; + u64 rx_parse_err; + + /* Buffer to store messages */ + u32 buffer[FM10K_MBX_BUFFER_SIZE]; +}; + +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *, u8); +s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); + +#endif /* _FM10K_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c new file mode 100644 index 0000000000000000000000000000000000000000..bf44a8fe711f5f4dde3071b231ecfedad35a7ffe --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -0,0 +1,1435 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" +#include <linux/vmalloc.h> +#if IS_ENABLED(CONFIG_VXLAN) +#include <net/vxlan.h> +#endif /* CONFIG_VXLAN */ + +/** + * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer = vzalloc(size); + if (!tx_ring->tx_buffer) + goto err; + + u64_stats_init(&tx_ring->syncp); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; + return -ENOMEM; +} + +/** + * fm10k_setup_all_tx_resources - allocate all queues Tx resources + * @interface: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * caller's duty to clean those orphaned rings.
+ * + * Return 0 on success, negative on failure + **/ +static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) +{ + int i, err = 0; + + for (i = 0; i < interface->num_tx_queues; i++) { + err = fm10k_setup_tx_resources(interface->tx_ring[i]); + if (!err) + continue; + + netif_err(interface, probe, interface->netdev, + "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + fm10k_free_tx_resources(interface->tx_ring[i]); + return err; +} + +/** + * fm10k_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer = vzalloc(size); + if (!rx_ring->rx_buffer) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer); + rx_ring->rx_buffer = NULL; + return -ENOMEM; +} + +/** + * fm10k_setup_all_rx_resources - allocate all queues Rx resources + * @interface: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) +{ + int i, err = 0; + + for (i = 0; i < interface->num_rx_queues; i++) { + err = fm10k_setup_rx_resources(interface->rx_ring[i]); + if (!err) + continue; + + netif_err(interface, probe, interface->netdev, + "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + fm10k_free_rx_resources(interface->rx_ring[i]); + return err; +} + +void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, + struct fm10k_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * fm10k_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) +{ + struct fm10k_tx_buffer *tx_buffer; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); + } + + /* reset BQL values */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; + 
memset(tx_ring->tx_buffer, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * fm10k_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void fm10k_free_tx_resources(struct fm10k_ring *tx_ring) +{ + fm10k_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues + * @interface: board private structure + **/ +void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_clean_tx_ring(interface->tx_ring[i]); + + /* remove any stale timestamp buffers and free them */ + skb_queue_purge(&interface->ts_tx_skb_queue); +} + +/** + * fm10k_free_all_tx_resources - Free Tx Resources for All Queues + * @interface: board private structure + * + * Free all transmit software resources + **/ +static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface) +{ + int i = interface->num_tx_queues; + + while (i--) + fm10k_free_tx_resources(interface->tx_ring[i]); +} + +/** + * fm10k_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) +{ + unsigned long size; + u16 i; + + if (!rx_ring->rx_buffer) + return; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; + /* clean-up will only set page pointer to NULL */ + if (!buffer->page) + continue; + + dma_unmap_page(rx_ring->dev, buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(buffer->page); + + buffer->page = NULL; + } + + size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * fm10k_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void fm10k_free_rx_resources(struct fm10k_ring *rx_ring) +{ + fm10k_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer); + rx_ring->rx_buffer = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues + * @interface: board private structure + **/ +void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_rx_queues; i++) + fm10k_clean_rx_ring(interface->rx_ring[i]); +} + +/** + * fm10k_free_all_rx_resources - Free Rx Resources for All Queues + * @interface: board private structure + * + * Free all receive software resources + **/ +static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) +{ + int i = interface->num_rx_queues; + + while (i--) + fm10k_free_rx_resources(interface->rx_ring[i]); +} + +/** + * fm10k_request_glort_range - Request GLORTs for use in configuring rules + * @interface: board private 
structure + * + * This function allocates a range of glorts for this interface to use. + **/ +static void fm10k_request_glort_range(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT; + + /* establish GLORT base */ + interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; + interface->glort_count = 0; + + /* nothing we can do until mask is allocated */ + if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) + return; + + /* we support 3 possible GLORT configurations. + * 1: VFs consume all but the last 1 + * 2: VFs and PF split glorts with possible gap between + * 3: VFs allocated first 64, all others belong to PF + */ + if (mask <= hw->iov.total_vfs) { + interface->glort_count = 1; + interface->glort += mask; + } else if (mask < 64) { + interface->glort_count = (mask + 1) / 2; + interface->glort += interface->glort_count; + } else { + interface->glort_count = mask - 63; + interface->glort += 64; + } +} + +/** + * fm10k_del_vxlan_port_all + * @interface: board private structure + * + * This function frees the entire vxlan_port list + **/ +static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface) +{ + struct fm10k_vxlan_port *vxlan_port; + + /* flush all entries from list */ + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + while (vxlan_port) { + list_del(&vxlan_port->list); + kfree(vxlan_port); + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, + list); + } +} + +/** + * fm10k_restore_vxlan_port + * @interface: board private structure + * + * This function restores the value in the tunnel_cfg register after reset + **/ +static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vxlan_port *vxlan_port; + + /* only the PF supports configuring tunnels */ + if (hw->mac.type != fm10k_mac_pf) + return; + + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + + /* restore tunnel configuration register */ + fm10k_write_reg(hw, FM10K_TUNNEL_CFG, + (vxlan_port ? ntohs(vxlan_port->port) : 0) | + (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT)); +} + +/** + * fm10k_add_vxlan_port + * @netdev: network interface device structure + * @sa_family: Address family of new port + * @port: port number used for VXLAN + * + * This function is called when a new VXLAN interface has added a new port + * number to the range that is currently in use for VXLAN. The new port + * number is always added to the tail so that the port number list should + * match the order in which the ports were allocated. The head of the list + * is always used as the VXLAN port number for offloads.
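+ * + * For reference, this callback is reached through the netdev ops table defined later in this file (.ndo_add_vxlan_port = fm10k_add_vxlan_port, .ndo_del_vxlan_port = fm10k_del_vxlan_port), and fm10k_open() asks the stack to replay known ports via vxlan_get_rx_port(netdev), so this list is rebuilt after an up/down cycle.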
+ **/ +static void fm10k_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_vxlan_port *vxlan_port; + + /* only the PF supports configuring tunnels */ + if (interface->hw.mac.type != fm10k_mac_pf) + return; + + /* existing ports are pulled out so our new entry is always last */ + fm10k_vxlan_port_for_each(vxlan_port, interface) { + if ((vxlan_port->port == port) && + (vxlan_port->sa_family == sa_family)) { + list_del(&vxlan_port->list); + goto insert_tail; + } + } + + /* allocate memory to track ports */ + vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC); + if (!vxlan_port) + return; + vxlan_port->port = port; + vxlan_port->sa_family = sa_family; + +insert_tail: + /* add new port value to list */ + list_add_tail(&vxlan_port->list, &interface->vxlan_port); + + fm10k_restore_vxlan_port(interface); +} + +/** + * fm10k_del_vxlan_port + * @netdev: network interface device structure + * @sa_family: Address family of freed port + * @port: port number used for VXLAN + * + * This funciton is called when a new VXLAN interface has freed a port + * number from the range that is currently in use for VXLAN. The freed + * port is removed from the list and the new head is used to determine + * the port number for offloads. + **/ +static void fm10k_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_vxlan_port *vxlan_port; + + if (interface->hw.mac.type != fm10k_mac_pf) + return; + + /* find the port in the list and free it */ + fm10k_vxlan_port_for_each(vxlan_port, interface) { + if ((vxlan_port->port == port) && + (vxlan_port->sa_family == sa_family)) { + list_del(&vxlan_port->list); + kfree(vxlan_port); + break; + } + } + + fm10k_restore_vxlan_port(interface); +} + +/** + * fm10k_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +int fm10k_open(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = fm10k_setup_all_tx_resources(interface); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = fm10k_setup_all_rx_resources(interface); + if (err) + goto err_setup_rx; + + /* allocate interrupt resources */ + err = fm10k_qv_request_irq(interface); + if (err) + goto err_req_irq; + + /* setup GLORT assignment for this port */ + fm10k_request_glort_range(interface); + + /* Notify the stack of the actual queue counts */ + err = netif_set_real_num_tx_queues(netdev, + interface->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, + interface->num_rx_queues); + if (err) + goto err_set_queues; + +#if IS_ENABLED(CONFIG_VXLAN) + /* update VXLAN port configuration */ + vxlan_get_rx_port(netdev); + +#endif + fm10k_up(interface); + + return 0; + +err_set_queues: + fm10k_qv_free_irq(interface); +err_req_irq: + fm10k_free_all_rx_resources(interface); +err_setup_rx: + fm10k_free_all_tx_resources(interface); +err_setup_tx: + return err; +} + +/** + * fm10k_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int fm10k_close(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + fm10k_down(interface); + + fm10k_qv_free_irq(interface); + + fm10k_del_vxlan_port_all(interface); + + fm10k_free_all_tx_resources(interface); + fm10k_free_all_rx_resources(interface); + + return 0; +} + +static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int r_idx = skb->queue_mapping; + int err; + + if ((skb->protocol == htons(ETH_P_8021Q)) && + !vlan_tx_tag_present(skb)) { + /* FM10K only supports hardware tagging, any tags in frame + * are considered 2nd level or "outer" tags + */ + struct vlan_hdr *vhdr; + __be16 proto; + + /* make sure skb is not shared */ + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NETDEV_TX_OK; + + /* make sure there is enough room to move the ethernet header */ + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) + return NETDEV_TX_OK; + + /* verify the skb head is not shared */ + err = skb_cow_head(skb, 0); + if (err) + return NETDEV_TX_OK; + + /* locate vlan header */ + vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + + /* pull the 2 key pieces of data out of it */ + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + ntohs(vhdr->h_vlan_TCI)); + proto = vhdr->h_vlan_encapsulated_proto; + skb->protocol = (ntohs(proto) >= 1536) ? proto : + htons(ETH_P_802_2); + + /* squash it by moving the ethernet addresses up 4 bytes */ + memmove(skb->data + VLAN_HLEN, skb->data, 12); + __skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* The minimum packet size for a single buffer is 17B so pad the skb + * in order to meet this minimum size requirement. 
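+ * As a worked example (illustrative numbers only), a 14 byte skb would get pad_len = 17 - 14 = 3, so skb_pad()/__skb_put() below extend it with three zero bytes before it is handed to fm10k_xmit_frame_ring().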
+ */ + if (unlikely(skb->len < 17)) { + int pad_len = 17 - skb->len; + + if (skb_pad(skb, pad_len)) + return NETDEV_TX_OK; + __skb_put(skb, pad_len); + } + + /* prepare packet for hardware time stamping */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + fm10k_ts_tx_enqueue(interface, skb); + + if (r_idx >= interface->num_tx_queues) + r_idx %= interface->num_tx_queues; + + err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); + + return err; +} + +static int fm10k_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +/** + * fm10k_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void fm10k_tx_timeout(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + bool real_tx_hang = false; + int i; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + if (real_tx_hang) { + fm10k_tx_timeout_reset(interface); + } else { + netif_info(interface, drv, netdev, + "Fake Tx hang detected with timeout of %d seconds\n", + netdev->watchdog_timeo/HZ); + + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } +} + +static int fm10k_uc_vlan_unsync(struct net_device *netdev, + const unsigned char *uc_addr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u16 glort = interface->glort; + u16 vid = interface->vid; + bool set = !!(vid / VLAN_N_VID); + int err; + + /* drop any leading bits on the VLAN ID */ + vid &= VLAN_N_VID - 1; + + err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0); + if (err) + return err; + + /* return non-zero value as we are only doing a partial sync/unsync */ + return 1; +} + +static int fm10k_mc_vlan_unsync(struct net_device *netdev, + const unsigned char *mc_addr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u16 glort = interface->glort; + u16 vid = interface->vid; + bool set = !!(vid / VLAN_N_VID); + int err; + + /* drop any leading bits on the VLAN ID */ + vid &= VLAN_N_VID - 1; + + err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); + if (err) + return err; + + /* return non-zero value as we are only doing a partial sync/unsync */ + return 1; +} + +static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + s32 err; + + /* updates do not apply to VLAN 0 */ + if (!vid) + return 0; + + if (vid >= VLAN_N_VID) + return -EINVAL; + + /* Verify we have permission to add VLANs */ + if (hw->mac.vlan_override) + return -EACCES; + + /* if default VLAN is already present do nothing */ + if (vid == hw->mac.default_vid) + return -EBUSY; + + /* update active_vlans bitmask */ + set_bit(vid, interface->active_vlans); + if (!set) + clear_bit(vid, interface->active_vlans); + + fm10k_mbx_lock(interface); + + /* only need to update the VLAN if not in promiscous mode */ + if (!(netdev->flags & IFF_PROMISC)) { + err = hw->mac.ops.update_vlan(hw, vid, 0, set); + if (err) + return err; + } + + /* update our base MAC address */ + err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, + 
vid, set, 0); + if (err) + return err; + + /* set vid prior to syncing/unsyncing the VLAN */ + interface->vid = vid + (set ? VLAN_N_VID : 0); + + /* Update the unicast and multicast address list to add/drop VLAN */ + __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); + __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); + + fm10k_mbx_unlock(interface); + + return 0; +} + +static int fm10k_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + /* update VLAN and address table based on changes */ + return fm10k_update_vid(netdev, vid, true); +} + +static int fm10k_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + /* update VLAN and address table based on changes */ + return fm10k_update_vid(netdev, vid, false); +} + +static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid) +{ + struct fm10k_hw *hw = &interface->hw; + u16 default_vid = hw->mac.default_vid; + u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID; + + vid = find_next_bit(interface->active_vlans, vid_limit, ++vid); + + return vid; +} + +static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u32 vid, prev_vid; + + /* loop through and find any gaps in the table */ + for (vid = 0, prev_vid = 0; + prev_vid < VLAN_N_VID; + prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) { + if (prev_vid == vid) + continue; + + /* send request to clear multiple bits at a time */ + prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; + hw->mac.ops.update_vlan(hw, prev_vid, 0, false); + } +} + +static int __fm10k_uc_sync(struct net_device *dev, + const unsigned char *addr, bool sync) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + u16 vid, glort = interface->glort; + s32 err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + err = hw->mac.ops.update_uc_addr(hw, glort, addr, + vid, sync, 0); + if (err) + return err; + } + + return 0; +} + +static int fm10k_uc_sync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_uc_sync(dev, addr, true); +} + +static int fm10k_uc_unsync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_uc_sync(dev, addr, false); +} + +static int fm10k_set_mac(struct net_device *dev, void *p) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + struct sockaddr *addr = p; + s32 err = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (dev->flags & IFF_UP) { + /* setting MAC address requires mailbox */ + fm10k_mbx_lock(interface); + + err = fm10k_uc_sync(dev, addr->sa_data); + if (!err) + fm10k_uc_unsync(dev, hw->mac.addr); + + fm10k_mbx_unlock(interface); + } + + if (!err) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + ether_addr_copy(hw->mac.addr, addr->sa_data); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + /* if we had a mailbox error suggest trying again */ + return err ? 
-EAGAIN : 0; +} + +static int __fm10k_mc_sync(struct net_device *dev, + const unsigned char *addr, bool sync) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + u16 vid, glort = interface->glort; + s32 err; + + if (!is_multicast_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); + if (err) + return err; + } + + return 0; +} + +static int fm10k_mc_sync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_mc_sync(dev, addr, true); +} + +static int fm10k_mc_unsync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_mc_sync(dev, addr, false); +} + +static void fm10k_set_rx_mode(struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + int xcast_mode; + + /* no need to update the hardware if we are not running */ + if (!(dev->flags & IFF_UP)) + return; + + /* determine new mode based on flags */ + xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC : + (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI : + (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ? + FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE; + + fm10k_mbx_lock(interface); + + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); + } + + /* if we aren't changing modes there is nothing to do */ + if (interface->xcast_mode != xcast_mode) { + /* update VLAN table */ + if (xcast_mode == FM10K_XCAST_MODE_PROMISC) + hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true); + if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) + fm10k_clear_unused_vlans(interface); + + /* update xcast mode */ + hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode); + + /* record updated xcast mode state */ + interface->xcast_mode = xcast_mode; + } + + fm10k_mbx_unlock(interface); +} + +void fm10k_restore_rx_state(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int xcast_mode; + u16 vid, glort; + + /* restore our address if perm_addr is set */ + if (hw->mac.type == fm10k_mac_vf) { + if (is_valid_ether_addr(hw->mac.perm_addr)) { + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); + ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + netdev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + if (hw->mac.vlan_override) + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + /* record glort for this interface */ + glort = interface->glort; + + /* convert interface flags to xcast mode */ + if (netdev->flags & IFF_PROMISC) + xcast_mode = FM10K_XCAST_MODE_PROMISC; + else if (netdev->flags & IFF_ALLMULTI) + xcast_mode = FM10K_XCAST_MODE_ALLMULTI; + else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = FM10K_XCAST_MODE_MULTI; + else + xcast_mode = FM10K_XCAST_MODE_NONE; + + fm10k_mbx_lock(interface); + + /* Enable logical port */ + hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true); + + /* update VLAN table */ + hw->mac.ops.update_vlan(hw,
FM10K_VLAN_ALL, 0, + xcast_mode == FM10K_XCAST_MODE_PROMISC); + + /* Add filter for VLAN 0 */ + hw->mac.ops.update_vlan(hw, 0, 0, true); + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + hw->mac.ops.update_vlan(hw, vid, 0, true); + hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, + vid, true, 0); + } + + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); + } + + /* update xcast mode */ + hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); + + fm10k_mbx_unlock(interface); + + /* record updated xcast mode state */ + interface->xcast_mode = xcast_mode; + + /* Restore tunnel configuration */ + fm10k_restore_vxlan_port(interface); +} + +void fm10k_reset_rx_state(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + + fm10k_mbx_lock(interface); + + /* clear the logical port state on lower device */ + hw->mac.ops.update_lport_state(hw, interface->glort, + interface->glort_count, false); + + fm10k_mbx_unlock(interface); + + /* reset flags to default state */ + interface->xcast_mode = FM10K_XCAST_MODE_NONE; + + /* clear the sync flag since the lport has been dropped */ + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); +} + +/** + * fm10k_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces fm10k_get_stats for kernels which support it. + */ +static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *ring; + unsigned int start, i; + u64 bytes, packets; + + rcu_read_lock(); + + for (i = 0; i < interface->num_rx_queues; i++) { + ring = ACCESS_ONCE(interface->rx_ring[i]); + + if (!ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + for (i = 0; i < interface->num_tx_queues; i++) { + ring = ACCESS_ONCE(interface->tx_ring[i]); + + if (!ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + + rcu_read_unlock(); + + /* following stats updated by fm10k_service_task() */ + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + return stats; +} + +int fm10k_setup_tc(struct net_device *dev, u8 tc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* Currently only the PF supports priority classes */ + if (tc && (interface->hw.mac.type != fm10k_mac_pf)) + return -EINVAL; + + /* Hardware supports up to 8 traffic classes */ + if (tc > 8) + return -EINVAL; + + /* Hardware has to reinitialize queues to match packet + * buffer alignment. Unfortunately, the hardware is not + * flexible enough to do this dynamically.
+ */ + if (netif_running(dev)) + fm10k_close(dev); + + fm10k_mbx_free_irq(interface); + + fm10k_clear_queueing_scheme(interface); + + /* we expect the prio_tc map to be repopulated later */ + netdev_reset_tc(dev); + netdev_set_num_tc(dev, tc); + + fm10k_init_queueing_scheme(interface); + + fm10k_mbx_request_irq(interface); + + if (netif_running(dev)) + fm10k_open(dev); + + /* flag to indicate SWPRI has yet to be updated */ + interface->flags |= FM10K_FLAG_SWPRI_CONFIG; + + return 0; +} + +static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return fm10k_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return fm10k_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, + struct fm10k_l2_accel *l2_accel) +{ + struct fm10k_ring *ring; + int i; + + for (i = 0; i < interface->num_rx_queues; i++) { + ring = interface->rx_ring[i]; + rcu_assign_pointer(ring->l2_accel, l2_accel); + } + + interface->l2_accel = l2_accel; +} + +static void *fm10k_dfwd_add_station(struct net_device *dev, + struct net_device *sdev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_l2_accel *l2_accel = interface->l2_accel; + struct fm10k_l2_accel *old_l2_accel = NULL; + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int size = 0, i; + u16 glort; + + /* allocate l2 accel structure if it is not available */ + if (!l2_accel) { + /* verify there is enough free GLORTs to support l2_accel */ + if (interface->glort_count < 7) + return ERR_PTR(-EBUSY); + + size = offsetof(struct fm10k_l2_accel, macvlan[7]); + l2_accel = kzalloc(size, GFP_KERNEL); + if (!l2_accel) + return ERR_PTR(-ENOMEM); + + l2_accel->size = 7; + l2_accel->dglort = interface->glort; + + /* update pointers */ + fm10k_assign_l2_accel(interface, l2_accel); + /* do not expand if we are at our limit */ + } else if ((l2_accel->count == FM10K_MAX_STATIONS) || + (l2_accel->count == (interface->glort_count - 1))) { + return ERR_PTR(-EBUSY); + /* expand if we have hit the size limit */ + } else if (l2_accel->count == l2_accel->size) { + old_l2_accel = l2_accel; + size = offsetof(struct fm10k_l2_accel, + macvlan[(l2_accel->size * 2) + 1]); + l2_accel = kzalloc(size, GFP_KERNEL); + if (!l2_accel) + return ERR_PTR(-ENOMEM); + + memcpy(l2_accel, old_l2_accel, + offsetof(struct fm10k_l2_accel, + macvlan[old_l2_accel->size])); + + l2_accel->size = (old_l2_accel->size * 2) + 1; + + /* update pointers */ + fm10k_assign_l2_accel(interface, l2_accel); + kfree_rcu(old_l2_accel, rcu); + } + + /* add macvlan to accel table, and record GLORT for position */ + for (i = 0; i < l2_accel->size; i++) { + if (!l2_accel->macvlan[i]) + break; + } + + /* record station */ + l2_accel->macvlan[i] = sdev; + l2_accel->count++; + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + dglort.glort = interface->glort; + dglort.shared_l = fls(l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* Add rules for this specific dglort to the switch */ + fm10k_mbx_lock(interface); + + glort = l2_accel->dglort + 1 + i; + hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0); + + fm10k_mbx_unlock(interface); + + return 
sdev; +} + +static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel); + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + struct net_device *sdev = priv; + int i; + u16 glort; + + if (!l2_accel) + return; + + /* search table for matching interface */ + for (i = 0; i < l2_accel->size; i++) { + if (l2_accel->macvlan[i] == sdev) + break; + } + + /* exit if macvlan not found */ + if (i == l2_accel->size) + return; + + /* Remove any rules specific to this dglort */ + fm10k_mbx_lock(interface); + + glort = l2_accel->dglort + 1 + i; + hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0); + + fm10k_mbx_unlock(interface); + + /* record removal */ + l2_accel->macvlan[i] = NULL; + l2_accel->count--; + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + dglort.glort = interface->glort; + if (l2_accel) + dglort.shared_l = fls(l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* If table is empty remove it */ + if (l2_accel->count == 0) { + fm10k_assign_l2_accel(interface, NULL); + kfree_rcu(l2_accel, rcu); + } +} + +static const struct net_device_ops fm10k_netdev_ops = { + .ndo_open = fm10k_open, + .ndo_stop = fm10k_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = fm10k_xmit_frame, + .ndo_set_mac_address = fm10k_set_mac, + .ndo_change_mtu = fm10k_change_mtu, + .ndo_tx_timeout = fm10k_tx_timeout, + .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, + .ndo_set_rx_mode = fm10k_set_rx_mode, + .ndo_get_stats64 = fm10k_get_stats64, + .ndo_setup_tc = fm10k_setup_tc, + .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, + .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, + .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, + .ndo_get_vf_config = fm10k_ndo_get_vf_config, + .ndo_add_vxlan_port = fm10k_add_vxlan_port, + .ndo_del_vxlan_port = fm10k_del_vxlan_port, + .ndo_do_ioctl = fm10k_ioctl, + .ndo_dfwd_add_station = fm10k_dfwd_add_station, + .ndo_dfwd_del_station = fm10k_dfwd_del_station, +}; + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +struct net_device *fm10k_alloc_netdev(void) +{ + struct fm10k_intfc *interface; + struct net_device *dev; + + dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES); + if (!dev) + return NULL; + + /* set net device and ethtool ops */ + dev->netdev_ops = &fm10k_netdev_ops; + fm10k_set_ethtool_ops(dev); + + /* configure default debug level */ + interface = netdev_priv(dev); + interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + /* configure default features */ + dev->features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_RXHASH | + NETIF_F_RXCSUM; + + /* all features defined to this point should be changeable */ + dev->hw_features |= dev->features; + + /* allow user to enable L2 forwarding acceleration */ + dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* configure VLAN features */ + dev->vlan_features |= dev->features; + + /* configure tunnel offloads */ + dev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + 
NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + /* we want to leave these both on as we cannot disable VLAN tag + * insertion or stripping on the hardware since it is contained + * in the FTAG and not in the frame itself. + */ + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + dev->priv_flags |= IFF_UNICAST_FLT; + + return dev; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..e02036c427b9670e946c3654af2ff2c85770593d --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -0,0 +1,2166 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "fm10k.h" + +static const struct fm10k_info *fm10k_info_tbl[] = { + [fm10k_device_pf] = &fm10k_pf_info, + [fm10k_device_vf] = &fm10k_vf_info, +}; + +/** + * fm10k_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id fm10k_pci_tbl[] = { + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf }, + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf }, + /* required last entry */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl); + +u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg) +{ + struct fm10k_intfc *interface = hw->back; + u16 value = 0; + + if (FM10K_REMOVED(hw->hw_addr)) + return ~value; + + pci_read_config_word(interface->pdev, reg, &value); + if (value == 0xFFFF) + fm10k_write_flush(hw); + + return value; +} + +u32 fm10k_read_reg(struct fm10k_hw *hw, int reg) +{ + u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u32 value = 0; + + if (FM10K_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct fm10k_intfc *interface = hw->back; + struct net_device *netdev = interface->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + } + + return value; +} + +static int fm10k_hw_ready(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + fm10k_write_flush(hw); + + return FM10K_REMOVED(hw->hw_addr) ? 
-ENODEV : 0; +} + +void fm10k_service_event_schedule(struct fm10k_intfc *interface) +{ + if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) && + !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state)) + schedule_work(&interface->service_task); +} + +static void fm10k_service_event_complete(struct fm10k_intfc *interface) +{ + BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__FM10K_SERVICE_SCHED, &interface->state); +} + +/** + * fm10k_service_timer - Timer Call-back + * @data: pointer to interface cast into an unsigned long + **/ +static void fm10k_service_timer(unsigned long data) +{ + struct fm10k_intfc *interface = (struct fm10k_intfc *)data; + + /* Reset the timer */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + + fm10k_service_event_schedule(interface); +} + +static void fm10k_detach_subtask(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* do nothing if device is still present or hw_addr is set */ + if (netif_device_present(netdev) || interface->hw.hw_addr) + return; + + rtnl_lock(); + + if (netif_running(netdev)) + dev_close(netdev); + + rtnl_unlock(); +} + +static void fm10k_reinit(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + WARN_ON(in_interrupt()); + + /* put off any impending NetWatchDogTimeout */ + netdev->trans_start = jiffies; + + while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + usleep_range(1000, 2000); + + rtnl_lock(); + + fm10k_iov_suspend(interface->pdev); + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + /* delay any future reset requests */ + interface->last_reset = jiffies + (10 * HZ); + + /* reset and initialize the hardware so it is in a known state */ + err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); + if (err) + dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); + + /* reassociate interrupts */ + fm10k_mbx_request_irq(interface); + + /* reset clock */ + fm10k_ts_reset(interface); + + if (netif_running(netdev)) + fm10k_open(netdev); + + fm10k_iov_resume(interface->pdev); + + rtnl_unlock(); + + clear_bit(__FM10K_RESETTING, &interface->state); +} + +static void fm10k_reset_subtask(struct fm10k_intfc *interface) +{ + if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED)) + return; + + interface->flags &= ~FM10K_FLAG_RESET_REQUESTED; + + netdev_err(interface->netdev, "Reset interface\n"); + interface->tx_timeout_count++; + + fm10k_reinit(interface); +} + +/** + * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping + * @interface: board private structure + * + * Configure the SWPRI to PC mapping for the port. + **/ +static void fm10k_configure_swpri_map(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int i; + + /* clear flag indicating update is needed */ + interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG; + + /* these registers are only available on the PF */ + if (hw->mac.type != fm10k_mac_pf) + return; + + /* configure SWPRI to PC map */ + for (i = 0; i < FM10K_SWPRI_MAX; i++) + fm10k_write_reg(hw, FM10K_SWPRI_MAP(i), + netdev_get_prio_tc_map(netdev, i)); +} + +/** + * fm10k_watchdog_update_host_state - Update the link status based on host. 
+ * @interface: board private structure + **/ +static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + s32 err; + + if (test_bit(__FM10K_LINK_DOWN, &interface->state)) { + interface->host_ready = false; + if (time_is_after_jiffies(interface->link_down_event)) + return; + clear_bit(__FM10K_LINK_DOWN, &interface->state); + } + + if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) { + if (rtnl_trylock()) { + fm10k_configure_swpri_map(interface); + rtnl_unlock(); + } + } + + /* lock the mailbox for transmit and receive */ + fm10k_mbx_lock(interface); + + err = hw->mac.ops.get_host_state(hw, &interface->host_ready); + if (err && time_is_before_jiffies(interface->last_reset)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + /* free the lock */ + fm10k_mbx_unlock(interface); +} + +/** + * fm10k_mbx_subtask - Process upstream and downstream mailboxes + * @interface: board private structure + * + * This function will process both the upstream and downstream mailboxes. + * It is necessary for us to hold the rtnl_lock while doing this as the + * mailbox accesses are protected by this lock. + **/ +static void fm10k_mbx_subtask(struct fm10k_intfc *interface) +{ + /* process upstream mailbox and update device state */ + fm10k_watchdog_update_host_state(interface); + + /* process downstream mailboxes */ + fm10k_iov_mbx(interface); +} + +/** + * fm10k_watchdog_host_is_ready - Update netdev status based on host ready + * @interface: board private structure + **/ +static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* only continue if link state is currently down */ + if (netif_carrier_ok(netdev)) + return; + + netif_info(interface, drv, netdev, "NIC Link is up\n"); + + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); +} + +/** + * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready + * @interface: board private structure + **/ +static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* only continue if link state is currently up */ + if (!netif_carrier_ok(netdev)) + return; + + netif_info(interface, drv, netdev, "NIC Link is down\n"); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); +} + +/** + * fm10k_update_stats - Update the board statistics counters. 
+ * @interface: board private structure + **/ +void fm10k_update_stats(struct fm10k_intfc *interface) +{ + struct net_device_stats *net_stats = &interface->netdev->stats; + struct fm10k_hw *hw = &interface->hw; + u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0; + u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0; + u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0; + u64 tx_bytes_nic = 0, tx_pkts_nic = 0; + u64 bytes, pkts; + int i; + + /* do not allow stats update via service task for next second */ + interface->next_stats_update = jiffies + HZ; + + /* gather some stats to the interface struct that are per queue */ + for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_csum_errors += tx_ring->tx_stats.csum_err; + bytes += tx_ring->stats.bytes; + pkts += tx_ring->stats.packets; + } + + interface->restart_queue = restart_queue; + interface->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = pkts; + interface->tx_csum_errors = tx_csum_errors; + /* gather some stats to the interface struct that are per queue */ + for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *rx_ring = interface->rx_ring[i]; + + bytes += rx_ring->stats.bytes; + pkts += rx_ring->stats.packets; + alloc_failed += rx_ring->rx_stats.alloc_failed; + rx_csum_errors += rx_ring->rx_stats.csum_err; + rx_errors += rx_ring->rx_stats.errors; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = pkts; + interface->alloc_failed = alloc_failed; + interface->rx_csum_errors = rx_csum_errors; + interface->rx_errors = rx_errors; + + hw->mac.ops.update_hw_stats(hw, &interface->stats); + + for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) { + struct fm10k_hw_stats_q *q = &interface->stats.q[i]; + + tx_bytes_nic += q->tx_bytes.count; + tx_pkts_nic += q->tx_packets.count; + rx_bytes_nic += q->rx_bytes.count; + rx_pkts_nic += q->rx_packets.count; + rx_drops_nic += q->rx_drops.count; + } + + interface->tx_bytes_nic = tx_bytes_nic; + interface->tx_packets_nic = tx_pkts_nic; + interface->rx_bytes_nic = rx_bytes_nic; + interface->rx_packets_nic = rx_pkts_nic; + interface->rx_drops_nic = rx_drops_nic; + + /* Fill out the OS statistics structure */ + net_stats->rx_errors = interface->stats.xec.count; + net_stats->rx_dropped = interface->stats.nodesc_drop.count; +} + +/** + * fm10k_watchdog_flush_tx - flush queues on host not ready + * @interface - pointer to the device interface structure + **/ +static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) +{ + int some_tx_pending = 0; + int i; + + /* nothing to do if carrier is up */ + if (netif_carrier_ok(interface->netdev)) + return; + + for (i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) { + some_tx_pending = 1; + break; + } + } + + /* We've lost link, so the controller stops DMA, but we've got + * queued Tx work that's never going to get done, so reset + * controller to flush Tx. 
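+ * The reset is requested by setting FM10K_FLAG_RESET_REQUESTED below; the service task then acts on it in fm10k_reset_subtask() and reinitializes the interface via fm10k_reinit().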
+ */ + if (some_tx_pending) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; +} + +/** + * fm10k_watchdog_subtask - check and bring link up + * @interface - pointer to the device interface structure + **/ +static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) +{ + /* if interface is down do nothing */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + if (interface->host_ready) + fm10k_watchdog_host_is_ready(interface); + else + fm10k_watchdog_host_not_ready(interface); + + /* update stats only once every second */ + if (time_is_before_jiffies(interface->next_stats_update)) + fm10k_update_stats(interface); + + /* flush any uncompleted work */ + fm10k_watchdog_flush_tx(interface); +} + +/** + * fm10k_check_hang_subtask - check for hung queues and dropped interrupts + * @interface - pointer to the device interface structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + /* rate limit tx hang checks to only once every 2 seconds */ + if (time_is_after_eq_jiffies(interface->next_tx_hang_check)) + return; + interface->next_tx_hang_check = jiffies + (2 * HZ); + + if (netif_carrier_ok(interface->netdev)) { + /* Force detection of hung controller */ + for (i = 0; i < interface->num_tx_queues; i++) + set_check_for_tx_hang(interface->tx_ring[i]); + + /* Rearm all in-use q_vectors for immediate firing */ + for (i = 0; i < interface->num_q_vectors; i++) { + struct fm10k_q_vector *qv = interface->q_vector[i]; + + if (!qv->tx.count && !qv->rx.count) + continue; + writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr); + } + } +} + +/** + * fm10k_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void fm10k_service_task(struct work_struct *work) +{ + struct fm10k_intfc *interface; + + interface = container_of(work, struct fm10k_intfc, service_task); + + /* tasks always capable of running, but must be rtnl protected */ + fm10k_mbx_subtask(interface); + fm10k_detach_subtask(interface); + fm10k_reset_subtask(interface); + + /* tasks only run when interface is up */ + fm10k_watchdog_subtask(interface); + fm10k_check_hang_subtask(interface); + fm10k_ts_tx_subtask(interface); + + /* release lock on service events to allow scheduling next event */ + fm10k_service_event_complete(interface); +} + +/** + * fm10k_configure_tx_ring - Configure Tx ring after Reset + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
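+ * The queue is first disabled, then the descriptor base address, length and head/tail pointers are programmed, the ring is mapped to its interrupt vector, and the queue is finally re-enabled via TXDCTL.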
+ **/ +static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + struct fm10k_hw *hw = &interface->hw; + u64 tdba = ring->dma; + u32 size = ring->count * sizeof(struct fm10k_tx_desc); + u32 txint = FM10K_INT_MAP_DISABLE; + u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT); + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0); + fm10k_write_flush(hw); + + /* possible poll here to verify ring resources have been cleaned */ + + /* set location and size for descriptor ring */ + fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32); + fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size); + + /* reset head and tail pointers */ + fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0); + fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0); + + /* store tail pointer */ + ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + /* Map interrupt */ + if (ring->q_vector) { + txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + txint |= FM10K_INT_MAP_TIMER0; + } + + fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint); + + /* enable use of FTAG bit in Tx descriptor, register is RO for VF */ + fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + + /* enable queue */ + fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); +} + +/** + * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Verify the Tx descriptor ring is ready for transmit. + **/ +static void fm10k_enable_tx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + struct fm10k_hw *hw = &interface->hw; + int wait_loop = 10; + u32 txdctl; + u8 reg_idx = ring->reg_idx; + + /* if we are already enabled just exit */ + if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)); + } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop); + if (!wait_loop) + netif_err(interface, drv, interface->netdev, + "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * fm10k_configure_tx - Configure Transmit Unit after Reset + * @interface: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void fm10k_configure_tx(struct fm10k_intfc *interface) +{ + int i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_configure_tx_ring(interface, interface->tx_ring[i]); + + /* poll here to verify that Tx rings are now enabled */ + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_enable_tx_ring(interface, interface->tx_ring[i]); +} + +/** + * fm10k_configure_rx_ring - Configure Rx ring after Reset + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Configure the Rx descriptor ring after a reset. 
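+ * As with Tx, the queue is disabled while the descriptor base, length, head/tail pointers, buffer size and drop-on-empty policy are programmed, then the queue is re-enabled and seeded with receive buffers.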
+ **/ +static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + u64 rdba = ring->dma; + struct fm10k_hw *hw = &interface->hw; + u32 size = ring->count * sizeof(union fm10k_rx_desc); + u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF; + u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN; + u32 rxint = FM10K_INT_MAP_DISABLE; + u8 rx_pause = interface->rx_pause; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0); + fm10k_write_flush(hw); + + /* possible poll here to verify ring resources have been cleaned */ + + /* set location and size for descriptor ring */ + fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32); + fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size); + + /* reset head and tail pointers */ + fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0); + fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0); + + /* store tail pointer */ + ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; + + /* reset ntu and ntc to place SW in sync with hardwdare */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + /* Configure the Rx buffer size for one buff without split */ + srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; + + /* Configure the Rx ring to supress loopback packets */ + srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; + fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); + + /* Enable drop on empty */ +#ifdef CONFIG_DCB + if (interface->pfc_en) + rx_pause = interface->pfc_en; +#endif + if (!(rx_pause & (1 << ring->qos_pc))) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); + + /* assign default VLAN to queue */ + ring->vid = hw->mac.default_vid; + + /* Map interrupt */ + if (ring->q_vector) { + rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + rxint |= FM10K_INT_MAP_TIMER1; + } + + fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint); + + /* enable queue */ + fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl); + + /* place buffers on ring for receive data */ + fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring)); +} + +/** + * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings + * @interface: board private structure + * + * Configure the drop enable bits for the Rx rings. + **/ +void fm10k_update_rx_drop_en(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u8 rx_pause = interface->rx_pause; + int i; + +#ifdef CONFIG_DCB + if (interface->pfc_en) + rx_pause = interface->pfc_en; + +#endif + for (i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *ring = interface->rx_ring[i]; + u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u8 reg_idx = ring->reg_idx; + + if (!(rx_pause & (1 << ring->qos_pc))) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); + } +} + +/** + * fm10k_configure_dglort - Configure Receive DGLORT after reset + * @interface: board private structure + * + * Configure the DGLORT description and RSS tables. 
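+ * This writes the RSS key and RETA table, selects the hashed header fields via MRQC, and programs the default, queue-mapped and RSS/DCB DGLORT mappings.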
+ **/ +static void fm10k_configure_dglort(struct fm10k_intfc *interface) +{ + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int i; + u32 mrqc; + + /* Fill out hash function seeds */ + for (i = 0; i < FM10K_RSSRK_SIZE; i++) + fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]); + + /* Write RETA table to hardware */ + for (i = 0; i < FM10K_RETA_SIZE; i++) + fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = FM10K_MRQC_IPV4 | + FM10K_MRQC_TCP_IPV4 | + FM10K_MRQC_IPV6 | + FM10K_MRQC_TCP_IPV6; + + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= FM10K_MRQC_UDP_IPV4; + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= FM10K_MRQC_UDP_IPV6; + + fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* assign GLORT per queue for queue mapped testing */ + if (interface->glort_count > 64) { + memset(&dglort, 0, sizeof(dglort)); + dglort.inner_rss = 1; + dglort.glort = interface->glort + 64; + dglort.idx = fm10k_dglort_pf_queue; + dglort.queue_l = fls(interface->num_rx_queues - 1); + hw->mac.ops.configure_dglort_map(hw, &dglort); + } + + /* assign glort value for RSS/DCB specific to this interface */ + memset(&dglort, 0, sizeof(dglort)); + dglort.inner_rss = 1; + dglort.glort = interface->glort; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + /* configure DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + if (interface->l2_accel) + dglort.shared_l = fls(interface->l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); +} + +/** + * fm10k_configure_rx - Configure Receive Unit after Reset + * @interface: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
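+ * This configures the SWPRI to PC map, the DGLORT/RSS tables and each Rx descriptor ring in turn.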
+ **/ +static void fm10k_configure_rx(struct fm10k_intfc *interface) +{ + int i; + + /* Configure SWPRI to PC map */ + fm10k_configure_swpri_map(interface); + + /* Configure RSS and DGLORT map */ + fm10k_configure_dglort(interface); + + /* Setup the HW Rx Head and Tail descriptor pointers */ + for (i = 0; i < interface->num_rx_queues; i++) + fm10k_configure_rx_ring(interface, interface->rx_ring[i]); + + /* possible poll here to verify that Rx rings are now enabled */ +} + +static void fm10k_napi_enable_all(struct fm10k_intfc *interface) +{ + struct fm10k_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { + q_vector = interface->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} + +static irqreturn_t fm10k_msix_clean_rings(int irq, void *data) +{ + struct fm10k_q_vector *q_vector = data; + + if (q_vector->rx.count || q_vector->tx.count) + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data) +{ + struct fm10k_intfc *interface = data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + + /* re-enable mailbox interrupt and indicate 20us delay */ + fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), + FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + + /* service upstream mailbox */ + if (fm10k_mbx_trylock(interface)) { + mbx->ops.process(hw, mbx); + fm10k_mbx_unlock(interface); + } + + hw->mac.get_host_state = 1; + fm10k_service_event_schedule(interface); + + return IRQ_HANDLED; +} + +#define FM10K_ERR_MSG(type) case (type): error = #type; break +static void fm10k_print_fault(struct fm10k_intfc *interface, int type, + struct fm10k_fault *fault) +{ + struct pci_dev *pdev = interface->pdev; + char *error; + + switch (type) { + case FM10K_PCA_FAULT: + switch (fault->type) { + default: + error = "Unknown PCA error"; + break; + FM10K_ERR_MSG(PCA_NO_FAULT); + FM10K_ERR_MSG(PCA_UNMAPPED_ADDR); + FM10K_ERR_MSG(PCA_BAD_QACCESS_PF); + FM10K_ERR_MSG(PCA_BAD_QACCESS_VF); + FM10K_ERR_MSG(PCA_MALICIOUS_REQ); + FM10K_ERR_MSG(PCA_POISONED_TLP); + FM10K_ERR_MSG(PCA_TLP_ABORT); + } + break; + case FM10K_THI_FAULT: + switch (fault->type) { + default: + error = "Unknown THI error"; + break; + FM10K_ERR_MSG(THI_NO_FAULT); + FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT); + } + break; + case FM10K_FUM_FAULT: + switch (fault->type) { + default: + error = "Unknown FUM error"; + break; + FM10K_ERR_MSG(FUM_NO_FAULT); + FM10K_ERR_MSG(FUM_UNMAPPED_ADDR); + FM10K_ERR_MSG(FUM_BAD_VF_QACCESS); + FM10K_ERR_MSG(FUM_ADD_DECODE_ERR); + FM10K_ERR_MSG(FUM_RO_ERROR); + FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR); + FM10K_ERR_MSG(FUM_CSR_TIMEOUT); + FM10K_ERR_MSG(FUM_INVALID_TYPE); + FM10K_ERR_MSG(FUM_INVALID_LENGTH); + FM10K_ERR_MSG(FUM_INVALID_BE); + FM10K_ERR_MSG(FUM_INVALID_ALIGN); + } + break; + default: + error = "Undocumented fault"; + break; + } + + dev_warn(&pdev->dev, + "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", + error, fault->address, fault->specinfo, + PCI_SLOT(fault->func), PCI_FUNC(fault->func)); +} + +static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_fault fault = { 0 }; + int type, err; + + for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT; + eicr; + eicr >>= 1, type += FM10K_FAULT_SIZE) { + /* only check if there is an error reported */ + if (!(eicr & 0x1)) + continue; + + /* retrieve fault info */ + err = hw->mac.ops.get_fault(hw, type, &fault); + if (err) { + dev_err(&interface->pdev->dev, + 
"error reading fault\n"); + continue; + } + + fm10k_print_fault(interface, type, &fault); + } +} + +static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) +{ + struct fm10k_hw *hw = &interface->hw; + const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u32 maxholdq; + int q; + + if (!(eicr & FM10K_EICR_MAXHOLDTIME)) + return; + + maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7)); + if (maxholdq) + fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); + for (q = 255;;) { + if (maxholdq & (1 << 31)) { + if (q < FM10K_MAX_QUEUES_PF) { + interface->rx_overrun_pf++; + fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); + } else { + interface->rx_overrun_vf++; + } + } + + maxholdq *= 2; + if (!maxholdq) + q &= ~(32 - 1); + + if (!q) + break; + + if (q-- % 32) + continue; + + maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32)); + if (maxholdq) + fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq); + } +} + +static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) +{ + struct fm10k_intfc *interface = data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 eicr; + + /* unmask any set bits related to this interrupt */ + eicr = fm10k_read_reg(hw, FM10K_EICR); + fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX | + FM10K_EICR_SWITCHREADY | + FM10K_EICR_SWITCHNOTREADY)); + + /* report any faults found to the message log */ + fm10k_report_fault(interface, eicr); + + /* reset any queues disabled due to receiver overrun */ + fm10k_reset_drop_on_empty(interface, eicr); + + /* service mailboxes */ + if (fm10k_mbx_trylock(interface)) { + mbx->ops.process(hw, mbx); + fm10k_iov_event(interface); + fm10k_mbx_unlock(interface); + } + + /* if switch toggled state we should reset GLORTs */ + if (eicr & FM10K_EICR_SWITCHNOTREADY) { + /* force link down for at least 4 seconds */ + interface->link_down_event = jiffies + (4 * HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* reset dglort_map back to no config */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + } + + /* we should validate host state after interrupt event */ + hw->mac.get_host_state = 1; + fm10k_service_event_schedule(interface); + + /* re-enable mailbox interrupt and indicate 20us delay */ + fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), + FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + + return IRQ_HANDLED; +} + +void fm10k_mbx_free_irq(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct fm10k_hw *hw = &interface->hw; + int itr_reg; + + /* disconnect the mailbox */ + hw->mbx.ops.disconnect(hw, &hw->mbx); + + /* disable Mailbox cause */ + if (hw->mac.type == fm10k_mac_pf) { + fm10k_write_reg(hw, FM10K_EIMR, + FM10K_EIMR_DISABLE(PCA_FAULT) | + FM10K_EIMR_DISABLE(FUM_FAULT) | + FM10K_EIMR_DISABLE(MAILBOX) | + FM10K_EIMR_DISABLE(SWITCHREADY) | + FM10K_EIMR_DISABLE(SWITCHNOTREADY) | + FM10K_EIMR_DISABLE(SRAMERROR) | + FM10K_EIMR_DISABLE(VFLR) | + FM10K_EIMR_DISABLE(MAXHOLDTIME)); + itr_reg = FM10K_ITR(FM10K_MBX_VECTOR); + } else { + itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR); + } + + fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET); + + free_irq(entry->vector, interface); +} + +static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + bool vlan_override = hw->mac.vlan_override; + u16 default_vid = hw->mac.default_vid; + struct fm10k_intfc *interface; + s32 err; + + err = fm10k_msg_mac_vlan_vf(hw, results, mbx); + if (err) + return err; + + interface = container_of(hw, struct 
fm10k_intfc, hw); + + /* MAC was changed so we need reset */ + if (is_valid_ether_addr(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + /* VLAN override was changed, or default VLAN changed */ + if ((vlan_override != hw->mac.vlan_override) || + (default_vid != hw->mac.default_vid)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + return 0; +} + +static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_intfc *interface; + u64 timestamp; + s32 err; + + err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP], + &timestamp); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + fm10k_ts_tx_hwtstamp(interface, 0, timestamp); + + return 0; +} + +/* generic error handler for mailbox issues */ +static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_intfc *interface; + struct pci_dev *pdev; + + interface = container_of(hw, struct fm10k_intfc, hw); + pdev = interface->pdev; + + dev_err(&pdev->dev, "Unknown message ID %u\n", + **results & FM10K_TLV_ID_MASK); + + return 0; +} + +static const struct fm10k_msg_data vf_mbx_data[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), +}; + +static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + /* Use timer0 for interrupt moderation on the mailbox */ + u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; + + /* register mailbox handlers */ + err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); + if (err) + return err; + + /* request the IRQ */ + err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0, + dev->name, interface); + if (err) { + netif_err(interface, probe, dev, + "request_irq for msix_mbx failed: %d\n", err); + return err; + } + + /* map all of the interrupt sources */ + fm10k_write_reg(hw, FM10K_VFINT_MAP, itr); + + /* enable interrupt */ + fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE); + + return 0; +} + +static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_intfc *interface; + u32 dglort_map = hw->mac.dglort_map; + s32 err; + + err = fm10k_msg_lport_map_pf(hw, results, mbx); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + /* we need to reset if port count was just updated */ + if (dglort_map != hw->mac.dglort_map) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + return 0; +} + +static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_intfc *interface; + u16 glort, pvid; + u32 pvid_update; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], + &pvid_update); + if (err) + return err; + + /* extract values from the pvid update */ + glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); + pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* verify VID is valid */ + if (pvid >=
FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + interface = container_of(hw, struct fm10k_intfc, hw); + + /* check to see if this belongs to one of the VFs */ + err = fm10k_iov_update_pvid(interface, glort, pvid); + if (!err) + return 0; + + /* we need to reset if default VLAN was just updated */ + if (pvid != hw->mac.default_vid) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + hw->mac.default_vid = pvid; + + return 0; +} + +static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_1588_timestamp timestamp; + struct fm10k_iov_data *iov_data; + struct fm10k_intfc *interface; + u16 sglort, vf_idx; + s32 err; + + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_PF_ATTR_ID_1588_TIMESTAMP], + &timestamp, sizeof(timestamp)); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + if (timestamp.dglort) { + fm10k_ts_tx_hwtstamp(interface, timestamp.dglort, + le64_to_cpu(timestamp.egress)); + return 0; + } + + /* either dglort or sglort must be set */ + if (!timestamp.sglort) + return FM10K_ERR_PARAM; + + /* verify GLORT is at least one of the ones we own */ + sglort = le16_to_cpu(timestamp.sglort); + if (!fm10k_glort_valid_pf(hw, sglort)) + return FM10K_ERR_PARAM; + + if (sglort == interface->glort) { + fm10k_ts_tx_hwtstamp(interface, 0, + le64_to_cpu(timestamp.ingress)); + return 0; + } + + /* if there is no iov_data then there are no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return FM10K_ERR_PARAM; + + rcu_read_lock(); + + /* notify VF if this timestamp belongs to it */ + iov_data = interface->iov_data; + vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort; + + if (!iov_data || vf_idx >= iov_data->num_vfs) { + err = FM10K_ERR_PARAM; + goto err_unlock; + } + + err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx], + le64_to_cpu(timestamp.ingress)); + +err_unlock: + rcu_read_unlock(); + + return err; +} + +static const struct fm10k_msg_data pf_mbx_data[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), + FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), +}; + +static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + /* Use timer0 for interrupt moderation on the mailbox */ + u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; + u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; + + /* register mailbox handlers */ + err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); + if (err) + return err; + + /* request the IRQ */ + err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0, + dev->name, interface); + if (err) { + netif_err(interface, probe, dev, + "request_irq for msix_mbx failed: %d\n", err); + return err; + } + + /* Enable interrupts w/ no moderation for "other" interrupts */ + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr); + fm10k_write_reg(hw,
FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr); + + /* Enable interrupts w/ moderation for mailbox */ + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr); + + /* Enable individual interrupt causes */ + fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | + FM10K_EIMR_ENABLE(FUM_FAULT) | + FM10K_EIMR_ENABLE(MAILBOX) | + FM10K_EIMR_ENABLE(SWITCHREADY) | + FM10K_EIMR_ENABLE(SWITCHNOTREADY) | + FM10K_EIMR_ENABLE(SRAMERROR) | + FM10K_EIMR_ENABLE(VFLR) | + FM10K_EIMR_ENABLE(MAXHOLDTIME)); + + /* enable interrupt */ + fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE); + + return 0; +} + +int fm10k_mbx_request_irq(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + int err; + + /* enable Mailbox cause */ + if (hw->mac.type == fm10k_mac_pf) + err = fm10k_mbx_request_irq_pf(interface); + else + err = fm10k_mbx_request_irq_vf(interface); + + /* connect mailbox */ + if (!err) + err = hw->mbx.ops.connect(hw, &hw->mbx); + + return err; +} + +/** + * fm10k_qv_free_irq - release interrupts associated with queue vectors + * @interface: board private structure + * + * Release all interrupts associated with this interface + **/ +void fm10k_qv_free_irq(struct fm10k_intfc *interface) +{ + int vector = interface->num_q_vectors; + struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; + + entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; + + while (vector) { + struct fm10k_q_vector *q_vector; + + vector--; + entry--; + q_vector = interface->q_vector[vector]; + + if (!q_vector->tx.count && !q_vector->rx.count) + continue; + + /* disable interrupts */ + + writel(FM10K_ITR_MASK_SET, q_vector->itr); + + free_irq(entry->vector, q_vector); + } +} + +/** + * fm10k_qv_request_irq - initialize interrupts for queue vectors + * @interface: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +int fm10k_qv_request_irq(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; + int ri = 0, ti = 0; + int vector, err; + + entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; + + for (vector = 0; vector < interface->num_q_vectors; vector++) { + struct fm10k_q_vector *q_vector = interface->q_vector[vector]; + + /* name the vector */ + if (q_vector->tx.count && q_vector->rx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", dev->name, ri++); + ti++; + } else if (q_vector->rx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", dev->name, ri++); + } else if (q_vector->tx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", dev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + + /* Assign ITR register to q_vector */ + q_vector->itr = (hw->mac.type == fm10k_mac_pf) ? 
+ &interface->uc_addr[FM10K_ITR(entry->entry)] : + &interface->uc_addr[FM10K_VFITR(entry->entry)]; + + /* request the IRQ */ + err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + netif_err(interface, probe, dev, + "request_irq failed for MSIX interrupt Error: %d\n", + err); + goto err_out; + } + + /* Enable q_vector */ + writel(FM10K_ITR_ENABLE, q_vector->itr); + + entry++; + } + + return 0; + +err_out: + /* wind through the ring freeing all entries and vectors */ + while (vector) { + struct fm10k_q_vector *q_vector; + + entry--; + vector--; + q_vector = interface->q_vector[vector]; + + if (!q_vector->tx.count && !q_vector->rx.count) + continue; + + /* disable interrupts */ + + writel(FM10K_ITR_MASK_SET, q_vector->itr); + + free_irq(entry->vector, q_vector); + } + + return err; +} + +void fm10k_up(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + /* Enable Tx/Rx DMA */ + hw->mac.ops.start_hw(hw); + + /* configure Tx descriptor rings */ + fm10k_configure_tx(interface); + + /* configure Rx descriptor rings */ + fm10k_configure_rx(interface); + + /* configure interrupts */ + hw->mac.ops.update_int_moderator(hw); + + /* clear down bit to indicate we are ready to go */ + clear_bit(__FM10K_DOWN, &interface->state); + + /* enable polling cleanups */ + fm10k_napi_enable_all(interface); + + /* re-establish Rx filters */ + fm10k_restore_rx_state(interface); + + /* enable transmits */ + netif_tx_start_all_queues(interface->netdev); + + /* kick off the service timer */ + mod_timer(&interface->service_timer, jiffies); +} + +static void fm10k_napi_disable_all(struct fm10k_intfc *interface) +{ + struct fm10k_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { + q_vector = interface->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +void fm10k_down(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + + /* signal that we are down to the interrupt handler and service task */ + set_bit(__FM10K_DOWN, &interface->state); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + + /* disable transmits */ + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + + /* reset Rx filters */ + fm10k_reset_rx_state(interface); + + /* allow 10ms for device to quiesce */ + usleep_range(10000, 20000); + + /* disable polling routines */ + fm10k_napi_disable_all(interface); + + del_timer_sync(&interface->service_timer); + + /* capture stats one last time before stopping interface */ + fm10k_update_stats(interface); + + /* Disable DMA engine for Tx/Rx */ + hw->mac.ops.stop_hw(hw); + + /* free any buffers still on the rings */ + fm10k_clean_all_tx_rings(interface); +} + +/** + * fm10k_sw_init - Initialize general software structures + * @interface: host interface private structure to initialize + * + * fm10k_sw_init initializes the interface private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
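+ * It also copies the MAC/IOV operation tables, applies the DMA mask, resets and initializes the hardware, assigns the MAC address, and seeds the RSS key along with the default ring and interrupt moderation settings.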
+ **/ +static int fm10k_sw_init(struct fm10k_intfc *interface, + const struct pci_device_id *ent) +{ + static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25, + 0x3d256741, 0xb08fa343, + 0xcb2bcad0, 0xb4307bae, + 0xa32dcb77, 0x0cf23080, + 0x3bb7426a, 0xfa01acbe }; + const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; + struct fm10k_hw *hw = &interface->hw; + struct pci_dev *pdev = interface->pdev; + struct net_device *netdev = interface->netdev; + unsigned int rss; + int err; + + /* initialize back pointer */ + hw->back = interface; + hw->hw_addr = interface->uc_addr; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Setup hw api */ + memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = fi->mac; + + /* Setup IOV handlers */ + if (fi->iov_ops) + memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops)); + + /* Set common capability flags and settings */ + rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus()); + interface->ring_feature[RING_F_RSS].limit = rss; + fi->get_invariants(hw); + + /* pick up the PCIe bus settings for reporting later */ + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + + /* limit the usable DMA range */ + if (hw->mac.ops.set_dma_mask) + hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev)); + + /* update netdev with DMA restrictions */ + if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + /* delay any future reset requests */ + interface->last_reset = jiffies + (10 * HZ); + + /* reset and initialize the hardware so it is in a known state */ + err = hw->mac.ops.reset_hw(hw) ? 
: hw->mac.ops.init_hw(hw); + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); + return err; + } + + /* initialize hardware statistics */ + hw->mac.ops.update_hw_stats(hw, &interface->stats); + + /* Set upper limit on IOV VFs that can be allocated */ + pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs); + + /* Start with random Ethernet address */ + eth_random_addr(hw->mac.addr); + + /* Initialize MAC address from hardware */ + err = hw->mac.ops.read_mac_addr(hw); + if (err) { + dev_warn(&pdev->dev, + "Failed to obtain MAC address defaulting to random\n"); + /* tag address assignment as random */ + netdev->addr_assign_type |= NET_ADDR_RANDOM; + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + return -EIO; + } + + /* assign BAR 4 resources for use with PTP */ + if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED) + interface->sw_addr = ioremap(pci_resource_start(pdev, 4), + pci_resource_len(pdev, 4)); + hw->sw_addr = interface->sw_addr; + + /* Only the PF can support VXLAN and NVGRE offloads */ + if (hw->mac.type != fm10k_mac_pf) { + netdev->hw_enc_features = 0; + netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + } + + /* initialize DCBNL interface */ + fm10k_dcbnl_set_ops(netdev); + + /* Initialize service timer and service task */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + setup_timer(&interface->service_timer, &fm10k_service_timer, + (unsigned long)interface); + INIT_WORK(&interface->service_task, fm10k_service_task); + + /* Intitialize timestamp data */ + fm10k_ts_init(interface); + + /* set default ring sizes */ + interface->tx_ring_count = FM10K_DEFAULT_TXD; + interface->rx_ring_count = FM10K_DEFAULT_RXD; + + /* set default interrupt moderation */ + interface->tx_itr = FM10K_ITR_10K; + interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + + /* initialize vxlan_port list */ + INIT_LIST_HEAD(&interface->vxlan_port); + + /* initialize RSS key */ + memcpy(interface->rssrk, seed, sizeof(seed)); + + /* Start off interface as being down */ + set_bit(__FM10K_DOWN, &interface->state); + + return 0; +} + +static void fm10k_slot_warn(struct fm10k_intfc *interface) +{ + struct device *dev = &interface->pdev->dev; + struct fm10k_hw *hw = &interface->hw; + + if (hw->mac.ops.is_slot_appropriate(hw)) + return; + + dev_warn(dev, + "For optimal performance, a %s %s slot is recommended.\n", + (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" : + hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" : + "x8"), + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : + "8.0GT/s")); + dev_warn(dev, + "A slot with more lanes and/or higher speed is suggested.\n"); +} + +/** + * fm10k_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in fm10k_pci_tbl + * + * Returns 0 on success, negative on failure + * + * fm10k_probe initializes an interface identified by a pci_dev structure. + * The OS initialization, configuring of the interface private structure, + * and a hardware reset occur. 
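+ * The netdev is registered with the carrier off and all Tx queues stopped until the switch reports that the host is ready.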
+ **/ +static int fm10k_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + int err; + u64 dma_mask; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* By default fm10k only supports a 48 bit DMA mask */ + dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev); + + if ((dma_mask <= DMA_BIT_MASK(32)) || + dma_set_mask_and_coherent(&pdev->dev, dma_mask)) { + dma_mask &= DMA_BIT_MASK(32); + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, + IORESOURCE_MEM), + fm10k_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + netdev = fm10k_alloc_netdev(); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_netdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + interface = netdev_priv(netdev); + pci_set_drvdata(pdev, interface); + + interface->netdev = netdev; + interface->pdev = pdev; + hw = &interface->hw; + + interface->uc_addr = ioremap(pci_resource_start(pdev, 0), + FM10K_UC_ADDR_SIZE); + if (!interface->uc_addr) { + err = -EIO; + goto err_ioremap; + } + + err = fm10k_sw_init(interface, ent); + if (err) + goto err_sw_init; + + /* enable debugfs support */ + fm10k_dbg_intfc_init(interface); + + err = fm10k_init_queueing_scheme(interface); + if (err) + goto err_sw_init; + + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_interrupt; + + /* final check of hardware state before registering the interface */ + err = fm10k_hw_ready(interface); + if (err) + goto err_register; + + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* stop all the transmit queues from transmitting until link is up */ + netif_tx_stop_all_queues(netdev); + + /* Register PTP interface */ + fm10k_ptp_register(interface); + + /* print bus type/speed/width info */ + dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n", + (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" : + hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : + hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : + "Unknown"), + (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" : + hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" : + hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" : + "Unknown"), + (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : + hw->bus.payload == fm10k_bus_payload_256 ? "256B" : + hw->bus.payload == fm10k_bus_payload_512 ?
"512B" : + "Unknown")); + + /* print warning for non-optimal configurations */ + fm10k_slot_warn(interface); + + /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ + fm10k_iov_configure(pdev, 0); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + + return 0; + +err_register: + fm10k_mbx_free_irq(interface); +err_mbx_interrupt: + fm10k_clear_queueing_scheme(interface); +err_sw_init: + if (interface->sw_addr) + iounmap(interface->sw_addr); + iounmap(interface->uc_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_netdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * fm10k_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * fm10k_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void fm10k_remove(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + + /* free netdev, this may bounce the interrupts due to setup_tc */ + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + /* cleanup timestamp handling */ + fm10k_ptp_unregister(interface); + + /* release VFs */ + fm10k_iov_disable(pdev); + + /* disable mailbox interrupt */ + fm10k_mbx_free_irq(interface); + + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + + /* remove any debugfs interfaces */ + fm10k_dbg_intfc_exit(interface); + + if (interface->sw_addr) + iounmap(interface->sw_addr); + iounmap(interface->uc_addr); + + free_netdev(netdev); + + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +/** + * fm10k_resume - Restore device to pre-sleep state + * @pdev: PCI device information struct + * + * fm10k_resume is called after the system has powered back up from a sleep + * state and is ready to resume operation. This function is meant to restore + * the device back to its pre-sleep state. + **/ +static int fm10k_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + /* refresh hw_addr in case it was dropped */ + hw->hw_addr = interface->uc_addr; + + /* reset hardware to known state */ + err = hw->mac.ops.init_hw(&interface->hw); + if (err) + return err; + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + /* reset clock */ + fm10k_ts_reset(interface); + + rtnl_lock(); + + err = fm10k_init_queueing_scheme(interface); + if (!err) { + fm10k_mbx_request_irq(interface); + if (netif_running(netdev)) + err = fm10k_open(netdev); + } + + rtnl_unlock(); + + if (err) + return err; + + /* restore SR-IOV interface */ + fm10k_iov_resume(pdev); + + netif_device_attach(netdev); + + return 0; +} + +/** + * fm10k_suspend - Prepare the device for a system sleep state + * @pdev: PCI device information struct + * + * fm10k_suspend is meant to shutdown the device prior to the system entering + * a sleep state. The fm10k hardware does not support wake on lan so the + * driver simply needs to shut down the device so it is in a low power state. + **/ +static int fm10k_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + int err = 0; + + netif_device_detach(netdev); + + fm10k_iov_suspend(pdev); + + rtnl_lock(); + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + fm10k_clear_queueing_scheme(interface); + + rtnl_unlock(); + + err = pci_save_state(pdev); + if (err) + return err; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#endif /* CONFIG_PM */ +/** + * fm10k_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * fm10k_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + + /* After second error pci->state_saved is false, this + * resets it so EEH doesn't break. 
+ */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + /* refresh hw_addr in case it was dropped */ + interface->hw.hw_addr = interface->uc_addr; + + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + fm10k_service_event_schedule(interface); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * fm10k_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. + */ +static void fm10k_io_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err = 0; + + /* reset hardware to known state */ + hw->mac.ops.init_hw(&interface->hw); + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + /* reassociate interrupts */ + fm10k_mbx_request_irq(interface); + + /* reset clock */ + fm10k_ts_reset(interface); + + if (netif_running(netdev)) + err = fm10k_open(netdev); + + /* final check of hardware state before registering the interface */ + err = err ? : fm10k_hw_ready(interface); + + if (!err) + netif_device_attach(netdev); +} + +static const struct pci_error_handlers fm10k_err_handler = { + .error_detected = fm10k_io_error_detected, + .slot_reset = fm10k_io_slot_reset, + .resume = fm10k_io_resume, +}; + +static struct pci_driver fm10k_driver = { + .name = fm10k_driver_name, + .id_table = fm10k_pci_tbl, + .probe = fm10k_probe, + .remove = fm10k_remove, +#ifdef CONFIG_PM + .suspend = fm10k_suspend, + .resume = fm10k_resume, +#endif + .sriov_configure = fm10k_iov_configure, + .err_handler = &fm10k_err_handler +}; + +/** + * fm10k_register_pci_driver - register driver interface + * + * This function is called on module load in order to register the driver. + **/ +int fm10k_register_pci_driver(void) +{ + return pci_register_driver(&fm10k_driver); +} + +/** + * fm10k_unregister_pci_driver - unregister driver interface + * + * This function is called on module unload in order to remove the driver. + **/ +void fm10k_unregister_pci_driver(void) +{ + pci_unregister_driver(&fm10k_driver); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c new file mode 100644 index 0000000000000000000000000000000000000000..275423d4f77778911b71145dfd165de6dcb8d2ca --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -0,0 +1,1880 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +/** + * fm10k_reset_hw_pf - PF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. + **/ +static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) +{ + s32 err; + u32 reg; + u16 i; + + /* Disable interrupts */ + fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL)); + + /* Lock ITR2 reg 0 into itself and disable interrupt moderation */ + fm10k_write_reg(hw, FM10K_ITR2(0), 0); + fm10k_write_reg(hw, FM10K_INT_CTRL, 0); + + /* We assume here Tx and Rx queue 0 are owned by the PF */ + + /* Shut off VF access to their queues forcing them to queue 0 */ + for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) { + fm10k_write_reg(hw, FM10K_TQMAP(i), 0); + fm10k_write_reg(hw, FM10K_RQMAP(i), 0); + } + + /* shut down all rings */ + err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); + if (err) + return err; + + /* Verify that DMA is no longer active */ + reg = fm10k_read_reg(hw, FM10K_DMA_CTRL); + if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) + return FM10K_ERR_DMA_PENDING; + + /* Inititate data path reset */ + reg |= FM10K_DMA_CTRL_DATAPATH_RESET; + fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); + + /* Flush write and allow 100us for reset to complete */ + fm10k_write_flush(hw); + udelay(FM10K_RESET_TIMEOUT); + + /* Verify we made it out of reset */ + reg = fm10k_read_reg(hw, FM10K_IP); + if (!(reg & FM10K_IP_NOTINRESET)) + err = FM10K_ERR_RESET_FAILED; + + return err; +} + +/** + * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support + * @hw: pointer to hardware structure + * + * Looks at the ARI hierarchy bit to determine whether ARI is supported or not. 
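The 64-versus-7 total_vfs split applied in fm10k_init_hw_pf() below comes straight from this ARI hierarchy check. A minimal standalone sketch of that decision, with SRIOV_CTRL_VFARI used as a placeholder for the real FM10K_PCIE_SRIOV_CTRL_VFARI bit, which is defined in the driver headers rather than in this patch:

#include <stdint.h>
#include <stdio.h>

#define SRIOV_CTRL_VFARI 0x0010u	/* placeholder for FM10K_PCIE_SRIOV_CTRL_VFARI */

static int is_ari_hierarchy(uint16_t sriov_ctrl)
{
	/* same shape as fm10k_is_ari_hierarchy_pf(): test the ARI bit */
	return !!(sriov_ctrl & SRIOV_CTRL_VFARI);
}

int main(void)
{
	/* made-up SR-IOV control words, with and without the ARI bit */
	printf("total_vfs = %d\n", is_ari_hierarchy(0x0013) ? 64 : 7);	/* 64 */
	printf("total_vfs = %d\n", is_ari_hierarchy(0x0003) ? 64 : 7);	/* 7 */
	return 0;
}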
+ **/ +static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw) +{ + u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL); + + return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI); +} + +/** + * fm10k_init_hw_pf - PF hardware initialization + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) +{ + u32 dma_ctrl, txqctl; + u16 i; + + /* Establish default VSI as valid */ + fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0); + fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default), + FM10K_DGLORTMAP_ANY); + + /* Invalidate all other GLORT entries */ + for (i = 1; i < FM10K_DGLORT_COUNT; i++) + fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE); + + /* reset ITR2(0) to point to itself */ + fm10k_write_reg(hw, FM10K_ITR2(0), 0); + + /* reset VF ITR2(0) to point to 0 avoid PF registers */ + fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0); + + /* loop through all PF ITR2 registers pointing them to the previous */ + for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++) + fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); + + /* Enable interrupt moderator if not already enabled */ + fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); + + /* compute the default txqctl configuration */ + txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | + (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT); + + for (i = 0; i < FM10K_MAX_QUEUES; i++) { + /* configure rings for 256 Queue / 32 Descriptor cache mode */ + fm10k_write_reg(hw, FM10K_TQDLOC(i), + (i * FM10K_TQDLOC_BASE_32_DESC) | + FM10K_TQDLOC_SIZE_32_DESC); + fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); + + /* configure rings to provide TPH processing hints */ + fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i), + FM10K_TPH_TXCTRL_DESC_TPHEN | + FM10K_TPH_TXCTRL_DESC_RROEN | + FM10K_TPH_TXCTRL_DESC_WROEN | + FM10K_TPH_TXCTRL_DATA_RROEN); + fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i), + FM10K_TPH_RXCTRL_DESC_TPHEN | + FM10K_TPH_RXCTRL_DESC_RROEN | + FM10K_TPH_RXCTRL_DATA_WROEN | + FM10K_TPH_RXCTRL_HDR_WROEN); + } + + /* set max hold interval to align with 1.024 usec in all modes */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + break; + case fm10k_bus_speed_5000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + break; + case fm10k_bus_speed_8000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + break; + default: + dma_ctrl = 0; + break; + } + + /* Configure TSO flags */ + fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW); + fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI); + + /* Enable DMA engine + * Set Rx Descriptor size to 32 + * Set Minimum MSS to 64 + * Set Maximum number of Rx queues to 256 / 32 Descriptor + */ + dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE | + FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 | + FM10K_DMA_CTRL_32_DESC; + + fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl); + + /* record maximum queue count, we limit ourselves to 128 */ + hw->mac.max_queues = FM10K_MAX_QUEUES_PF; + + /* We support either 64 VFs or 7 VFs depending on if we have ARI */ + hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7; + + return 0; +} + +/** + * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. 
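The ITR2 writes in fm10k_init_hw_pf() above are easier to picture as the linked list the comments describe: each PF vector points at the previous one and entry 0 points at itself, terminating the chain. A standalone sketch of that shape, with NUM_PF_VECTORS standing in for FM10K_ITR_REG_COUNT_PF:

#include <stdio.h>

#define NUM_PF_VECTORS 8	/* stand-in for FM10K_ITR_REG_COUNT_PF */

int main(void)
{
	unsigned int itr2[NUM_PF_VECTORS], i;

	itr2[0] = 0;			/* ITR2(0) locked onto itself */
	for (i = 1; i < NUM_PF_VECTORS; i++)
		itr2[i] = i - 1;	/* each vector points at the previous one */

	/* walking from the last vector visits every vector and stops at 0 */
	for (i = NUM_PF_VECTORS - 1; ; i = itr2[i]) {
		printf("vector %u\n", i);
		if (i == itr2[i])
			break;
	}
	return 0;
}

fm10k_update_int_moderator_pf() later repoints the heads of this chain at the highest-numbered vector that is still enabled, which is why the init code only needs to lay down the default ordering here.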
+ **/ +static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) +{ + return (hw->bus.speed == hw->bus_caps.speed) && + (hw->bus.width == hw->bus_caps.width); +} + +/** + * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. In addition to the + * standard set/clear that supports one bit a multi-bit write is + * supported to set 64 bits at a time. + **/ +static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + u32 vlan_table, reg, mask, bit, len; + + /* verify the VSI index is valid */ + if (vsi > FM10K_VLAN_TABLE_VSI_MAX) + return FM10K_ERR_PARAM; + + /* VLAN multi-bit write: + * The multi-bit write has several parts to it. + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | RSVD0 | Length |C|RSVD0| VLAN ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * VLAN ID: Vlan Starting value + * RSVD0: Reserved section, must be 0 + * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message) + * Length: Number of times to repeat the bit being set + */ + len = vid >> 16; + vid = (vid << 17) >> 17; + + /* verify the reserved 0 fields are 0 */ + if (len >= FM10K_VLAN_TABLE_VID_MAX || + vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* Loop through the table updating all required VLANs */ + for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32; + len < FM10K_VLAN_TABLE_VID_MAX; + len -= 32 - bit, reg++, bit = 0) { + /* record the initial state of the register */ + vlan_table = fm10k_read_reg(hw, reg); + + /* truncate mask if we are at the start or end of the run */ + mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit; + + /* make necessary modifications to the register */ + mask &= set ? ~vlan_table : vlan_table; + if (mask) + fm10k_write_reg(hw, reg, vlan_table ^ mask); + } + + return 0; +} + +/** + * fm10k_read_mac_addr_pf - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the SM_AREA and stores the value. 
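A worked example of the multi-bit VLAN encoding consumed by fm10k_update_vlan_pf() above. Judging from the mask arithmetic, the length field in the upper 16 bits describes a run of len + 1 consecutive VLAN IDs; the expressions below are lifted from the function and only renamed so they build standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t packed = (11u << 16) | 68;		/* run of 12 VLANs: 68..79 */
	uint32_t len = packed >> 16;			/* 11 */
	uint32_t vid = (packed << 17) >> 17;		/* 68, upper/reserved bits dropped */
	uint32_t word = vid / 32, bit = vid % 32;	/* word 2 of the table, bit 4 */
	uint32_t mask = (~(uint32_t)0 >> ((len < 31) ? 31 - len : 0)) << bit;

	printf("word %u, mask 0x%08x\n", word, mask);	/* word 2, mask 0x0000fff0 */
	return 0;
}

This particular run fits in a single 32-bit word of the VLAN table; a longer run simply continues into the following words with bit starting back at 0, which is exactly what the loop in the driver does.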
+ **/ +static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 serial_num; + int i; + + serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); + + /* last byte should be all 1's */ + if ((~serial_num) << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(serial_num >> 24); + perm_addr[1] = (u8)(serial_num >> 16); + perm_addr[2] = (u8)(serial_num >> 8); + + serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0)); + + /* first byte should be all 1's */ + if ((~serial_num) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(serial_num >> 16); + perm_addr[4] = (u8)(serial_num >> 8); + perm_addr[5] = (u8)(serial_num); + + for (i = 0; i < ETH_ALEN; i++) { + hw->mac.perm_addr[i] = perm_addr[i]; + hw->mac.addr[i] = perm_addr[i]; + } + + return 0; +} + +/** + * fm10k_glort_valid_pf - Validate that the provided glort is valid + * @hw: pointer to the HW structure + * @glort: base glort to be validated + * + * This function will return an error if the provided glort is invalid + **/ +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) +{ + glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT; + + return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE); +} + +/** + * fm10k_update_uc_addr_pf - Update device unicast addresss + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function generates a message to the Switch API requesting + * that the given logical port add/remove the given L2 MAC/VLAN address. + **/ +static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_update mac_update; + u32 msg[5]; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* drop upper 4 bits of VLAN ID */ + vid = (vid << 4) >> 4; + + /* record fields */ + mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) | + ((u32)mac[3] << 16) | + ((u32)mac[4] << 8) | + ((u32)mac[5])); + mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) | + ((u32)mac[1])); + mac_update.vlan = cpu_to_le16(vid); + mac_update.glort = cpu_to_le16(glort); + mac_update.action = add ? 0 : 1; + mac_update.flags = flags; + + /* populate mac_update fields */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE); + fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE, + &mac_update, sizeof(mac_update)); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_uc_addr_pf - Update device unicast addresss + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function is used to add or remove unicast addresses for + * the PF. 
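A standalone rendering of the byte packing undone by fm10k_read_mac_addr_pf() above, using made-up SM_AREA contents. The two guard bytes that must read all ones mirror the validity checks in the function:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sm_area1 = 0xA0B1C2FF;	/* MAC[0..2] in the top three bytes, low byte all ones */
	uint32_t sm_area0 = 0xFFD3E4F5;	/* MAC[3..5] in the low three bytes, top byte all ones */
	uint8_t mac[6];

	/* reject the address if either guard byte is not all ones */
	if ((~sm_area1) << 24 || (~sm_area0) >> 24)
		return 1;

	mac[0] = (uint8_t)(sm_area1 >> 24);
	mac[1] = (uint8_t)(sm_area1 >> 16);
	mac[2] = (uint8_t)(sm_area1 >> 8);
	mac[3] = (uint8_t)(sm_area0 >> 16);
	mac[4] = (uint8_t)(sm_area0 >> 8);
	mac[5] = (uint8_t)sm_area0;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);	/* a0:b1:c2:d3:e4:f5 */
	return 0;
}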
+ **/ +static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + /* verify MAC address is valid */ + if (!is_valid_ether_addr(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags); +} + +/** + * fm10k_update_mc_addr_pf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the PF. + **/ +static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + /* verify multicast address is valid */ + if (!is_multicast_ether_addr(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0); +} + +/** + * fm10k_update_xcast_mode_pf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: base resource tag for this request + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. + **/ +static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], xcast_mode; + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* write xcast mode as a single u32 value, + * lower 16 bits: glort + * upper 16 bits: mode + */ + xcast_mode = ((u32)mode << 16) | glort; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_pf - Update interrupt moderator linked list + * @hw: pointer to hardware structure + * + * This function walks through the MSI-X vector table to determine the + * number of active interrupts and based on that information updates the + * interrupt moderator linked list. + **/ +static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) +{ + u32 i; + + /* Disable interrupt moderator */ + fm10k_write_reg(hw, FM10K_INT_CTRL, 0); + + /* loop through PF from last to first looking enabled vectors */ + for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) { + if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* always reset VFITR2[0] to point to last enabled PF vector*/ + fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); + + /* reset ITR2[0] to point to last enabled PF vector */ + if (!hw->iov.num_vfs) + fm10k_write_reg(hw, FM10K_ITR2(0), i); + + /* Enable interrupt moderator */ + fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); +} + +/** + * fm10k_update_lport_state_pf - Notify the switch of a change in port state + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @count: number of logical ports being updated + * @enable: boolean value indicating enable or disable + * + * This function is used to add/remove a logical port from the switch. 
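Both fm10k_update_xcast_mode_pf() above and fm10k_update_lport_state_pf() below fold their request into a single u32 attribute, with the glort in the low 16 bits and the mode or logical port count in the high 16 bits. A small sketch of that packing with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t glort = 0x1000;	/* made-up base resource tag */
	uint8_t mode = 2;		/* some xcast mode value */
	uint16_t count = 4;		/* four logical ports */

	uint32_t xcast_word = ((uint32_t)mode << 16) | glort;
	uint32_t lport_word = ((uint32_t)count << 16) | glort;

	printf("xcast 0x%08x, lport 0x%08x\n", xcast_word, lport_word);
	/* xcast 0x00021000, lport 0x00041000 */
	return 0;
}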
+ **/ +static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], lport_msg; + + /* do nothing if we are being asked to create or destroy 0 ports */ + if (!count) + return 0; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* construct the lport message from the 2 pieces of data we have */ + lport_msg = ((u32)count << 16) | glort; + + /* generate lport create/delete message */ + fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE : + FM10K_PF_MSG_ID_LPORT_DELETE); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + u16 glort, queue_count, vsi_count, pc_count; + u16 vsi, queue, pc, q_idx; + u32 txqctl, dglortdec, dglortmap; + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* verify the dglort values */ + if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) || + (dglort->vsi_l > 6) || (dglort->vsi_b > 64) || + (dglort->queue_l > 8) || (dglort->queue_b >= 256)) + return FM10K_ERR_PARAM; + + /* determine count of VSIs and queues */ + queue_count = 1 << (dglort->rss_l + dglort->pc_l); + vsi_count = 1 << (dglort->vsi_l + dglort->queue_l); + glort = dglort->glort; + q_idx = dglort->queue_b; + + /* configure SGLORT for queues */ + for (vsi = 0; vsi < vsi_count; vsi++, glort++) { + for (queue = 0; queue < queue_count; queue++, q_idx++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); + fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); + } + } + + /* determine count of PCs and queues */ + queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = 1 << dglort->pc_l; + + /* configure PC for Tx queues */ + for (pc = 0; pc < pc_count; pc++) { + q_idx = pc + dglort->queue_b; + for (queue = 0; queue < queue_count; queue++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); + txqctl &= ~FM10K_TXQCTL_PC_MASK; + txqctl |= pc << FM10K_TXQCTL_PC_SHIFT; + fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); + + q_idx += pc_count; + } + } + + /* configure DGLORTDEC */ + dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | + ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) | + ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) | + ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) | + ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) | + ((u32)(dglort->queue_l)); + if (dglort->inner_rss) + dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE; + + /* configure DGLORTMAP */ + dglortmap = (dglort->idx == fm10k_dglort_default) ? 
+ FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO; + dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l; + dglortmap |= dglort->glort; + + /* write values to hardware */ + fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec); + fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap); + + return 0; +} + +u16 fm10k_queues_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ? + 8 : FM10K_MAX_QUEUES_POOL; +} + +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 num_vfs = hw->iov.num_vfs; + u16 vf_q_idx = FM10K_MAX_QUEUES; + + vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); + + return vf_q_idx; +} + +static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 : + FM10K_MAX_VECTORS_POOL; +} + +static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx = FM10K_MAX_VECTORS_PF; + + vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx; + + return vf_v_idx; +} + +/** + * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization + * @hw: pointer to the HW structure + * @num_vfs: number of VFs to be allocated + * @num_pools: number of virtualization pools to be allocated + * + * Allocates queues and traffic classes to virtualization entities to prepare + * the PF for SR-IOV and VMDq + **/ +static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, + u16 num_pools) +{ + u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx; + u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT; + int i, j; + + /* hardware only supports up to 64 pools */ + if (num_pools > 64) + return FM10K_ERR_PARAM; + + /* the number of VFs cannot exceed the number of pools */ + if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) + return FM10K_ERR_PARAM; + + /* record number of virtualization entities */ + hw->iov.num_vfs = num_vfs; + hw->iov.num_pools = num_pools; + + /* determine qmap offsets and counts */ + qmap_stride = (num_vfs > 8) ? 
32 : 256; + qpp = fm10k_queues_per_pool(hw); + vpp = fm10k_vectors_per_pool(hw); + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, 0); + qmap_idx = 0; + + /* establish TCs with -1 credits and no quanta to prevent transmit */ + for (i = 0; i < num_vfs; i++) { + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0); + fm10k_write_reg(hw, FM10K_TC_RATE(i), 0); + fm10k_write_reg(hw, FM10K_TC_CREDIT(i), + FM10K_TC_CREDIT_CREDIT_MASK); + } + + /* zero out all mbmem registers */ + for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) + fm10k_write_reg(hw, FM10K_MBMEM(i), 0); + + /* clear event notification of VF FLR */ + fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0); + fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0); + + /* loop through unallocated rings assigning them back to PF */ + for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { + fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | vid); + fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); + } + + /* PF should have already updated VFITR2[0] */ + + /* update all ITR registers to flow to VFITR2[0] */ + for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) { + if (!(i & (vpp - 1))) + fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp); + else + fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); + } + + /* update PF ITR2[0] to reference the last vector */ + fm10k_write_reg(hw, FM10K_ITR2(0), + fm10k_vf_vector_index(hw, num_vfs - 1)); + + /* loop through rings populating rings and TCs */ + for (i = 0; i < num_vfs; i++) { + /* record index for VF queue 0 for use in end of loop */ + vf_q_idx0 = vf_q_idx; + + for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) { + /* assign VF and locked TC to queues */ + fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx), + (i << FM10K_TXQCTL_TC_SHIFT) | i | + FM10K_TXQCTL_VF | vid); + fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), + FM10K_RXQCTL_VF | + (i << FM10K_RXQCTL_VF_SHIFT)); + + /* map queue pair to VF */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx); + } + + /* repeat the first ring for all of the remaining VF rings */ + for (; j < qmap_stride; j++, qmap_idx++) { + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0); + } + } + + /* loop through remaining indexes assigning all to queue 0 */ + while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) { + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0); + qmap_idx++; + } + + return 0; +} + +/** + * fm10k_iov_configure_tc_pf - Configure the shaping group for VF + * @hw: pointer to the HW structure + * @vf_idx: index of VF receiving GLORT + * @rate: Rate indicated in Mb/s + * + * Configured the TC for a given VF to allow only up to a given number + * of Mb/s of outgoing Tx throughput. 
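The Mb/s-to-quanta conversion used by fm10k_iov_configure_tc_pf(), whose description precedes this note, reduces to an exact integer ratio: R Mb/s is R * 10^6 / 8 bytes per second, which over an 8.192 us window comes to R * 1.024, i.e. R * 128 / 125 bytes. A standalone check with a 1 Gb/s request:

#include <stdio.h>

int main(void)
{
	int rate = 1000;				/* requested VF rate in Mb/s */
	unsigned int tc_rate = (rate * 128) / 125;	/* 1024 bytes per 8.192 us */
	int interval_doubled = 0;

	if (rate < 4000)
		interval_doubled = 1;	/* keep the larger quanta, stretch the interval */
	else
		tc_rate >>= 1;		/* halve down to bytes per 4.096 us */

	printf("quanta = %u, interval %s\n", tc_rate,
	       interval_doubled ? "doubled" : "unchanged");
	/* quanta = 1024, interval doubled */
	return 0;
}

Keeping the longer interval below 4 Gb/s leaves the credit value larger, which is what the in-code comment means by keeping the rate limiting accurate for low rates.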
+ **/ +static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate) +{ + /* configure defaults */ + u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3; + u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* set interval to align with 4.096 usec in all modes */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + interval = FM10K_TC_RATE_INTERVAL_4US_GEN1; + break; + case fm10k_bus_speed_5000: + interval = FM10K_TC_RATE_INTERVAL_4US_GEN2; + break; + default: + break; + } + + if (rate) { + if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN) + return FM10K_ERR_PARAM; + + /* The quanta is measured in Bytes per 4.096 or 8.192 usec + * The rate is provided in Mbits per second + * To tralslate from rate to quanta we need to multiply the + * rate by 8.192 usec and divide by 8 bits/byte. To avoid + * dealing with floating point we can round the values up + * to the nearest whole number ratio which gives us 128 / 125. + */ + tc_rate = (rate * 128) / 125; + + /* try to keep the rate limiting accurate by increasing + * the number of credits and interval for rates less than 4Gb/s + */ + if (rate < 4000) + interval <<= 1; + else + tc_rate >>= 1; + } + + /* update rate limiter with new values */ + fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval); + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K); + fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K); + + return 0; +} + +/** + * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list + * @hw: pointer to the HW structure + * @vf_idx: index of VF receiving GLORT + * + * Update the interrupt moderator linked list to include any MSI-X + * interrupts which the VF has enabled in the MSI-X vector table. + **/ +static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx, vf_v_limit, i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine vector offset and count*/ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* search for first vector that is not masked */ + for (i = vf_v_limit - 1; i > vf_v_idx; i--) { + if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + fm10k_write_reg(hw, FM10K_ITR2(0), i); + else + fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i); + + return 0; +} + +/** + * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Assign a MAC address and default VLAN to a VF and notify it of the update + **/ +static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i; + u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0; + s32 err = 0; + u16 vf_idx, vf_vid; + + /* verify vf is in range */ + if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 
32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + + /* calculate starting index for queues */ + vf_idx = vf_info->vf_idx; + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + qmap_idx = qmap_stride * vf_idx; + + /* MAP Tx queue back to 0 temporarily, and disable it */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); + fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR; + else + vf_vid = vf_info->sw_vid; + + /* generate MAC_ADDR request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + vf_info->mac, vf_vid); + + /* load onto outgoing mailbox, ignore any errors on enqueue */ + if (vf_info->mbx.ops.enqueue_tx) + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + + /* verify ring has disabled before modifying base address registers */ + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); + for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) { + /* limit ourselves to a 1ms timeout */ + if (timeout == 10) { + err = FM10K_ERR_DMA_PENDING; + goto err_out; + } + + usleep_range(100, 200); + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); + } + + /* Update base address registers to contain MAC address */ + if (is_valid_ether_addr(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* Record the base address into queue 0 */ + fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); + fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); + +err_out: + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & + FM10K_TXQCTL_VID_MASK; + txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + + /* assign VID */ + for (i = 0; i < queues_per_pool; i++) + fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); + + /* restore the queue back to VF ownership */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + return err; +} + +/** + * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Reassign the interrupts and queues to a VF following an FLR + **/ +static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx; + u32 tdbal = 0, tdbah = 0, txqctl, rxqctl; + u16 vf_v_idx, vf_v_limit, vf_vid; + u8 vf_idx = vf_info->vf_idx; + int i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* clear event notification of VF FLR */ + fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32)); + + /* force timeout and then disconnect the mailbox */ + vf_info->mbx.timeout = 0; + if (vf_info->mbx.ops.disconnect) + vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); + + /* determine vector offset and count*/ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 
32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + qmap_idx = qmap_stride * vf_idx; + + /* make all the queues inaccessible to the VF */ + for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) { + fm10k_write_reg(hw, FM10K_TQMAP(i), 0); + fm10k_write_reg(hw, FM10K_RQMAP(i), 0); + } + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid; + else + vf_vid = vf_info->sw_vid; + + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | + (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT); + + /* stop further DMA and reset queue ownership back to VF */ + for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { + fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); + fm10k_write_reg(hw, FM10K_RXDCTL(i), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl); + } + + /* reset TC with -1 credits and no quanta to prevent transmit */ + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0); + fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0); + fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), + FM10K_TC_CREDIT_CREDIT_MASK); + + /* update our first entry in the table based on previous VF */ + if (!vf_idx) + hw->mac.ops.update_int_moderator(hw); + else + hw->iov.ops.assign_int_moderator(hw, vf_idx - 1); + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx); + else + fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx); + + /* link remaining vectors so that next points to previous */ + for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++) + fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1); + + /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */ + for (i = FM10K_VFMBMEM_LEN; i--;) + fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0); + for (i = FM10K_VLAN_TABLE_SIZE; i--;) + fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0); + for (i = FM10K_RETA_SIZE; i--;) + fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0); + for (i = FM10K_RSSRK_SIZE; i--;) + fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0); + fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0); + + /* Update base address registers to contain MAC address */ + if (is_valid_ether_addr(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* map queue pairs back to VF from last to first*/ + for (i = queues_per_pool; i--;) { + fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); + fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); + } + + return 0; +} + +/** + * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * @lport_idx: Logical port offset from the hardware glort + * @flags: Set of capability flags to extend port beyond basic functionality + * + * This function allows enabling a VF port by assigning it a GLORT and + * setting the flags so that 
it can enable an Rx mode. + **/ +static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u16 lport_idx, u8 flags) +{ + u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE; + vf_info->glort = glort; + + return 0; +} + +/** + * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * + * This function disables a VF port by stripping it of a GLORT and + * setting the flags so that it cannot enable any Rx mode. + **/ +static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u32 msg[1]; + + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) { + /* notify switch that this port has been disabled */ + fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + } + + /* clear flags and glort if it exists */ + vf_info->vf_flags = 0; + vf_info->glort = 0; +} + +/** + * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs + * @hw: pointer to hardware structure + * @q: stats for all queues of a VF + * @vf_idx: index of VF + * + * This function collects queue stats for VFs. + **/ +static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u16 vf_idx) +{ + u32 idx, qpp; + + /* get stats for all of the queues */ + qpp = fm10k_queues_per_pool(hw); + idx = fm10k_vf_queue_index(hw, vf_idx); + fm10k_update_hw_stats_q(hw, q, idx, qpp); +} + +static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u64 timestamp) +{ + u32 msg[4]; + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); + fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp); + + return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); +} + +/** + * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MSI-X requests from the VF. The + * assumption is that in this case it is acceptable to just directly + * hand off the message form the VF to the underlying shared code. + **/ +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 vf_idx = vf_info->vf_idx; + + return hw->iov.ops.assign_int_moderator(hw, vf_idx); +} + +/** + * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MAC/VLAN requests from the VF. + * The assumption is that in this case it is acceptable to just directly + * hand off the message form the VF to the underlying shared code. 
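The VLAN handling in fm10k_iov_msg_mac_vlan_pf(), whose body follows, substitutes the administratively assigned VLAN whenever a VF asks for VLAN 0 and treats the remaining high bit as the add/clear flag. The sketch below assumes FM10K_VLAN_CLEAR is bit 15, which matches how the handler masks it, although the constant itself is defined outside this patch:

#include <stdint.h>
#include <stdio.h>

#define VLAN_CLEAR 0x8000u	/* assumed value of FM10K_VLAN_CLEAR */

static uint16_t effective_vid(uint16_t requested, uint16_t pf_vid, uint16_t sw_vid)
{
	/* VLAN 0, with or without the clear flag, means "use the default VLAN" */
	if (!(requested & ~VLAN_CLEAR))
		requested |= pf_vid ? pf_vid : sw_vid;
	return requested;
}

int main(void)
{
	/* VF asks for VLAN 0 while the PF pinned it to VLAN 100: add on VLAN 100 */
	uint16_t vid = effective_vid(0, 100, 5);

	printf("vid = %u, set = %d\n", vid & ~VLAN_CLEAR, !(vid & VLAN_CLEAR));	/* 100, 1 */

	/* the same request with the clear flag removes the entry from VLAN 100 */
	vid = effective_vid(VLAN_CLEAR, 100, 5);
	printf("vid = %u, set = %d\n", vid & ~VLAN_CLEAR, !(vid & VLAN_CLEAR));	/* 100, 0 */
	return 0;
}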
+ **/ +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + int err = 0; + u8 mac[ETH_ALEN]; + u32 *result; + u16 vlan; + u32 vid; + + /* we shouldn't be updating rules on a disabled interface */ + if (!FM10K_VF_FLAG_ENABLED(vf_info)) + err = FM10K_ERR_PARAM; + + if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { + result = results[FM10K_MAC_VLAN_MSG_VLAN]; + + /* record VLAN id requested */ + err = fm10k_tlv_attr_get_u32(result, &vid); + if (err) + return err; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vid || (vid == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vid |= vf_info->pf_vid; + else + vid |= vf_info->sw_vid; + } else if (vid != vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* update VSI info for VF in regards to VLAN table */ + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, + !(vid & FM10K_VLAN_CLEAR)); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { + result = results[FM10K_MAC_VLAN_MSG_MAC]; + + /* record unicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* block attempts to set MAC for a locked device */ + if (is_valid_ether_addr(vf_info->mac) && + memcmp(mac, vf_info->mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vlan |= vf_info->pf_vid; + else + vlan |= vf_info->sw_vid; + } else if (vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* notify switch of request for new unicast address */ + err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, + !(vlan & FM10K_VLAN_CLEAR), 0); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { + result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; + + /* record multicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* verify that the VF is allowed to request multicast */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) + return FM10K_ERR_PARAM; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vlan |= vf_info->pf_vid; + else + vlan |= vf_info->sw_vid; + } else if (vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* notify switch of request for new multicast address */ + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, + !(vlan & FM10K_VLAN_CLEAR), 0); + } + + return err; +} + +/** + * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode + * @vf_info: VF info structure containing capability flags + * @mode: Requested xcast mode + * + * This function outputs the mode that most closely matches the requested + * mode. 
If not modes match it will request we disable the port + **/ +static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info, + u8 mode) +{ + u8 vf_flags = vf_info->vf_flags; + + /* match up mode to capabilities as best as possible */ + switch (mode) { + case FM10K_XCAST_MODE_PROMISC: + if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE) + return FM10K_XCAST_MODE_PROMISC; + /* fallthough */ + case FM10K_XCAST_MODE_ALLMULTI: + if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE) + return FM10K_XCAST_MODE_ALLMULTI; + /* fallthough */ + case FM10K_XCAST_MODE_MULTI: + if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE) + return FM10K_XCAST_MODE_MULTI; + /* fallthough */ + case FM10K_XCAST_MODE_NONE: + if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE) + return FM10K_XCAST_MODE_NONE; + /* fallthough */ + default: + break; + } + + /* disable interface as it should not be able to request any */ + return FM10K_XCAST_MODE_DISABLE; +} + +/** + * fm10k_iov_msg_lport_state_pf - Message handler for port state requests + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for port state requests. The port + * state requests for now are basic and consist of enabling or disabling + * the port. + **/ +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u32 *result; + s32 err = 0; + u32 msg[2]; + u8 mode = 0; + + /* verify VF is allowed to enable even minimal mode */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)) + return FM10K_ERR_PARAM; + + if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) { + result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE]; + + /* XCAST mode update requested */ + err = fm10k_tlv_attr_get_u8(result, &mode); + if (err) + return FM10K_ERR_PARAM; + + /* prep for possible demotion depending on capabilities */ + mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); + + /* if mode is not currently enabled, enable it */ + if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode))) + fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); + + /* swap mode back to a bit flag */ + mode = FM10K_VF_FLAG_SET_MODE(mode); + } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) { + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) + err = fm10k_update_lport_state_pf(hw, vf_info->glort, + 1, false); + + /* when enabling the port we should reset the rate limiters */ + hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate); + + /* set mode for minimal functionality */ + mode = FM10K_VF_FLAG_SET_MODE_NONE; + + /* generate port state response to notify VF it is ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY); + mbx->ops.enqueue_tx(hw, mbx, msg); + } + + /* if enable state toggled note the update */ + if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode)) + err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1, + !!mode); + + /* if state change succeeded, then update our stored state */ + mode |= FM10K_VF_FLAG_CAPABLE(vf_info); + if (!err) + vf_info->vf_flags = mode; + + return err; +} + +const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + 
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function collects and aggregates global and per queue hardware + * statistics. + **/ +static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; + u32 id, id_prev; + + /* Use Tx queue 0 as a canary to detect a reset */ + id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); + + /* Read Global Statistics */ + do { + timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT, + &stats->timeout); + ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur); + ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca); + um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um); + xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); + vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, + &stats->vlan_drop); + loopback_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); + nodesc_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_NODESC_DROP, + &stats->nodesc_drop); + + /* if value has not changed then we have consistent data */ + id_prev = id; + id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); + } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id &= FM10K_TXQCTL_ID_MASK; + id |= FM10K_STAT_VALID; + + /* Update Global Statistics */ + if (stats->stats_idx == id) { + stats->timeout.count += timeout; + stats->ur.count += ur; + stats->ca.count += ca; + stats->um.count += um; + stats->xec.count += xec; + stats->vlan_drop.count += vlan_drop; + stats->loopback_drop.count += loopback_drop; + stats->nodesc_drop.count += nodesc_drop; + } + + /* Update bases and record current PF id */ + fm10k_update_hw_base_32b(&stats->timeout, timeout); + fm10k_update_hw_base_32b(&stats->ur, ur); + fm10k_update_hw_base_32b(&stats->ca, ca); + fm10k_update_hw_base_32b(&stats->um, um); + fm10k_update_hw_base_32b(&stats->xec, xec); + fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop); + fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop); + fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop); + stats->stats_idx = id; + + /* Update Queue Statistics */ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for global and per queue hardware + * statistics. 
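fm10k_update_hw_stats_pf() above takes a lock-free snapshot: the ID field of TXQCTL(0) is sampled before and after the block of counter reads, and the snapshot is discarded and retried if the field moved, so a reset in the middle of the reads cannot corrupt the accumulated statistics. A generic standalone sketch of that retry pattern, with read_id()/read_counter() standing in for the register accessors and ID_MASK for FM10K_TXQCTL_ID_MASK:

#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0x0000ffffu	/* stand-in for FM10K_TXQCTL_ID_MASK */

static uint32_t hw_id = 0x12;	/* trivial stand-ins for the register reads */
static uint32_t read_id(void) { return hw_id; }
static uint64_t read_counter(void) { return 42; }

static uint64_t consistent_snapshot(void)
{
	uint32_t id = read_id(), id_prev;
	uint64_t value;

	do {
		value = read_counter();	/* may race with a hardware reset */
		id_prev = id;
		id = read_id();
	} while ((id ^ id_prev) & ID_MASK);	/* retry until the ID held still */

	return value;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)consistent_snapshot());	/* 42 */
	return 0;
}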
+ **/ +static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + /* Unbind Global Statistics */ + fm10k_unbind_hw_stats_32b(&stats->timeout); + fm10k_unbind_hw_stats_32b(&stats->ur); + fm10k_unbind_hw_stats_32b(&stats->ca); + fm10k_unbind_hw_stats_32b(&stats->um); + fm10k_unbind_hw_stats_32b(&stats->xec); + fm10k_unbind_hw_stats_32b(&stats->vlan_drop); + fm10k_unbind_hw_stats_32b(&stats->loopback_drop); + fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_pf(hw, stats); +} + +/** + * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order + * to limit the access to memory beyond what is physically in the system. + **/ +static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask) +{ + /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */ + u32 phyaddr = (u32)(dma_mask >> 32); + + fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr); +} + +/** + * fm10k_get_fault_pf - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. + **/ +static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, + struct fm10k_fault *fault) +{ + u32 func; + + /* verify the fault register is in range and is aligned */ + switch (type) { + case FM10K_PCA_FAULT: + case FM10K_THI_FAULT: + case FM10K_FUM_FAULT: + break; + default: + return FM10K_ERR_PARAM; + } + + /* only service faults that are valid */ + func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC); + if (!(func & FM10K_FAULT_FUNC_VALID)) + return FM10K_ERR_PARAM; + + /* read remaining fields */ + fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); + fault->address <<= 32; + fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); + fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); + + /* clear valid bit to allow for next error */ + fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID); + + /* Record which function triggered the error */ + if (func & FM10K_FAULT_FUNC_PF) + fault->func = 0; + else + fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >> + FM10K_FAULT_FUNC_VF_SHIFT); + + /* record fault type */ + fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK; + + return 0; +} + +/** + * fm10k_request_lport_map_pf - Request LPORT map from the switch API + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* issue request asking for LPORT map */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_get_host_state_pf - Returns the state of the switch and mailbox + * @hw: pointer to hardware structure + * @switch_ready: pointer to boolean value that will record switch state + * + * This funciton will check the DMA_CTRL2 register and mailbox in order + * to determine if the switch 
is ready for the PF to begin requesting + * addresses and mapping traffic to the local interface. + **/ +static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) +{ + s32 ret_val = 0; + u32 dma_ctrl2; + + /* verify the switch is ready for interraction */ + dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) + goto out; + + /* retrieve generic host state info */ + ret_val = fm10k_get_host_state_generic(hw, switch_ready); + if (ret_val) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) + ret_val = fm10k_request_lport_map_pf(hw); + +out: + return ret_val; +} + +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler configures the lport mapping based on the reply from the + * switch API. + **/ +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, mask; + u32 dglort_map; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP], + &dglort_map); + if (err) + return err; + + /* extract values out of the header */ + glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT); + mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK); + + /* verify mask is set and none of the masked bits in glort are set */ + if (!mask || (glort & ~mask)) + return FM10K_ERR_PARAM; + + /* verify the mask is contiguous, and that it is 1's followed by 0's */ + if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE) + return FM10K_ERR_PARAM; + + /* record the glort, mask, and port count */ + hw->mac.dglort_map = dglort_map; + + return 0; +} + +const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler configures the default VLAN for the PF + **/ +s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, pvid; + u32 pvid_update; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], + &pvid_update); + if (err) + return err; + + /* extract values from the pvid update */ + glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); + pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* verify VID is valid */ + if (pvid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record the port VLAN ID value */ + hw->mac.default_vid = pvid; + + return 0; +} + +/** + * fm10k_record_global_table_data - Move global table data to swapi table info + * @from: pointer to source table data structure + * @to: pointer to destination table info structure + * + * This function is will copy table_data to the table_info contained in + * the hw struct. 
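The mask test in fm10k_msg_lport_map_pf() above is compact enough to deserve a worked example: isolating the lowest set bit of the mask and adding the mask back overflows past bit 15 only when the mask is a block of ones anchored at the top of the 16-bit glort space; anything else leaves a bit behind and the map is rejected. The 0xffff below stands in for FM10K_DGLORTMAP_NONE as the check uses it:

#include <stdint.h>
#include <stdio.h>

static int lport_mask_valid(uint32_t mask)
{
	if (!mask)
		return 0;
	/* same expression as the driver: a non-zero result means "reject" */
	return !((((~(mask - 1)) & mask) + mask) & 0xffffu);
}

int main(void)
{
	printf("%d\n", lport_mask_valid(0xff00));	/* 1: ones followed by zeros */
	printf("%d\n", lport_mask_valid(0xffff));	/* 1: all ones */
	printf("%d\n", lport_mask_valid(0x0ff0));	/* 0: contiguous but below bit 15 */
	printf("%d\n", lport_mask_valid(0xf0f0));	/* 0: not contiguous */
	return 0;
}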
+ **/ +static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, + struct fm10k_swapi_table_info *to) +{ + /* convert from le32 struct to CPU byte ordered values */ + to->used = le32_to_cpu(from->used); + to->avail = le32_to_cpu(from->avail); +} + +const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_err_pf - Message handler for error reply + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler will capture the data for any error replies to previous + * messages that the PF has sent. + **/ +s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_error err_msg; + s32 err; + + /* extract structure from message */ + err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR], + &err_msg, sizeof(err_msg)); + if (err) + return err; + + /* record table status */ + fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac); + fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop); + fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu); + + /* record SW API status value */ + hw->swapi.status = le32_to_cpu(err_msg.status); + + return 0; +} + +const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, + sizeof(struct fm10k_swapi_1588_timestamp)), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 timestamp handler */ + +/** + * fm10k_adjust_systime_pf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function will adjust the SYSTIME_CFG register contained in BAR 4 + * if this function is supported for BAR 4 access. The adjustment amount + * is based on the parts per billion value provided and adjusted to a + * value based on parts per 2^48 clock cycles. + * + * If adjustment is not supported or the requested value is too large + * we will return an error. + **/ +static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) +{ + u64 systime_adjust; + + /* if sw_addr is not set we don't have switch register access */ + if (!hw->sw_addr) + return ppb ? FM10K_ERR_PARAM : 0; + + /* we must convert the value from parts per billion to parts per + * 2^48 cycles. In addition I have opted to only use the 30 most + * significant bits of the adjustment value as the 8 least + * significant bits are located in another register and represent + * a value significantly less than a part per billion, the result + * of dropping the 8 least significant bits is that the adjustment + * value is effectively multiplied by 2^8 when we write it. + * + * As a result of all this the math for this breaks down as follows: + * ppb / 10^9 == adjust * 2^8 / 2^48 + * If we solve this for adjust, and simplify it comes out as: + * ppb * 2^31 / 5^9 == adjust + */ + systime_adjust = (ppb < 0) ? 
-ppb : ppb; + systime_adjust <<= 31; + do_div(systime_adjust, 1953125); + + /* verify the requested adjustment value is in range */ + if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) + return FM10K_ERR_PARAM; + + if (ppb < 0) + systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE; + + fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); + + return 0; +} + +/** + * fm10k_read_systime_pf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanosecods. In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_pf[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static struct fm10k_mac_ops mac_ops_pf = { + .get_bus_info = &fm10k_get_bus_info_generic, + .reset_hw = &fm10k_reset_hw_pf, + .init_hw = &fm10k_init_hw_pf, + .start_hw = &fm10k_start_hw_generic, + .stop_hw = &fm10k_stop_hw_generic, + .is_slot_appropriate = &fm10k_is_slot_appropriate_pf, + .update_vlan = &fm10k_update_vlan_pf, + .read_mac_addr = &fm10k_read_mac_addr_pf, + .update_uc_addr = &fm10k_update_uc_addr_pf, + .update_mc_addr = &fm10k_update_mc_addr_pf, + .update_xcast_mode = &fm10k_update_xcast_mode_pf, + .update_int_moderator = &fm10k_update_int_moderator_pf, + .update_lport_state = &fm10k_update_lport_state_pf, + .update_hw_stats = &fm10k_update_hw_stats_pf, + .rebind_hw_stats = &fm10k_rebind_hw_stats_pf, + .configure_dglort_map = &fm10k_configure_dglort_map_pf, + .set_dma_mask = &fm10k_set_dma_mask_pf, + .get_fault = &fm10k_get_fault_pf, + .get_host_state = &fm10k_get_host_state_pf, + .adjust_systime = &fm10k_adjust_systime_pf, + .read_systime = &fm10k_read_systime_pf, +}; + +static struct fm10k_iov_ops iov_ops_pf = { + .assign_resources = &fm10k_iov_assign_resources_pf, + .configure_tc = &fm10k_iov_configure_tc_pf, + .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf, + .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, + .reset_resources = &fm10k_iov_reset_resources_pf, + .set_lport = &fm10k_iov_set_lport_pf, + .reset_lport = &fm10k_iov_reset_lport_pf, + .update_stats = &fm10k_iov_update_stats_pf, + .report_timestamp = &fm10k_iov_report_timestamp_pf, +}; + +static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) +{ + fm10k_get_invariants_generic(hw); + + return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); +} + +struct fm10k_info fm10k_pf_info = { + .mac = fm10k_mac_pf, + .get_invariants = &fm10k_get_invariants_pf, + .mac_ops = &mac_ops_pf, + .iov_ops = &iov_ops_pf, +}; diff --git 
a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab1db4fff32027b5b72501a69b690fd6e90343d --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -0,0 +1,135 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_PF_H_ +#define _FM10K_PF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort); +u16 fm10k_queues_per_pool(struct fm10k_hw *hw); +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx); + +enum fm10k_pf_tlv_msg_id_v1 { + FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */ + FM10K_PF_MSG_ID_XCAST_MODES = 0x001, + FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002, + FM10K_PF_MSG_ID_LPORT_MAP = 0x100, + FM10K_PF_MSG_ID_LPORT_CREATE = 0x200, + FM10K_PF_MSG_ID_LPORT_DELETE = 0x201, + FM10K_PF_MSG_ID_CONFIG = 0x300, + FM10K_PF_MSG_ID_UPDATE_PVID = 0x400, + FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501, + FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502, + FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, + FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, + FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, + FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, + FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, +}; + +enum fm10k_pf_tlv_attr_id_v1 { + FM10K_PF_ATTR_ID_ERR = 0x00, + FM10K_PF_ATTR_ID_LPORT_MAP = 0x01, + FM10K_PF_ATTR_ID_XCAST_MODE = 0x02, + FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03, + FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04, + FM10K_PF_ATTR_ID_CONFIG = 0x05, + FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06, + FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07, + FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08, + FM10K_PF_ATTR_ID_FLOW_STATE = 0x09, + FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A, + FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, + FM10K_PF_ATTR_ID_PORT = 0x0C, + FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, + FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, +}; + +#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 +#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16 +#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16 +#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16 + +#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0 +#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 + +struct fm10k_mac_update { + __le32 mac_lower; + __le16 mac_upper; + __le16 vlan; + __le16 glort; + u8 flags; + u8 action; +}; + +struct fm10k_global_table_data { + __le32 used; + __le32 avail; +}; + +struct fm10k_swapi_error { + __le32 status; + struct fm10k_global_table_data mac; + struct fm10k_global_table_data nexthop; + struct fm10k_global_table_data ffu; +}; + +struct fm10k_swapi_1588_timestamp { + __le64 egress; + __le64 ingress; + __le16 dglort; + __le16 sglort; +}; + +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); 
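+/* A minimal sketch, not part of the driver: per the FM10K_MSG_LPORT_MAP_*
+ * shift/size values above, the LPORT_MAP attribute packs the base glort in
+ * bits 15:0 and the glort mask in bits 31:16 of a single 32-bit word. The
+ * helper below (its name is illustrative only) shows roughly how a handler
+ * such as fm10k_msg_lport_map_pf can split the two halves back apart.
+ */
+static inline void fm10k_sketch_unpack_lport_map(u32 lport_map,
+						  u16 *glort, u16 *mask)
+{
+	/* low 16 bits hold the glort, high 16 bits hold the mask */
+	*glort = (u16)(lport_map >> FM10K_MSG_LPORT_MAP_GLORT_SHIFT);
+	*mask = (u16)(lport_map >> FM10K_MSG_LPORT_MAP_MASK_SHIFT);
+}
+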
+extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; +#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ + fm10k_lport_map_msg_attr, func) +s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; +#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ + fm10k_update_pvid_msg_attr, func) + +s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; +#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; +#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ + fm10k_1588_timestamp_msg_attr, func) + +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; + +extern struct fm10k_info fm10k_pf_info; +#endif /* _FM10K_PF_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..7822809436a362069770c57db8c6e0f03cbf27ed --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -0,0 +1,463 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "fm10k.h" + +#define FM10K_TS_TX_TIMEOUT (HZ * 15) + +void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, + struct skb_shared_hwtstamps *hwtstamp, + u64 systime) +{ + unsigned long flags; + + read_lock_irqsave(&interface->systime_lock, flags); + systime += interface->ptp_adjust; + read_unlock_irqrestore(&interface->systime_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(systime); +} + +static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface, + __le16 dglort) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb; + + skb_queue_walk(list, skb) { + if (FM10K_CB(skb)->fi.w.dglort == dglort) + return skb; + } + + return NULL; +} + +void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *clone; + unsigned long flags; + __le16 dglort; + + /* create clone for us to return on the Tx path */ + clone = skb_clone_sk(skb); + if (!clone) + return; + + FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; + dglort = FM10K_CB(clone)->fi.w.dglort; + + spin_lock_irqsave(&list->lock, flags); + + /* attempt to locate any buffers with the same dglort, + * if none are present then insert skb in tail of list + */ + skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); + if (!skb) + __skb_queue_tail(list, clone); + + spin_unlock_irqrestore(&list->lock, flags); + + /* if list is already has one then we just free the clone */ + if (skb) + kfree_skb(skb); + else + skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; +} + +void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, + u64 systime) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&list->lock, flags); + + /* attempt to locate and pull the sk_buff out of the list */ + skb = fm10k_ts_tx_skb(interface, dglort); + if (skb) + __skb_unlink(skb, list); + + spin_unlock_irqrestore(&list->lock, flags); + + /* if not found do nothing */ + if (!skb) + return; + + /* timestamp the sk_buff and return it to the socket */ + fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); + skb_complete_tx_timestamp(skb, &shhwtstamps); +} + +void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb, *tmp; + unsigned long flags; + + /* If we're down or resetting, just bail */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + spin_lock_irqsave(&list->lock, flags); + + /* walk though the list and flush any expired timestamp packets */ + skb_queue_walk_safe(list, skb, tmp) { + if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout)) + continue; + __skb_unlink(skb, list); + kfree_skb(skb); + interface->tx_hwtstamp_timeouts++; + } + + spin_unlock_irqrestore(&list->lock, flags); +} + +static u64 fm10k_systime_read(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + return hw->mac.ops.read_systime(hw); +} + +void fm10k_ts_reset(struct fm10k_intfc *interface) +{ + s64 ns = ktime_to_ns(ktime_get_real()); + unsigned long flags; + + /* reinitialize the clock */ + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust = fm10k_systime_read(interface) - ns; + write_unlock_irqrestore(&interface->systime_lock, 
flags); +} + +void fm10k_ts_init(struct fm10k_intfc *interface) +{ + /* Initialize lock protecting systime access */ + rwlock_init(&interface->systime_lock); + + /* Initialize skb queue for pending timestamp requests */ + skb_queue_head_init(&interface->ts_tx_skb_queue); + + /* reset the clock to current kernel time */ + fm10k_ts_reset(interface); +} + +/** + * fm10k_get_ts_config - get current hardware timestamping configuration + * @netdev: network interface device structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct hwtstamp_config *config = &interface->ts_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * fm10k_set_ts_config - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: ioctl data + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + */ +int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct hwtstamp_config ts_config; + + if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config))) + return -EFAULT; + + /* reserved for future extensions */ + if (ts_config.flags) + return -EINVAL; + + switch (ts_config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + /* we likely need some check here to see if this is supported */ + break; + default: + return -ERANGE; + } + + switch (ts_config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_ALL: + interface->flags |= FM10K_FLAG_RX_TS_ENABLED; + ts_config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* save these settings for future reference */ + interface->ts_config = ts_config; + + return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ? 
+ -EFAULT : 0; +} + +static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + int err; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + hw = &interface->hw; + + err = hw->mac.ops.adjust_systime(hw, ppb); + + /* the only error we should see is if the value is out of range */ + return (err == FM10K_ERR_PARAM) ? -ERANGE : err; +} + +static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fm10k_intfc *interface; + unsigned long flags; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust += delta; + write_unlock_irqrestore(&interface->systime_lock, flags); + + return 0; +} + +static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct fm10k_intfc *interface; + unsigned long flags; + u64 now; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + read_lock_irqsave(&interface->systime_lock, flags); + now = fm10k_systime_read(interface) + interface->ptp_adjust; + read_unlock_irqrestore(&interface->systime_lock, flags); + + *ts = ns_to_timespec(now); + + return 0; +} + +static int fm10k_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct fm10k_intfc *interface; + unsigned long flags; + u64 ns = timespec_to_ns(ts); + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust = fm10k_systime_read(interface) - ns; + write_unlock_irqrestore(&interface->systime_lock, flags); + + return 0; +} + +static int fm10k_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct ptp_clock_time *t = &rq->perout.period; + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + u64 period; + u32 step; + + /* we can only support periodic output */ + if (rq->type != PTP_CLK_REQ_PEROUT) + return -EINVAL; + + /* verify the requested channel is there */ + if (rq->perout.index >= ptp->n_per_out) + return -EINVAL; + + /* we cannot enforce start time as there is no + * mechanism for that in the hardware, we can only control + * the period. 
+ */ + + /* we cannot support periods greater than 4 seconds due to reg limit */ + if (t->sec > 4 || t->sec < 0) + return -ERANGE; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + hw = &interface->hw; + + /* we simply cannot support the operation if we don't have BAR4 */ + if (!hw->sw_addr) + return -ENOTSUPP; + + /* convert to unsigned 64b ns, verify we can put it in a 32b register */ + period = t->sec * 1000000000LL + t->nsec; + + /* determine the minimum size for period */ + step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) & + FM10K_SYSTIME_CFG_STEP_MASK); + + /* verify the value is in range supported by hardware */ + if ((period && (period < step)) || (period > U32_MAX)) + return -ERANGE; + + /* notify hardware of request to being sending pulses */ + fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), + (u32)period); + + return 0; +} + +static struct ptp_pin_desc fm10k_ptp_pd[2] = { + { + .name = "IEEE1588_PULSE0", + .index = 0, + .func = PTP_PF_PEROUT, + .chan = 0 + }, + { + .name = "IEEE1588_PULSE1", + .index = 1, + .func = PTP_PF_PEROUT, + .chan = 1 + } +}; + +static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* verify the requested pin is there */ + if (pin >= ptp->n_pins || !ptp->pin_config) + return -EINVAL; + + /* enforce locked channels, no changing them */ + if (chan != ptp->pin_config[pin].chan) + return -EINVAL; + + /* we want to keep the functions locked as well */ + if (func != ptp->pin_config[pin].func) + return -EINVAL; + + return 0; +} + +void fm10k_ptp_register(struct fm10k_intfc *interface) +{ + struct ptp_clock_info *ptp_caps = &interface->ptp_caps; + struct device *dev = &interface->pdev->dev; + struct ptp_clock *ptp_clock; + + snprintf(ptp_caps->name, sizeof(ptp_caps->name), + "%s", interface->netdev->name); + ptp_caps->owner = THIS_MODULE; + /* This math is simply the inverse of the math in + * fm10k_adjust_systime_pf applied to an adjustment value + * of 2^30 - 1 which is the maximum value of the register: + * max_ppb == ((2^30 - 1) * 5^9) / 2^31 + */ + ptp_caps->max_adj = 976562; + ptp_caps->adjfreq = fm10k_ptp_adjfreq; + ptp_caps->adjtime = fm10k_ptp_adjtime; + ptp_caps->gettime = fm10k_ptp_gettime; + ptp_caps->settime = fm10k_ptp_settime; + + /* provide pins if BAR4 is accessible */ + if (interface->sw_addr) { + /* enable periodic outputs */ + ptp_caps->n_per_out = 2; + ptp_caps->enable = fm10k_ptp_enable; + + /* enable clock pins */ + ptp_caps->verify = fm10k_ptp_verify; + ptp_caps->n_pins = 2; + ptp_caps->pin_config = fm10k_ptp_pd; + } + + ptp_clock = ptp_clock_register(ptp_caps, dev); + if (IS_ERR(ptp_clock)) { + ptp_clock = NULL; + dev_err(dev, "ptp_clock_register failed\n"); + } else { + dev_info(dev, "registered PHC device %s\n", ptp_caps->name); + } + + interface->ptp_clock = ptp_clock; +} + +void fm10k_ptp_unregister(struct fm10k_intfc *interface) +{ + struct ptp_clock *ptp_clock = interface->ptp_clock; + struct device *dev = &interface->pdev->dev; + + if (!ptp_clock) + return; + + interface->ptp_clock = NULL; + + ptp_clock_unregister(ptp_clock); + dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..fd0a05f011a863e48f47994577c9d9077d8148e3 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -0,0 +1,863 @@ +/* Intel Ethernet Switch Host Interface 
Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_tlv.h" + +/** + * fm10k_tlv_msg_init - Initialize message block for TLV data storage + * @msg: Pointer to message block + * @msg_id: Message ID indicating message type + * + * This function return success if provided with a valid message pointer + **/ +s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) +{ + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; + + return 0; +} + +/** + * fm10k_tlv_attr_put_null_string - Place null terminated string on message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @string: Pointer to string to be stored in attribute + * + * This function will reorder a string to be CPU endian and store it in + * the attribute buffer. It will return success if provided with a valid + * pointers. + **/ +s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) +{ + u32 attr_data = 0, len = 0; + u32 *attr; + + /* verify pointers are not NULL */ + if (!string || !msg) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy string into local variable and then write to msg */ + do { + /* write data to message */ + if (len && !(len % 4)) { + attr[len / 4] = attr_data; + attr_data = 0; + } + + /* record character to offset location */ + attr_data |= (u32)(*string) << (8 * (len % 4)); + len++; + + /* test for NULL and then increment */ + } while (*(string++)); + + /* write last piece of data to message */ + attr[(len + 3) / 4] = attr_data; + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute + * @attr: Pointer to attribute + * @string: Pointer to location of destination string + * + * This function pulls the string back out of the attribute and will place + * it in the array pointed by by string. It will return success if provided + * with a valid pointers. + **/ +s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +{ + u32 len; + + /* verify pointers are not NULL */ + if (!string || !attr) + return FM10K_ERR_PARAM; + + len = *attr >> FM10K_TLV_LEN_SHIFT; + attr++; + + while (len--) + string[len] = (u8)(attr[len / 4] >> (8 * (len % 4))); + + return 0; +} + +/** + * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @mac_addr: MAC address to be stored + * + * This function will reorder a MAC address to be CPU endian and store it + * in the attribute buffer. 
It will return success if provided with a + * valid pointers. + **/ +s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, + const u8 *mac_addr, u16 vlan) +{ + u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT; + u32 *attr; + + /* verify pointers are not NULL */ + if (!msg || !mac_addr) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* record attribute header, update message length */ + attr[0] = len | attr_id; + + /* copy value into local variable and then write to msg */ + attr[1] = le32_to_cpu(*(const __le32 *)&mac_addr[0]); + attr[2] = le16_to_cpu(*(const __le16 *)&mac_addr[4]); + attr[2] |= (u32)vlan << 16; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute + * @attr: Pointer to attribute + * @attr_id: Attribute ID + * @mac_addr: location of buffer to store MAC address + * + * This function pulls the MAC address back out of the attribute and will + * place it in the array pointed by by mac_addr. It will return success + * if provided with a valid pointers. + **/ +s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) +{ + /* verify pointers are not NULL */ + if (!mac_addr || !attr) + return FM10K_ERR_PARAM; + + *(__le32 *)&mac_addr[0] = cpu_to_le32(attr[1]); + *(__le16 *)&mac_addr[4] = cpu_to_le16((u16)(attr[2])); + *vlan = (u16)(attr[2] >> 16); + + return 0; +} + +/** + * fm10k_tlv_attr_put_bool - Add header indicating value "true" + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will simply add an attribute header, the fact + * that the header is here means the attribute value is true, else + * it is false. The function will return success if provided with a + * valid pointers. + **/ +s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) +{ + /* verify pointers are not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* record attribute header */ + msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; + + /* add header length to length */ + *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + + return 0; +} + +/** + * fm10k_tlv_attr_put_value - Store integer value attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @value: Value to be written + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in a message attribute. The function will return success provided + * that msg is a valid pointer, and len is 1, 2, 4, or 8. + **/ +s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) +{ + u32 *attr; + + /* verify non-null msg and len is 1, 2, 4, or 8 */ + if (!msg || !len || len > 8 || (len & (len - 1))) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + if (len < 4) { + attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1); + } else { + attr[1] = (u32)value; + if (len > 4) + attr[2] = (u32)(value >> 32); + } + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_value - Get integer value stored in attribute + * @attr: Pointer to attribute + * @value: Pointer to destination buffer + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in the offset pointed to by value. 
The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len) +{ + /* verify pointers are not NULL */ + if (!attr || !value) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + if (len == 8) + *(u64 *)value = ((u64)attr[2] << 32) | attr[1]; + else if (len == 4) + *(u32 *)value = attr[1]; + else if (len == 2) + *(u16 *)value = (u16)attr[1]; + else + *(u8 *)value = (u8)attr[1]; + + return 0; +} + +/** + * fm10k_tlv_attr_put_le_struct - Store little endian structure in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @le_struct: Pointer to structure to be written + * @len: Size of le_struct + * + * This function will place a little endian structure value in a message + * attribute. The function will return success provided that all pointers + * are valid and length is a non-zero multiple of 4. + **/ +s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, + const void *le_struct, u32 len) +{ + const __le32 *le32_ptr = (const __le32 *)le_struct; + u32 *attr; + u32 i; + + /* verify non-null msg and len is in 32 bit words */ + if (!msg || !len || (len % 4)) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy le32 structure into host byte order at 32b boundaries */ + for (i = 0; i < (len / 4); i++) + attr[i + 1] = le32_to_cpu(le32_ptr[i]); + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute + * @attr: Pointer to attribute + * @le_struct: Pointer to structure to be written + * @len: Size of structure + * + * This function will place a little endian structure in the buffer + * pointed to by le_struct. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) +{ + __le32 *le32_ptr = (__le32 *)le_struct; + u32 i; + + /* verify pointers are not NULL */ + if (!le_struct || !attr) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + attr++; + + for (i = 0; len; i++, len -= 4) + le32_ptr[i] = cpu_to_le32(attr[i]); + + return 0; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will mark off a new nested region for encapsulating + * a given set of attributes. The idea is if you wish to place a secondary + * structure within the message this mechanism allows for that. The + * function will return NULL on failure, and a pointer to the start + * of the nested attributes on success. + **/ +u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +{ + u32 *attr; + + /* verify pointer is not NULL */ + if (!msg) + return NULL; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + attr[0] = attr_id; + + /* return pointer to nest header */ + return attr; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * + * This function closes off an existing set of nested attributes. The + * message pointer should be pointing to the parent of the nest. 
So in + * the case of a nest within the nest this would be the outer nest pointer. + * This function will return success provided all pointers are valid. + **/ +s32 fm10k_tlv_attr_nest_stop(u32 *msg) +{ + u32 *attr; + u32 len; + + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* locate the nested header and retrieve its length */ + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT; + + /* only include nest if data was added to it */ + if (len) { + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += len; + } + + return 0; +} + +/** + * fm10k_tlv_attr_validate - Validate attribute metadata + * @attr: Pointer to attribute + * @tlv_attr: Type and length info for attribute + * + * This function does some basic validation of the input TLV. It + * verifies the length, and in the case of null terminated strings + * it verifies that the last byte is null. The function will + * return FM10K_ERR_PARAM if any attribute is malformed, otherwise + * it returns 0. + **/ +static s32 fm10k_tlv_attr_validate(u32 *attr, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 attr_id = *attr & FM10K_TLV_ID_MASK; + u16 len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* verify this is an attribute and not a message */ + if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)) + return FM10K_ERR_PARAM; + + /* search through the list of attributes to find a matching ID */ + while (tlv_attr->id < attr_id) + tlv_attr++; + + /* if didn't find a match then we should exit */ + if (tlv_attr->id != attr_id) + return FM10K_NOT_IMPLEMENTED; + + /* move to start of attribute data */ + attr++; + + switch (tlv_attr->type) { + case FM10K_TLV_NULL_STRING: + if (!len || + (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4))))) + return FM10K_ERR_PARAM; + if (len > tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_MAC_ADDR: + if (len != ETH_ALEN) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_BOOL: + if (len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_UNSIGNED: + case FM10K_TLV_SIGNED: + if (len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_LE_STRUCT: + /* struct must be 4 byte aligned */ + if ((len % 4) || len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_NESTED: + /* nested attributes must be 4 byte aligned */ + if (len % 4) + return FM10K_ERR_PARAM; + break; + default: + /* attribute id is mapped to bad value */ + return FM10K_ERR_PARAM; + } + + return 0; +} + +/** + * fm10k_tlv_attr_parse - Parses stream of attribute data + * @attr: Pointer to attribute list + * @results: Pointer array to store pointers to attributes + * @tlv_attr: Type and length info for attributes + * + * This function validates a stream of attributes and parses them + * up into an array of pointers stored in results. The function will + * return FM10K_ERR_PARAM on any input or message error, + * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array + * and 0 on success. 
+ **/ +s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 i, attr_id, offset = 0; + s32 err = 0; + u16 len; + + /* verify pointers are not NULL */ + if (!attr || !results) + return FM10K_ERR_PARAM; + + /* initialize results to NULL */ + for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++) + results[i] = NULL; + + /* pull length from the message header */ + len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* no attributes to parse if there is no length */ + if (!len) + return 0; + + /* no attributes to parse, just raw data, message becomes attribute */ + if (!tlv_attr) { + results[0] = attr; + return 0; + } + + /* move to start of attribute data */ + attr++; + + /* run through list parsing all attributes */ + while (offset < len) { + attr_id = *attr & FM10K_TLV_ID_MASK; + + if (attr_id < FM10K_TLV_RESULTS_MAX) + err = fm10k_tlv_attr_validate(attr, tlv_attr); + else + err = FM10K_NOT_IMPLEMENTED; + + if (err < 0) + return err; + if (!err) + results[attr_id] = attr; + + /* update offset */ + offset += FM10K_TLV_DWORD_LEN(*attr) * 4; + + /* move to next attribute */ + attr = &attr[FM10K_TLV_DWORD_LEN(*attr)]; + } + + /* we should find ourselves at the end of the list */ + if (offset != len) + return FM10K_ERR_PARAM; + + return 0; +} + +/** + * fm10k_tlv_msg_parse - Parses message header and calls function handler + * @hw: Pointer to hardware structure + * @msg: Pointer to message + * @mbx: Pointer to mailbox information structure + * @func: Function array containing list of message handling functions + * + * This function should be the first function called upon receiving a + * message. The handler will identify the message type and call the correct + * handler for the given message. It will return the value from the function + * call on a recognized message type, otherwise it will return + * FM10K_NOT_IMPLEMENTED on an unrecognized type. + **/ +s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, + struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *data) +{ + u32 *results[FM10K_TLV_RESULTS_MAX]; + u32 msg_id; + s32 err; + + /* verify pointer is not NULL */ + if (!msg || !data) + return FM10K_ERR_PARAM; + + /* verify this is a message and not an attribute */ + if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) + return FM10K_ERR_PARAM; + + /* grab message ID */ + msg_id = *msg & FM10K_TLV_ID_MASK; + + while (data->id < msg_id) + data++; + + /* if we didn't find it then pass it up as an error */ + if (data->id != msg_id) { + while (data->id != FM10K_TLV_ERROR) + data++; + } + + /* parse the attributes into the results list */ + err = fm10k_tlv_attr_parse(msg, results, data->attr); + if (err < 0) + return err; + + return data->func(hw, results, mbx); +} + +/** + * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Unused mailbox pointer + * + * This function is a default handler for unrecognized messages. At a + * a minimum it just indicates that the message requested was + * unimplemented. 
+ **/ +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + return FM10K_NOT_IMPLEMENTED; +} + +static const unsigned char test_str[] = "fm10k"; +static const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56, + 0x78, 0x9a, 0xbc }; +static const u16 test_vlan = 0x0FED; +static const u64 test_u64 = 0xfedcba9876543210ull; +static const u32 test_u32 = 0x87654321; +static const u16 test_u16 = 0x8765; +static const u8 test_u8 = 0x87; +static const s64 test_s64 = -0x123456789abcdef0ll; +static const s32 test_s32 = -0x1235678; +static const s16 test_s16 = -0x1234; +static const s8 test_s8 = -0x12; +static const __le32 test_le[2] = { cpu_to_le32(0x12345678), + cpu_to_le32(0x9abcdef0)}; + +/* The message below is meant to be used as a test message to demonstrate + * how to use the TLV interface and to test the types. Normally this code + * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG + */ +const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { + FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR), + FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8), + FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16), + FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32), + FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64), + FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8), + FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32), + FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64), + FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8), + FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_tlv_msg_test_generate_data - Stuff message with data + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with attribute data + **/ +static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) +{ + if (attr_flags & (1 << FM10K_TEST_MSG_STRING)) + fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, + test_str); + if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR)) + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, + test_mac, test_vlan); + if (attr_flags & (1 << FM10K_TEST_MSG_U8)) + fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); + if (attr_flags & (1 << FM10K_TEST_MSG_U16)) + fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); + if (attr_flags & (1 << FM10K_TEST_MSG_U32)) + fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); + if (attr_flags & (1 << FM10K_TEST_MSG_U64)) + fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); + if (attr_flags & (1 << FM10K_TEST_MSG_S8)) + fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); + if (attr_flags & (1 << FM10K_TEST_MSG_S16)) + fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); + if (attr_flags & (1 << FM10K_TEST_MSG_S32)) + fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); + if (attr_flags & (1 << FM10K_TEST_MSG_S64)) + fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); + if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT)) + fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, + test_le, 8); +} + +/** + * fm10k_tlv_msg_test_create - Create a test message testing all attributes + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with all attribute types + * including a nested attribute. 
+ **/ +void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) +{ + u32 *nest = NULL; + + fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); + + fm10k_tlv_msg_test_generate_data(msg, attr_flags); + + /* check for nested attributes */ + attr_flags >>= FM10K_TEST_MSG_NESTED; + + if (attr_flags) { + nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); + + fm10k_tlv_msg_test_generate_data(nest, attr_flags); + + fm10k_tlv_attr_nest_stop(msg); + } +} + +/** + * fm10k_tlv_msg_test - Validate all results on test message receive + * @hw: Pointer to hardware structure + * @results: Pointer array to attributes in the mesage + * @mbx: Pointer to mailbox information structure + * + * This function does a check to verify all attributes match what the test + * message placed in the message buffer. It is the default handler + * for TLV test messages. + **/ +s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u32 *nest_results[FM10K_TLV_RESULTS_MAX]; + unsigned char result_str[80]; + unsigned char result_mac[ETH_ALEN]; + s32 err = 0; + __le32 result_le[2]; + u16 result_vlan; + u64 result_u64; + u32 result_u32; + u16 result_u16; + u8 result_u8; + s64 result_s64; + s32 result_s32; + s16 result_s16; + s8 result_s8; + u32 reply[3]; + + /* retrieve results of a previous test */ + if (!!results[FM10K_TEST_MSG_RESULT]) + return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT], + &mbx->test_result); + +parse_nested: + if (!!results[FM10K_TEST_MSG_STRING]) { + err = fm10k_tlv_attr_get_null_string( + results[FM10K_TEST_MSG_STRING], + result_str); + if (!err && memcmp(test_str, result_str, sizeof(test_str))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_MAC_ADDR]) { + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_TEST_MSG_MAC_ADDR], + result_mac, &result_vlan); + if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + err = FM10K_ERR_INVALID_VALUE; + if (!err && test_vlan != result_vlan) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U8]) { + err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8], + &result_u8); + if (!err && test_u8 != result_u8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U16]) { + err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16], + &result_u16); + if (!err && test_u16 != result_u16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U32]) { + err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], + &result_u32); + if (!err && test_u32 != result_u32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U64]) { + err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64], + &result_u64); + if (!err && test_u64 != result_u64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S8]) { + err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8], + &result_s8); + if (!err && test_s8 != result_s8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S16]) { + err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16], + &result_s16); + if (!err && test_s16 != result_s16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S32]) { + err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32], + &result_s32); + if (!err && test_s32 != result_s32) 
+ err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S64]) { + err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64], + &result_s64); + if (!err && test_s64 != result_s64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_LE_STRUCT]) { + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_TEST_MSG_LE_STRUCT], + result_le, + sizeof(result_le)); + if (!err && memcmp(test_le, result_le, sizeof(test_le))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + + if (!!results[FM10K_TEST_MSG_NESTED]) { + /* clear any pointers */ + memset(nest_results, 0, sizeof(nest_results)); + + /* parse the nested attributes into the nest results list */ + err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED], + nest_results, + fm10k_tlv_msg_test_attr); + if (err) + goto report_result; + + /* loop back through to the start */ + results = nest_results; + goto parse_nested; + } + +report_result: + /* generate reply with test result */ + fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST); + fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, reply); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..7e045e8bf1ebaedfcc29a8350019d5cecbfdd9c7 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -0,0 +1,186 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_TLV_H_ +#define _FM10K_TLV_H_ + +/* forward declaration */ +struct fm10k_msg_data; + +#include "fm10k_type.h" + +/* Message / Argument header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length | Flags | Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The message header format described here is used for messages that are + * passed between the PF and the VF. To allow for messages larger then + * mailbox size we will provide a message with the above header and it + * will be segmented and transported to the mailbox to the other side where + * it is reassembled. 
It contains the following fields: + * Len: Length of the message in bytes excluding the message header + * Flags: TBD + * Rule: These will be the message/argument types we pass + */ +/* message data header */ +#define FM10K_TLV_ID_SHIFT 0 +#define FM10K_TLV_ID_SIZE 16 +#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1) +#define FM10K_TLV_FLAGS_SHIFT 16 +#define FM10K_TLV_FLAGS_MSG 0x1 +#define FM10K_TLV_FLAGS_SIZE 4 +#define FM10K_TLV_LEN_SHIFT 20 +#define FM10K_TLV_LEN_SIZE 12 + +#define FM10K_TLV_HDR_LEN 4ul +#define FM10K_TLV_LEN_ALIGN_MASK \ + ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT) +#define FM10K_TLV_LEN_ALIGN(tlv) \ + (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK) +#define FM10K_TLV_DWORD_LEN(tlv) \ + ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1) + +#define FM10K_TLV_RESULTS_MAX 32 + +enum fm10k_tlv_type { + FM10K_TLV_NULL_STRING, + FM10K_TLV_MAC_ADDR, + FM10K_TLV_BOOL, + FM10K_TLV_UNSIGNED, + FM10K_TLV_SIGNED, + FM10K_TLV_LE_STRUCT, + FM10K_TLV_NESTED, + FM10K_TLV_MAX_TYPE +}; + +#define FM10K_TLV_ERROR (~0u) + +struct fm10k_tlv_attr { + unsigned int id; + enum fm10k_tlv_type type; + u16 len; +}; + +#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len } +#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 } +#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 } +#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 } +#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 } +#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 } +#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 } +#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 } +#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 } +#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 } +#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 } +#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len } +#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED } +#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR } + +struct fm10k_msg_data { + unsigned int id; + const struct fm10k_tlv_attr *attr; + s32 (*func)(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +}; + +#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } + +s32 fm10k_tlv_msg_init(u32 *, u16); +s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *); +s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *); +s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); +s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); +s32 fm10k_tlv_attr_put_bool(u32 *, u16); +s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32); +#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); +#define 
fm10k_tlv_attr_get_u8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8)) +#define fm10k_tlv_attr_get_u16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16)) +#define fm10k_tlv_attr_get_u32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32)) +#define fm10k_tlv_attr_get_u64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64)) +#define fm10k_tlv_attr_get_s8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8)) +#define fm10k_tlv_attr_get_s16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16)) +#define fm10k_tlv_attr_get_s32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32)) +#define fm10k_tlv_attr_get_s64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) +s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); +s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); +u32 *fm10k_tlv_attr_nest_start(u32 *, u16); +s32 fm10k_tlv_attr_nest_stop(u32 *); +s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *); +s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_ID_TEST 0 + +enum fm10k_tlv_test_attr_id { + FM10K_TEST_MSG_UNSET, + FM10K_TEST_MSG_STRING, + FM10K_TEST_MSG_MAC_ADDR, + FM10K_TEST_MSG_U8, + FM10K_TEST_MSG_U16, + FM10K_TEST_MSG_U32, + FM10K_TEST_MSG_U64, + FM10K_TEST_MSG_S8, + FM10K_TEST_MSG_S16, + FM10K_TEST_MSG_S32, + FM10K_TEST_MSG_S64, + FM10K_TEST_MSG_LE_STRUCT, + FM10K_TEST_MSG_NESTED, + FM10K_TEST_MSG_RESULT, + FM10K_TEST_MSG_MAX +}; + +extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[]; +void fm10k_tlv_msg_test_create(u32 *, u32); +s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_TEST_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func) +#define FM10K_TLV_MSG_ERROR_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func) +#endif /* _FM10K_MSG_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h new file mode 100644 index 0000000000000000000000000000000000000000..280296f291544b4f327fa563103fb903889f1fb4 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -0,0 +1,770 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_TYPE_H_ +#define _FM10K_TYPE_H_ + +/* forward declaration */ +struct fm10k_hw; + +#include +#include +#include + +#include "fm10k_mbx.h" + +#define FM10K_DEV_ID_PF 0x15A4 +#define FM10K_DEV_ID_VF 0x15A5 + +#define FM10K_MAX_QUEUES 256 +#define FM10K_MAX_QUEUES_PF 128 +#define FM10K_MAX_QUEUES_POOL 16 + +#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull +#define FM10K_STAT_VALID 0x80000000 + +/* PCI Bus Info */ +#define FM10K_PCIE_LINK_CAP 0x7C +#define FM10K_PCIE_LINK_STATUS 0x82 +#define FM10K_PCIE_LINK_WIDTH 0x3F0 +#define FM10K_PCIE_LINK_WIDTH_1 0x10 +#define FM10K_PCIE_LINK_WIDTH_2 0x20 +#define FM10K_PCIE_LINK_WIDTH_4 0x40 +#define FM10K_PCIE_LINK_WIDTH_8 0x80 +#define FM10K_PCIE_LINK_SPEED 0xF +#define FM10K_PCIE_LINK_SPEED_2500 0x1 +#define FM10K_PCIE_LINK_SPEED_5000 0x2 +#define FM10K_PCIE_LINK_SPEED_8000 0x3 + +/* PCIe payload size */ +#define FM10K_PCIE_DEV_CAP 0x74 +#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02 +#define FM10K_PCIE_DEV_CTRL 0x78 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40 + +/* PCIe MSI-X Capability info */ +#define FM10K_PCI_MSIX_MSG_CTRL 0xB2 +#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF +#define FM10K_MAX_MSIX_VECTORS 256 +#define FM10K_MAX_VECTORS_PF 256 +#define FM10K_MAX_VECTORS_POOL 32 + +/* PCIe SR-IOV Info */ +#define FM10K_PCIE_SRIOV_CTRL 0x190 +#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 + +#define FM10K_ERR_PARAM -2 +#define FM10K_ERR_REQUESTS_PENDING -4 +#define FM10K_ERR_RESET_REQUESTED -5 +#define FM10K_ERR_DMA_PENDING -6 +#define FM10K_ERR_RESET_FAILED -7 +#define FM10K_ERR_INVALID_MAC_ADDR -8 +#define FM10K_ERR_INVALID_VALUE -9 +#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF + +/* Start of PF registers */ +#define FM10K_CTRL 0x0000 +#define FM10K_CTRL_BAR4_ALLOWED 0x00000004 + +#define FM10K_CTRL_EXT 0x0001 +#define FM10K_GCR 0x0003 +#define FM10K_GCR_EXT 0x0005 + +/* Interrupt control registers */ +#define FM10K_EICR 0x0006 +#define FM10K_EICR_FAULT_MASK 0x0000003F +#define FM10K_EICR_MAILBOX 0x00000040 +#define FM10K_EICR_SWITCHREADY 0x00000080 +#define FM10K_EICR_SWITCHNOTREADY 0x00000100 +#define FM10K_EICR_SWITCHINTERRUPT 0x00000200 +#define FM10K_EICR_VFLR 0x00000800 +#define FM10K_EICR_MAXHOLDTIME 0x00001000 +#define FM10K_EIMR 0x0007 +#define FM10K_EIMR_PCA_FAULT 0x00000001 +#define FM10K_EIMR_THI_FAULT 0x00000010 +#define FM10K_EIMR_FUM_FAULT 0x00000400 +#define FM10K_EIMR_MAILBOX 0x00001000 +#define FM10K_EIMR_SWITCHREADY 0x00004000 +#define FM10K_EIMR_SWITCHNOTREADY 0x00010000 +#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000 +#define FM10K_EIMR_SRAMERROR 0x00100000 +#define FM10K_EIMR_VFLR 0x00400000 +#define FM10K_EIMR_MAXHOLDTIME 0x01000000 +#define FM10K_EIMR_ALL 0x55555555 +#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0) +#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1) +#define FM10K_FAULT_ADDR_LO 0x0 +#define FM10K_FAULT_ADDR_HI 0x1 +#define FM10K_FAULT_SPECINFO 0x2 +#define FM10K_FAULT_FUNC 0x3 +#define FM10K_FAULT_SIZE 0x4 +#define FM10K_FAULT_FUNC_VALID 0x00008000 +#define FM10K_FAULT_FUNC_PF 0x00004000 +#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00 +#define FM10K_FAULT_FUNC_VF_SHIFT 8 +#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF + +#define FM10K_PCA_FAULT 0x0008 +#define 
FM10K_THI_FAULT 0x0010 +#define FM10K_FUM_FAULT 0x001C + +/* Rx queue timeout indicator */ +#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020) + +/* Switch Manager info */ +#define FM10K_SM_AREA(_n) ((_n) + 0x0028) + +/* GLORT mapping registers */ +#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030) +#define FM10K_DGLORT_COUNT 8 +#define FM10K_DGLORTMAP_MASK_SHIFT 16 +#define FM10K_DGLORTMAP_ANY 0x00000000 +#define FM10K_DGLORTMAP_NONE 0x0000FFFF +#define FM10K_DGLORTMAP_ZERO 0xFFFF0000 +#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038) +#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4 +#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7 +#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14 +#define FM10K_DGLORTDEC_QBASE_SHIFT 16 +#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24 +#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000 +#define FM10K_TUNNEL_CFG 0x0040 +#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16 +#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050) +#define FM10K_SWPRI_MAX 16 +#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800) +#define FM10K_RSSRK_SIZE 10 +#define FM10K_RSSRK_ENTRIES_PER_REG 4 +#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000) +#define FM10K_RETA_SIZE 32 +#define FM10K_RETA_ENTRIES_PER_REG 4 +#define FM10K_MAX_RSS_INDICES 128 + +/* Rate limiting registers */ +#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000) +#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF +#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040) +#define FM10K_TC_MAXCREDIT_64K 0x00010000 +#define FM10K_TC_RATE(_n) ((_n) + 0x2080) +#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF +#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000 + +/* DMA control registers */ +#define FM10K_DMA_CTRL 0x20C3 +#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001 +#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008 +#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010 +#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080 +#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100 +#define FM10K_DMA_CTRL_MINMSS_64 0x00008000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000 +#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000 +#define FM10K_DMA_CTRL_32_DESC 0x00000000 + +#define FM10K_DMA_CTRL2 0x20C4 +#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000 + +/* TSO flags configuration + * First packet contains all flags except for fin and psh + * Middle packet contains only urg and ack + * Last packet contains urg, ack, fin, and psh + */ +#define FM10K_TSO_FLAGS_LOW 0x00300FF6 +#define FM10K_TSO_FLAGS_HI 0x00000039 +#define FM10K_DTXTCPFLGL 0x20C5 +#define FM10K_DTXTCPFLGH 0x20C6 + +#define FM10K_TPH_CTRL 0x20C7 +#define FM10K_MRQC(_n) ((_n) + 0x2100) +#define FM10K_MRQC_TCP_IPV4 0x00000001 +#define FM10K_MRQC_IPV4 0x00000002 +#define FM10K_MRQC_IPV6 0x00000010 +#define FM10K_MRQC_TCP_IPV6 0x00000020 +#define FM10K_MRQC_UDP_IPV4 0x00000040 +#define FM10K_MRQC_UDP_IPV6 0x00000080 + +#define FM10K_TQMAP(_n) ((_n) + 0x2800) +#define FM10K_TQMAP_TABLE_SIZE 2048 +#define FM10K_RQMAP(_n) ((_n) + 0x3000) + +/* Hardware Statistics */ +#define FM10K_STATS_TIMEOUT 0x3800 +#define FM10K_STATS_UR 0x3801 +#define FM10K_STATS_CA 0x3802 +#define FM10K_STATS_UM 0x3803 +#define FM10K_STATS_XEC 0x3804 +#define FM10K_STATS_VLAN_DROP 0x3805 +#define FM10K_STATS_LOOPBACK_DROP 0x3806 +#define FM10K_STATS_NODESC_DROP 0x3807 + +/* Timesync registers */ +#define FM10K_SYSTIME 0x3814 +#define FM10K_SYSTIME_CFG 0x3818 +#define FM10K_SYSTIME_CFG_STEP_MASK 
0x0000000F + +/* PCIe state registers */ +#define FM10K_PHYADDR 0x381C + +/* Rx ring registers */ +#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000) +#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001) +#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002) +#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003) +#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000 +#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000 +#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004) +#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005) +#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006) +#define FM10K_RXQCTL_ENABLE 0x00000001 +#define FM10K_RXQCTL_PF 0x000000FC +#define FM10K_RXQCTL_VF_SHIFT 2 +#define FM10K_RXQCTL_VF 0x00000100 +#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF) +#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007) +#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001 +#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200 +#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008) +#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009) +#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */ +#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000 +#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000 + +/* Rx Statistics */ +#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A) +#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B) +#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C) +#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D) + +/* Rx GLORT register */ +#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E) + +/* Tx ring registers */ +#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) +#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) +#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) +#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800 +#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000 +#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004) +#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005) +#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006) +#define FM10K_TXDCTL_ENABLE 0x00004000 +#define FM10K_TXDCTL_MAX_TIME_SHIFT 16 +#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007) +#define FM10K_TXQCTL_PF 0x0000003F +#define FM10K_TXQCTL_VF 0x00000040 +#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF) +#define FM10K_TXQCTL_PC_SHIFT 7 +#define FM10K_TXQCTL_PC_MASK 0x00000380 +#define FM10K_TXQCTL_TC_SHIFT 10 +#define FM10K_TXQCTL_VID_SHIFT 16 +#define FM10K_TXQCTL_VID_MASK 0x0FFF0000 +#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000 +#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008) + +/* Tx Statistics */ +#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009) +#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A) +#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B) + +/* Tx Push registers */ +#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C) +#define FM10K_TQDLOC_BASE_32_DESC 0x08 +#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000 + +/* Tx GLORT registers */ +#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D) +#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E) +#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001 + +/* Interrupt moderation and control registers */ +#define FM10K_INT_MAP(_n) ((_n) + 0x10080) +#define FM10K_INT_MAP_TIMER0 0x00000000 +#define FM10K_INT_MAP_TIMER1 0x00000100 +#define FM10K_INT_MAP_IMMEDIATE 0x00000200 +#define FM10K_INT_MAP_DISABLE 0x00000300 +#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003) +#define 
FM10K_INT_CTRL 0x12000 +#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400 +#define FM10K_ITR(_n) ((_n) + 0x12400) +#define FM10K_ITR_INTERVAL1_SHIFT 12 +#define FM10K_ITR_PENDING2 0x10000000 +#define FM10K_ITR_AUTOMASK 0x20000000 +#define FM10K_ITR_MASK_SET 0x40000000 +#define FM10K_ITR_MASK_CLEAR 0x80000000 +#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800) +#define FM10K_ITR_REG_COUNT 768 +#define FM10K_ITR_REG_COUNT_PF 256 + +/* Switch manager interrupt registers */ +#define FM10K_IP 0x13000 +#define FM10K_IP_NOTINRESET 0x00000100 + +/* VLAN registers */ +#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000) +#define FM10K_VLAN_TABLE_SIZE 128 + +/* VLAN specific message offsets */ +#define FM10K_VLAN_TABLE_VID_MAX 4096 +#define FM10K_VLAN_TABLE_VSI_MAX 64 +#define FM10K_VLAN_LENGTH_SHIFT 16 +#define FM10K_VLAN_CLEAR (1 << 15) +#define FM10K_VLAN_ALL \ + ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) + +/* VF FLR event notification registers */ +#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844) +#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846) + +/* Defines for size of uncacheable memories */ +#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */ +#define FM10K_UC_ADDR_END 0x100000 /* end of standard regs */ +#define FM10K_UC_ADDR_SIZE (FM10K_UC_ADDR_END - FM10K_UC_ADDR_START) + +/* Define timeouts for resets and disables */ +#define FM10K_QUEUE_DISABLE_TIMEOUT 100 +#define FM10K_RESET_TIMEOUT 100 + +/* VF registers */ +#define FM10K_VFCTRL 0x00000 +#define FM10K_VFCTRL_RST 0x00000008 +#define FM10K_VFINT_MAP 0x00030 +#define FM10K_VFSYSTIME 0x00040 +#define FM10K_VFITR(_n) ((_n) + 0x00060) + +/* Registers contained in BAR 4 for Switch management */ +#define FM10K_SW_SYSTIME_ADJUST 0x0224D +#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF +#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000 +#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) + +enum fm10k_int_source { + fm10k_int_Mailbox = 0, + fm10k_int_PCIeFault = 1, + fm10k_int_SwitchUpDown = 2, + fm10k_int_SwitchEvent = 3, + fm10k_int_SRAM = 4, + fm10k_int_VFLR = 5, + fm10k_int_MaxHoldTime = 6, + fm10k_int_sources_max_pf +}; + +/* PCIe bus speeds */ +enum fm10k_bus_speed { + fm10k_bus_speed_unknown = 0, + fm10k_bus_speed_2500 = 2500, + fm10k_bus_speed_5000 = 5000, + fm10k_bus_speed_8000 = 8000, + fm10k_bus_speed_reserved +}; + +/* PCIe bus widths */ +enum fm10k_bus_width { + fm10k_bus_width_unknown = 0, + fm10k_bus_width_pcie_x1 = 1, + fm10k_bus_width_pcie_x2 = 2, + fm10k_bus_width_pcie_x4 = 4, + fm10k_bus_width_pcie_x8 = 8, + fm10k_bus_width_reserved +}; + +/* PCIe payload sizes */ +enum fm10k_bus_payload { + fm10k_bus_payload_unknown = 0, + fm10k_bus_payload_128 = 1, + fm10k_bus_payload_256 = 2, + fm10k_bus_payload_512 = 3, + fm10k_bus_payload_reserved +}; + +/* Bus parameters */ +struct fm10k_bus_info { + enum fm10k_bus_speed speed; + enum fm10k_bus_width width; + enum fm10k_bus_payload payload; +}; + +/* Statistics related declarations */ +struct fm10k_hw_stat { + u64 count; + u32 base_l; + u32 base_h; +}; + +struct fm10k_hw_stats_q { + struct fm10k_hw_stat tx_bytes; + struct fm10k_hw_stat tx_packets; +#define tx_stats_idx tx_packets.base_h + struct fm10k_hw_stat rx_bytes; + struct fm10k_hw_stat rx_packets; +#define rx_stats_idx rx_packets.base_h + struct fm10k_hw_stat rx_drops; +}; + +struct fm10k_hw_stats { + struct fm10k_hw_stat timeout; +#define stats_idx timeout.base_h + struct fm10k_hw_stat ur; + struct fm10k_hw_stat ca; + struct fm10k_hw_stat um; + struct fm10k_hw_stat xec; + 
struct fm10k_hw_stat vlan_drop; + struct fm10k_hw_stat loopback_drop; + struct fm10k_hw_stat nodesc_drop; + struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF]; +}; + +/* Establish DGLORT feature priority */ +enum fm10k_dglortdec_idx { + fm10k_dglort_default = 0, + fm10k_dglort_vf_rsvd0 = 1, + fm10k_dglort_vf_rss = 2, + fm10k_dglort_pf_rsvd0 = 3, + fm10k_dglort_pf_queue = 4, + fm10k_dglort_pf_vsi = 5, + fm10k_dglort_pf_rsvd1 = 6, + fm10k_dglort_pf_rss = 7 +}; + +struct fm10k_dglort_cfg { + u16 glort; /* GLORT base */ + u16 queue_b; /* Base value for queue */ + u8 vsi_b; /* Base value for VSI */ + u8 idx; /* index of DGLORTDEC entry */ + u8 rss_l; /* RSS indices */ + u8 pc_l; /* Priority Class indices */ + u8 vsi_l; /* Number of bits from GLORT used to determine VSI */ + u8 queue_l; /* Number of bits from GLORT used to determine queue */ + u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */ + u8 inner_rss; /* Boolean value if inner header is used for RSS */ +}; + +enum fm10k_pca_fault { + PCA_NO_FAULT, + PCA_UNMAPPED_ADDR, + PCA_BAD_QACCESS_PF, + PCA_BAD_QACCESS_VF, + PCA_MALICIOUS_REQ, + PCA_POISONED_TLP, + PCA_TLP_ABORT, + __PCA_MAX +}; + +enum fm10k_thi_fault { + THI_NO_FAULT, + THI_MAL_DIS_Q_FAULT, + __THI_MAX +}; + +enum fm10k_fum_fault { + FUM_NO_FAULT, + FUM_UNMAPPED_ADDR, + FUM_POISONED_TLP, + FUM_BAD_VF_QACCESS, + FUM_ADD_DECODE_ERR, + FUM_RO_ERROR, + FUM_QPRC_CRC_ERROR, + FUM_CSR_TIMEOUT, + FUM_INVALID_TYPE, + FUM_INVALID_LENGTH, + FUM_INVALID_BE, + FUM_INVALID_ALIGN, + __FUM_MAX +}; + +struct fm10k_fault { + u64 address; /* Address at the time fault was detected */ + u32 specinfo; /* Extra info on this fault (fault dependent) */ + u8 type; /* Fault value dependent on subunit */ + u8 func; /* Function number of the fault */ +}; + +struct fm10k_mac_ops { + /* basic bring-up and tear-down */ + s32 (*reset_hw)(struct fm10k_hw *); + s32 (*init_hw)(struct fm10k_hw *); + s32 (*start_hw)(struct fm10k_hw *); + s32 (*stop_hw)(struct fm10k_hw *); + s32 (*get_bus_info)(struct fm10k_hw *); + s32 (*get_host_state)(struct fm10k_hw *, bool *); + bool (*is_slot_appropriate)(struct fm10k_hw *); + s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); + s32 (*read_mac_addr)(struct fm10k_hw *); + s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, + u16, bool, u8); + s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool); + s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8); + void (*update_int_moderator)(struct fm10k_hw *); + s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool); + void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + s32 (*configure_dglort_map)(struct fm10k_hw *, + struct fm10k_dglort_cfg *); + void (*set_dma_mask)(struct fm10k_hw *, u64); + s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); + void (*request_lport_map)(struct fm10k_hw *); + s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); + u64 (*read_systime)(struct fm10k_hw *); +}; + +enum fm10k_mac_type { + fm10k_mac_unknown = 0, + fm10k_mac_pf, + fm10k_mac_vf, + fm10k_num_macs +}; + +struct fm10k_mac_info { + struct fm10k_mac_ops ops; + enum fm10k_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u16 default_vid; + u16 max_msix_vectors; + u16 max_queues; + bool vlan_override; + bool get_host_state; + bool tx_ready; + u32 dglort_map; +}; + +struct fm10k_swapi_table_info { + u32 used; + u32 avail; +}; + +struct fm10k_swapi_info { + u32 status; + struct fm10k_swapi_table_info 
mac; + struct fm10k_swapi_table_info nexthop; + struct fm10k_swapi_table_info ffu; +}; + +enum fm10k_xcast_modes { + FM10K_XCAST_MODE_ALLMULTI = 0, + FM10K_XCAST_MODE_MULTI = 1, + FM10K_XCAST_MODE_PROMISC = 2, + FM10K_XCAST_MODE_NONE = 3, + FM10K_XCAST_MODE_DISABLE = 4 +}; + +#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */ +#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */ + +struct fm10k_vf_info { + /* mbx must be first field in struct unless all default IOV message + * handlers are redone as the assumption is that vf_info starts + * at the same offset as the mailbox + */ + struct fm10k_mbx_info mbx; /* PF side of VF mailbox */ + int rate; /* Tx BW cap as defined by OS */ + u16 glort; /* resource tag for this VF */ + u16 sw_vid; /* Switch API assigned VLAN */ + u16 pf_vid; /* PF assigned Default VLAN */ + u8 mac[ETH_ALEN]; /* PF Default MAC address */ + u8 vsi; /* VSI identifier */ + u8 vf_idx; /* which VF this is */ + u8 vf_flags; /* flags indicating what modes + * are supported for the port + */ +}; + +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI) +#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI) +#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC) +#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) +#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) +#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) +#define FM10K_VF_FLAG_SET_MODE_NONE \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_MULTI_ENABLED \ + (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC)) + +struct fm10k_iov_ops { + /* IOV related bring-up and tear-down */ + s32 (*assign_resources)(struct fm10k_hw *, u16, u16); + s32 (*configure_tc)(struct fm10k_hw *, u16, int); + s32 (*assign_int_moderator)(struct fm10k_hw *, u16); + s32 (*assign_default_mac_vlan)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*reset_resources)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); + void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); + void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16); + s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64); +}; + +struct fm10k_iov_info { + struct fm10k_iov_ops ops; + u16 total_vfs; + u16 num_vfs; + u16 num_pools; +}; + +enum fm10k_devices { + fm10k_device_pf, + fm10k_device_vf, +}; + +struct fm10k_info { + enum fm10k_mac_type mac; + s32 (*get_invariants)(struct fm10k_hw *); + struct fm10k_mac_ops *mac_ops; + struct fm10k_iov_ops *iov_ops; +}; + +struct fm10k_hw { + u32 __iomem *hw_addr; + u32 __iomem *sw_addr; + void *back; + struct fm10k_mac_info mac; + struct fm10k_bus_info bus; + struct fm10k_bus_info bus_caps; + struct fm10k_iov_info iov; + struct fm10k_mbx_info mbx; + struct fm10k_swapi_info swapi; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; +}; + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit Descriptor */ +struct fm10k_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buflen; /* Length of data to be DMAed */ + __le16 
vlan; /* VLAN_ID and VPRI to be inserted in FTAG */ + __le16 mss; /* MSS for segmentation offload */ + u8 hdrlen; /* Header size for segmentation offload */ + u8 flags; /* Status and offload request flags */ +}; + +/* Transmit Descriptor Cache Structure */ +struct fm10k_tx_desc_cache { + struct fm10k_tx_desc tx_desc[256]; +}; + +#define FM10K_TXD_FLAG_INT 0x01 +#define FM10K_TXD_FLAG_TIME 0x02 +#define FM10K_TXD_FLAG_CSUM 0x04 +#define FM10K_TXD_FLAG_FTAG 0x10 +#define FM10K_TXD_FLAG_RS 0x20 +#define FM10K_TXD_FLAG_LAST 0x40 +#define FM10K_TXD_FLAG_DONE 0x80 + +/* These macros are meant to enable optimal placement of the RS and INT + * bits. It will point us to the last descriptor in the cache for either the + * start of the packet, or the end of the packet. If the index is actually + * at the start of the FIFO it will point to the offset for the last index + * in the FIFO to prevent an unnecessary write. + */ +#define FM10K_TXD_WB_FIFO_SIZE 4 + +/* Receive Descriptor - 32B */ +union fm10k_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 reserved; /* Empty space, RSS hash */ + __le64 timestamp; + } q; /* Read, Writeback, 64b quad-words */ + struct { + __le32 data; /* RSS and header data */ + __le32 rss; /* RSS Hash */ + __le32 staterr; + __le32 vlan_len; + __le32 glort; /* sglort/dglort */ + } d; /* Writeback, 32b double-words */ + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen, xC */ + __le16 rss_lower; + __le16 rss_upper; + __le16 status; /* status/error */ + __le16 csum_err; /* checksum or extended error value */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + __le16 dglort; + __le16 sglort; + } w; /* Writeback, 16b words */ +}; + +#define FM10K_RXD_RSSTYPE_MASK 0x000F +enum fm10k_rdesc_rss_type { + FM10K_RSSTYPE_NONE = 0x0, + FM10K_RSSTYPE_IPV4_TCP = 0x1, + FM10K_RSSTYPE_IPV4 = 0x2, + FM10K_RSSTYPE_IPV6_TCP = 0x3, + /* Reserved 0x4 */ + FM10K_RSSTYPE_IPV6 = 0x5, + /* Reserved 0x6 */ + FM10K_RSSTYPE_IPV4_UDP = 0x7, + FM10K_RSSTYPE_IPV6_UDP = 0x8 + /* Reserved 0x9 - 0xF */ +}; + +#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006 +enum fm10k_rxdesc_xc { + FM10K_XC_UNICAST = 0x0, + FM10K_XC_MULTICAST = 0x4, + FM10K_XC_BROADCAST = 0x6 +}; + +#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */ +#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */ +#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */ +#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */ +#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */ +#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */ +#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */ +#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ +#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ + +struct fm10k_ftag { + __be16 swpri_type_user; + __be16 vlan; + __be16 sglort; + __be16 dglort; +}; + +#endif /* _FM10K_TYPE_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c new file mode 100644 index 0000000000000000000000000000000000000000..f0aa0f97b4a91d7f7cc4c07d990e2e04f06aafa1 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -0,0 +1,578 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_vf.h" + +/** + * fm10k_stop_hw_vf - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) +{ + u8 *perm_addr = hw->mac.perm_addr; + u32 bal = 0, bah = 0; + s32 err; + u16 i; + + /* we need to disable the queues before taking further steps */ + err = fm10k_stop_hw_generic(hw); + if (err) + return err; + + /* If permanent address is set then we need to restore it */ + if (is_valid_ether_addr(perm_addr)) { + bal = (((u32)perm_addr[3]) << 24) | + (((u32)perm_addr[4]) << 16) | + (((u32)perm_addr[5]) << 8); + bah = (((u32)0xFF) << 24) | + (((u32)perm_addr[0]) << 16) | + (((u32)perm_addr[1]) << 8) | + ((u32)perm_addr[2]); + } + + /* The queues have already been disabled so we just need to + * update their base address registers + */ + for (i = 0; i < hw->mac.max_queues; i++) { + fm10k_write_reg(hw, FM10K_TDBAL(i), bal); + fm10k_write_reg(hw, FM10K_TDBAH(i), bah); + fm10k_write_reg(hw, FM10K_RDBAL(i), bal); + fm10k_write_reg(hw, FM10K_RDBAH(i), bah); + } + + return 0; +} + +/** + * fm10k_reset_hw_vf - VF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after just being initialized. 
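/* Editor's illustration, not part of the patch: fm10k_stop_hw_vf() above parks
 * the permanent MAC address in the queue base address registers so that it can
 * be recovered after the queues are torn down, and fm10k_read_mac_addr_vf()
 * further below reverses the packing. A minimal standalone sketch of the same
 * byte layout, using a hypothetical helper name, is:
 */
static void fm10k_example_pack_mac(const u8 *mac, u32 *bal, u32 *bah)
{
	/* low register: MAC bytes 3..5 occupy the upper three bytes */
	*bal = ((u32)mac[3] << 24) | ((u32)mac[4] << 16) | ((u32)mac[5] << 8);

	/* high register: 0xFF marker byte followed by MAC bytes 0..2 */
	*bah = ((u32)0xFF << 24) | ((u32)mac[0] << 16) |
	       ((u32)mac[1] << 8) | (u32)mac[2];
}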
+ **/ +static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) +{ + s32 err; + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_stop_hw_vf(hw); + if (err) + return err; + + /* Initiate VF reset */ + fm10k_write_reg(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST); + + /* Flush write and allow 100us for reset to complete */ + fm10k_write_flush(hw); + udelay(FM10K_RESET_TIMEOUT); + + /* Clear reset bit and verify it was cleared */ + fm10k_write_reg(hw, FM10K_VFCTRL, 0); + if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) + err = FM10K_ERR_RESET_FAILED; + + return err; +} + +/** + * fm10k_init_hw_vf - VF hardware initialization + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) +{ + u32 tqdloc, tqdloc0 = ~fm10k_read_reg(hw, FM10K_TQDLOC(0)); + s32 err; + u16 i; + + /* assume we always have at least 1 queue */ + for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { + /* verify the Descriptor cache offsets are increasing */ + tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); + if (!tqdloc || (tqdloc == tqdloc0)) + break; + + /* check to verify the PF doesn't own any of our queues */ + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(i)) || + !~fm10k_read_reg(hw, FM10K_RXQCTL(i))) + break; + } + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_disable_queues_generic(hw, i); + if (err) + return err; + + /* record maximum queue count */ + hw->mac.max_queues = i; + + return 0; +} + +/** + * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. Since the VF has no control over + * the "slot" it is in, always indicate that the slot is appropriate. + **/ +static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) +{ + return true; +} + +/* This structure defines the attributes to be parsed below */ +const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), + FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Reserved, should always be 0 + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for this VF. 
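/* Editor's sketch, not part of the patch: the reset and init handlers above are
 * normally reached through the mac.ops table rather than called directly, and
 * are typically run back to back while the device is brought up. Assuming a hw
 * structure whose ops have already been bound, the sequence is roughly:
 */
static s32 fm10k_example_reset_and_init(struct fm10k_hw *hw)
{
	s32 err;

	/* return the hardware to a freshly initialized state */
	err = hw->mac.ops.reset_hw(hw);		/* fm10k_reset_hw_vf */
	if (err)
		return err;

	/* then discover how many queues this VF actually owns */
	return hw->mac.ops.init_hw(hw);		/* fm10k_init_hw_vf */
}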
+ **/ +static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[4]; + + /* verify the index is not set */ + if (vsi) + return FM10K_ERR_PARAM; + + /* verify upper 4 bits of vid and length are 0 */ + if ((vid << 16 | vid) >> 28) + return FM10K_ERR_PARAM; + + /* encode set bit into the VLAN ID */ + if (!set) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message + * @hw: pointer to the HW structure + * @results: Attributes for message + * @mbx: unused mailbox data + * + * This function should determine the MAC address for the VF + **/ +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u8 perm_addr[ETH_ALEN]; + u16 vid; + s32 err; + + /* record MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC], + perm_addr, &vid); + if (err) + return err; + + ether_addr_copy(hw->mac.perm_addr, perm_addr); + hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); + + return 0; +} + +/** + * fm10k_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * + * This function should determine the MAC address for the VF + **/ +static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 base_addr; + + base_addr = fm10k_read_reg(hw, FM10K_TDBAL(0)); + + /* last byte should be 0 */ + if (base_addr << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(base_addr >> 24); + perm_addr[4] = (u8)(base_addr >> 16); + perm_addr[5] = (u8)(base_addr >> 8); + + base_addr = fm10k_read_reg(hw, FM10K_TDBAH(0)); + + /* first byte should be all 1's */ + if ((~base_addr) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(base_addr >> 16); + perm_addr[1] = (u8)(base_addr >> 8); + perm_addr[2] = (u8)(base_addr); + + ether_addr_copy(hw->mac.perm_addr, perm_addr); + ether_addr_copy(hw->mac.addr, perm_addr); + + return 0; +} + +/** + * fm10k_update_uc_addr_vf - Update device unicast address + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses for + * the VF. 
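/* Editor's note, illustration only: the (vid << 16 | vid) >> 28 test in
 * fm10k_update_vlan_vf() above folds two range checks into one expression.
 * The u32 argument carries a VLAN range length in its upper 16 bits (see
 * FM10K_VLAN_LENGTH_SHIFT) and the VLAN ID in its lower 16 bits, and both
 * halves must stay below FM10K_VLAN_TABLE_VID_MAX. An equivalent but more
 * explicit form of the same check, with a hypothetical helper name:
 */
static bool fm10k_example_vlan_arg_ok(u32 vid)
{
	u32 length = vid >> FM10K_VLAN_LENGTH_SHIFT;	/* upper 16 bits */
	u32 vlan_id = vid & 0xFFFF;			/* lower 16 bits */

	/* both the length and the VLAN ID must fit in 12 bits */
	return (length < FM10K_VLAN_TABLE_VID_MAX) &&
	       (vlan_id < FM10K_VLAN_TABLE_VID_MAX);
}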
+ **/ +static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify MAC address is valid */ + if (!is_valid_ether_addr(mac)) + return FM10K_ERR_PARAM; + + /* verify we are not locked down on the MAC address */ + if (is_valid_ether_addr(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_mc_addr_vf - Update device multicast address + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the VF. + **/ +static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify multicast address is valid */ + if (!is_multicast_ether_addr(mac)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST, + mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_vf - Request update of interrupt moderator list + * @hw: pointer to hardware structure + * + * This function will issue a request to the PF to rescan our MSI-X table + * and to update the interrupt moderator linked list. + **/ +static void fm10k_update_int_moderator_vf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* generate MSI-X request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX); + + /* load onto outgoing mailbox */ + mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/* This structure defines the attributes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE), + FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE), + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler is meant to capture the indication from the PF that we + * are ready to bring up the interface. + **/ +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ? 
+ FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; + + return 0; +} + +/** + * fm10k_update_lport_state_vf - Update device state in lower device + * @hw: pointer to the HW structure + * @glort: unused + * @count: number of logical ports to enable - unused (always 1) + * @enable: boolean value indicating if this is an enable or disable request + * + * Notify the lower device of a state change. If the lower device is + * enabled we can add filters, if it is disabled all filters for this + * logical port are flushed. + **/ +static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[2]; + + /* reset glort mask 0 as we have to wait to be enabled */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + /* generate port state request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + if (!enable) + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_xcast_mode_vf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: unused + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. + **/ +static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3]; + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 timestamp handler */ + +/** + * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * + * This function collects and aggregates per queue hardware statistics. + **/ +static void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for queue hardware statistics. + **/ +static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_vf(hw, stats); +} + +/** + * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. 
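/* Editor's sketch, not part of the patch: callers normally reach the
 * update_lport_state and update_xcast_mode handlers above through the mac.ops
 * table that is filled in at the end of this file. Assuming the mailbox is
 * already connected, a hypothetical bring-up helper could look roughly like
 * this; the glort argument is unused by the VF handlers and the error handling
 * is illustrative only.
 */
static s32 fm10k_example_bring_up_lport(struct fm10k_hw *hw)
{
	s32 err;

	/* ask the PF to enable our single logical port */
	err = hw->mac.ops.update_lport_state(hw, 0, 1, true);
	if (err)
		return err;

	/* then request multicast reception on top of the default mode */
	return hw->mac.ops.update_xcast_mode(hw, 0, FM10K_XCAST_MODE_MULTI);
}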
+ **/ +static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* stub for now until we determine correct message for this */ + + return 0; +} + +/** + * fm10k_adjust_systime_vf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function takes an adjustment rate in parts per billion and will + * verify that this value is 0 as the VF cannot support adjusting the + * systime clock. + * + * If the ppb value is non-zero the return is ERR_PARAM else success + **/ +static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) +{ + /* The VF cannot adjust the clock frequency, however it should + * already have a syntonic clock with whichever host interface is + * running as the master for the host interface clock domain so + * there should be no frequency adjustment necessary. + */ + return ppb ? FM10K_ERR_PARAM : 0; +} + +/** + * fm10k_read_systime_vf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanoseconds. In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static struct fm10k_mac_ops mac_ops_vf = { + .get_bus_info = &fm10k_get_bus_info_generic, + .reset_hw = &fm10k_reset_hw_vf, + .init_hw = &fm10k_init_hw_vf, + .start_hw = &fm10k_start_hw_generic, + .stop_hw = &fm10k_stop_hw_vf, + .is_slot_appropriate = &fm10k_is_slot_appropriate_vf, + .update_vlan = &fm10k_update_vlan_vf, + .read_mac_addr = &fm10k_read_mac_addr_vf, + .update_uc_addr = &fm10k_update_uc_addr_vf, + .update_mc_addr = &fm10k_update_mc_addr_vf, + .update_xcast_mode = &fm10k_update_xcast_mode_vf, + .update_int_moderator = &fm10k_update_int_moderator_vf, + .update_lport_state = &fm10k_update_lport_state_vf, + .update_hw_stats = &fm10k_update_hw_stats_vf, + .rebind_hw_stats = &fm10k_rebind_hw_stats_vf, + .configure_dglort_map = &fm10k_configure_dglort_map_vf, + .get_host_state = &fm10k_get_host_state_generic, + .adjust_systime = &fm10k_adjust_systime_vf, + .read_systime = &fm10k_read_systime_vf, +}; + +static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) +{ + fm10k_get_invariants_generic(hw); + + return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); +} + +struct fm10k_info fm10k_vf_info = { + .mac = fm10k_mac_vf, + .get_invariants = &fm10k_get_invariants_vf, + .mac_ops = &mac_ops_vf, +}; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h new file mode 100644 index 
0000000000000000000000000000000000000000..06a99d794c99beda298ad4ffa7d812545f56eb72 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -0,0 +1,78 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_VF_H_ +#define _FM10K_VF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +enum fm10k_vf_tlv_msg_id { + FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */ + FM10K_VF_MSG_ID_MSIX, + FM10K_VF_MSG_ID_MAC_VLAN, + FM10K_VF_MSG_ID_LPORT_STATE, + FM10K_VF_MSG_ID_1588, + FM10K_VF_MSG_ID_MAX, +}; + +enum fm10k_tlv_mac_vlan_attr_id { + FM10K_MAC_VLAN_MSG_VLAN, + FM10K_MAC_VLAN_MSG_SET, + FM10K_MAC_VLAN_MSG_MAC, + FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + FM10K_MAC_VLAN_MSG_MULTICAST, + FM10K_MAC_VLAN_MSG_ID_MAX +}; + +enum fm10k_tlv_lport_state_attr_id { + FM10K_LPORT_STATE_MSG_DISABLE, + FM10K_LPORT_STATE_MSG_XCAST_MODE, + FM10K_LPORT_STATE_MSG_READY, + FM10K_LPORT_STATE_MSG_MAX +}; + +enum fm10k_tlv_1588_attr_id { + FM10K_1588_MSG_TIMESTAMP, + FM10K_1588_MSG_MAX +}; + +#define FM10K_VF_MSG_MSIX_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) + +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[]; +#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \ + fm10k_mac_vlan_msg_attr, func) + +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; +#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ + fm10k_lport_state_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; +#define FM10K_VF_MSG_1588_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) + +extern struct fm10k_info fm10k_vf_info; +#endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 801da392a20e23dc84e176579720f0a106695776..f1e33f896439b1d6ccbc5f2428d9aef05fcbcee3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -144,6 +144,8 @@ enum i40e_state_t { __I40E_PTP_TX_IN_PROGRESS, __I40E_BAD_EEPROM, __I40E_DOWN_REQUESTED, + __I40E_FD_FLUSH_REQUESTED, + __I40E_RESET_FAILED, }; enum i40e_interrupt_policy { @@ -250,6 +252,11 @@ struct i40e_pf { u16 fdir_pf_active_filters; u16 fd_sb_cnt_idx; u16 fd_atr_cnt_idx; + unsigned long fd_flush_timestamp; + u32 fd_flush_cnt; + u32 fd_add_err; + u32 fd_atr_cnt; + u32 fd_tcp_rule; #ifdef CONFIG_I40E_VXLAN __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; @@ -310,6 +317,7 @@ struct i40e_pf { u32 tx_timeout_count; u32 tx_timeout_recovery_level; unsigned long 
tx_timeout_last_recovery; + u32 tx_sluggish_count; u32 hw_csum_rx_error; u32 led_status; u16 corer_count; /* Core reset count */ @@ -608,6 +616,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, void i40e_fdir_check_and_reenable(struct i40e_pf *pf); int i40e_get_current_fd_count(struct i40e_pf *pf); int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +int i40e_get_current_atr_cnt(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index b29c157b1f57b7d587f00ff208a0124991d06bdb..72f5d25a222f0769ea514d5789407a543e66c3c2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -840,7 +840,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, /* bump the tail */ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -891,7 +892,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -987,7 +988,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, e->msg_size); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index df43e7c6777c238423af02e9171bf418ad08e339..30056b25d94e6c331d9456c7171e3b51b5b7e754 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -75,13 +75,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer + * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. 
**/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer) + void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); u8 *aq_buffer = (u8 *)buffer; u32 data[4]; u32 i = 0; @@ -105,7 +107,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { + if (buf_len < len) + len = buf_len; + for (i = 0; i < len; i++) { data[((i % 16) / 4)] |= ((u32)aq_buffer[i]) << (8 * (i % 4)); if ((i % 16) == 15) { @@ -748,6 +752,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: media = I40E_MEDIA_TYPE_FIBER; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 5a0cabeb35ed7f21e37a523a1418981fb23d0836..7067f4b9159c99e9720484ca2ea5d7fe574fed5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1356,6 +1356,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "emp reset count: %d\n", pf->empr_count); dev_info(&pf->pdev->dev, "pf reset count: %d\n", pf->pfr_count); + dev_info(&pf->pdev->dev, + "pf tx sluggish count: %d\n", + pf->tx_sluggish_count); } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { struct i40e_aqc_query_port_ets_config_resp *bw_data; struct i40e_dcbx_config *cfg = diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e8ba7470700af1abaaf33826c7bffc8409666b9e..1dda467ae1ac62bdc5428e4c6086c322fdac597d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -145,6 +145,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), @@ -312,7 +313,10 @@ static int i40e_get_settings(struct net_device *netdev, break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->supported |= SUPPORTED_1000baseT_Full; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: @@ -351,7 +355,8 @@ static int i40e_get_settings(struct net_device *netdev, break; default: /* if we got here and link is up something bad is afoot */ - WARN_ON(link_up); + netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + hw_link_info->phy_type); } no_valid_phy_type: @@ -461,7 +466,8 @@ static int i40e_set_settings(struct net_device *netdev, if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && - hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE) + hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; /* get our own copy of the bits to check against 
*/ @@ -492,11 +498,10 @@ static int i40e_set_settings(struct net_device *netdev, if (status) return -EAGAIN; - /* Copy link_speed and abilities to config in case they are not + /* Copy abilities to config in case autoneg is not * set below */ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - config.link_speed = abilities.link_speed; config.abilities = abilities.abilities; /* Check autoneg */ @@ -533,42 +538,38 @@ static int i40e_set_settings(struct net_device *netdev, return -EINVAL; if (advertise & ADVERTISED_100baseT_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) { - config.link_speed |= I40E_LINK_SPEED_100MB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_100MB; if (advertise & ADVERTISED_1000baseT_Full || advertise & ADVERTISED_1000baseKX_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) { - config.link_speed |= I40E_LINK_SPEED_1GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_1GB; if (advertise & ADVERTISED_10000baseT_Full || advertise & ADVERTISED_10000baseKX4_Full || advertise & ADVERTISED_10000baseKR_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) { - config.link_speed |= I40E_LINK_SPEED_10GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_10GB; if (advertise & ADVERTISED_40000baseKR4_Full || advertise & ADVERTISED_40000baseCR4_Full || advertise & ADVERTISED_40000baseSR4_Full || advertise & ADVERTISED_40000baseLR4_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) { - config.link_speed |= I40E_LINK_SPEED_40GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_40GB; - if (change) { + if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; - /* If link is up set link and an so changes take effect */ - if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* set link and auto negotiation so changes take effect */ + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* If link is up put link down */ + if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); @@ -685,6 +686,13 @@ static int i40e_set_pauseparam(struct net_device *netdev, else return -EINVAL; + /* Tell the OS link is going down, the link will go back up when fw + * says it is ready asynchronously + */ + netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + /* Set the fc mode and only restart an if link is up*/ status = i40e_set_fc(hw, &aq_failures, link_up); @@ -1977,6 +1985,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; int ret = 0; + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); i40e_fdir_check_and_reenable(pf); @@ -2010,6 +2025,13 @@ static int 
i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) return -ENOSPC; + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index eddec6ba095b7105118962438c9914d40acd97b4..ed5f1c15fb0f4cd5fe7ce797920fd3b6cd21f7df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -37,9 +37,9 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" -#define DRV_VERSION_MAJOR 0 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 21 +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 0 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1239,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address + * + * Some older firmware configurations set up a default promiscuous VLAN + * filter that needs to be removed. **/ -static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; @@ -1248,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) - return; + return -EINVAL; + memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); if (aq_ret) - dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n"); + return -ENOENT; + + return 0; } /** @@ -1385,18 +1391,30 @@ static int i40e_set_mac(struct net_device *netdev, void *p) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; struct i40e_mac_filter *f; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - netdev_info(netdev, "set mac address=%pM\n", addr->sa_data); + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + addr->sa_data); + return 0; + } if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return -EADDRNOTAVAIL; + if (ether_addr_equal(hw->mac.addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", + hw->mac.addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, @@ -1410,25 +1428,34 @@ static int i40e_set_mac(struct net_device *netdev, void *p) } } - f = i40e_find_mac(vsi, addr->sa_data, false, true); - if (!f) { - /* In order to be sure to not drop any packets, add the - * new address first 
then delete the old one. - */ - f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, - false, false); - if (!f) - return -ENOMEM; + if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_sync_vsi_filters(vsi); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, netdev->dev_addr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); - i40e_sync_vsi_filters(vsi); } - f->is_laa = true; - if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) - ether_addr_copy(netdev->dev_addr, addr->sa_data); + if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { + struct i40e_aqc_add_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, hw->mac.addr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, + false, false); + if (f) + f->is_laa = true; + } + + i40e_sync_vsi_filters(vsi); + ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; } @@ -1796,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) kfree(add_list); add_list = NULL; - if (add_happened && (!aq_ret)) { - /* do nothing */; - } else if (add_happened && (aq_ret)) { + if (add_happened && aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { dev_info(&pf->pdev->dev, "add filter failed, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); @@ -4480,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi) netif_carrier_on(vsi->netdev); } else if (vsi->netdev) { i40e_print_link_message(vsi, false); + /* need to check for qualified module here*/ + if ((pf->hw.phy.link_info.link_info & + I40E_AQ_MEDIA_AVAILABLE) && + (!(pf->hw.phy.link_info.an_info & + I40E_AQ_QUALIFIED_MODULE))) + netdev_err(vsi->netdev, + "the driver failed to link because an unqualified module was detected."); } /* replay FDIR SB filters */ - if (vsi->type == I40E_VSI_FDIR) + if (vsi->type == I40E_VSI_FDIR) { + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = 0; + if (pf->fd_tcp_rule > 0) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + pf->fd_tcp_rule = 0; + } i40e_fdir_filter_restore(vsi); + } i40e_service_event_schedule(pf); return 0; @@ -5125,6 +5166,7 @@ int i40e_get_current_fd_count(struct i40e_pf *pf) I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); return fcnt_prog; } + /** * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled * @pf: board private structure @@ -5133,15 +5175,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) { u32 fcnt_prog, fcnt_avail; + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + /* Check if, FD SB or ATR was auto disabled and if there is enough room * to re-enable */ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->flags & I40E_FLAG_FD_SB_ENABLED)) - return; fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; - if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { + if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || + (pf->fd_add_err == 0) || + (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; @@ -5158,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) } } +#define I40E_MIN_FD_FLUSH_INTERVAL 10 +/** + * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB + * @pf: board private structure + **/ +static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) +{ + int flush_wait_retry = 50; + int reg; + + if (time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->fd_flush_timestamp = jiffies; + pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + } + } +} + +/** + * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed + * @pf: board private structure + **/ +int i40e_get_current_atr_cnt(struct i40e_pf *pf) +{ + return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; +} + +/* We can see up to 256 filter programming desc in transit if the filters are + * being applied really fast; before we see the first + * filter miss error on Rx queue 0. Accumulating enough error messages before + * reacting will make sure we don't cause flush too often. 
+ */ +#define I40E_MAX_FD_PROGRAM_ERROR 256 + /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { - if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) - return; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state)) return; + + if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && + (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && + (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) + i40e_fdir_flush_and_replay(pf); + i40e_fdir_check_and_reenable(pf); - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->flags & I40E_FLAG_FD_SB_ENABLED)) - pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; } /** @@ -5184,7 +5289,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { - if (!vsi) + if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) return; switch (vsi->type) { @@ -5420,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf, memcpy(&pf->hw.phy.link_info_old, hw_link_info, sizeof(pf->hw.phy.link_info_old)); + /* check for unqualified module, if link is down */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP))) + dev_err(&pf->pdev->dev, + "The driver failed to link because an unqualified module was detected.\n"); + /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; @@ -5456,6 +5568,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) u32 oldval; u32 val; + /* Do not run clean AQ when PF reset fails */ + if (test_bit(__I40E_RESET_FAILED, &pf->state)) + return; + /* check for error indications */ val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; @@ -5861,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); - goto end_core_reset; + set_bit(__I40E_RESET_FAILED, &pf->state); + goto clear_recovery; } pf->pfr_count++; if (test_bit(__I40E_DOWN, &pf->state)) - goto end_core_reset; + goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); - goto end_core_reset; + goto clear_recovery; } /* re-verify the eeprom if we just had an EMP reset */ @@ -5991,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) i40e_send_version(pf); end_core_reset: + clear_bit(__I40E_RESET_FAILED, &pf->state); +clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); } @@ -6036,9 +6155,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; - dev_info(&pf->pdev->dev, - "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", - event, queue, pf_num, vf_num); + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", + event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } @@ -6050,9 +6169,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) 
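The i40e_fdir_flush_and_replay() hunk above follows a common pattern: set a self-clearing "flush" bit, poll it with a bounded retry count and a small delay, and only replay the software filter state once the hardware reports the table is clean. The standalone user-space sketch below is not part of the patch; trigger_flush(), read_status() and replay_filters() are hypothetical stand-ins for wr32(), rd32() and i40e_fdir_filter_restore(), and the 50-retry/5 ms numbers simply mirror the hunk.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int fake_ctl_reg;	/* stands in for the FD control register */

static void trigger_flush(void)		/* stands in for wr32(CLEARFDTABLE) */
{
	fake_ctl_reg = 0x1;
}

static unsigned int read_status(void)	/* stands in for rd32() */
{
	static int polls;

	/* pretend the hardware clears the bit after a few polls */
	if (++polls >= 3)
		fake_ctl_reg = 0;
	return fake_ctl_reg;
}

static void replay_filters(void)	/* stands in for the sideband replay */
{
	printf("replaying sideband filters\n");
}

int main(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };
	int retries = 50;
	bool done = false;

	trigger_flush();
	do {
		nanosleep(&delay, NULL);	/* ~5 ms between polls, as above */
		if (!(read_status() & 0x1)) {
			done = true;
			break;
		}
	} while (retries--);

	if (!done)
		fprintf(stderr, "flush did not complete, needs more time\n");
	else
		replay_filters();		/* only replay once hardware is clean */
	return 0;
}

If the retries run out, the sketch, like the driver, only warns and leaves the replay for a later pass instead of blocking indefinitely.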
I40E_GL_MDET_RX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; - dev_info(&pf->pdev->dev, - "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", - event, queue, func); + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", + event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } @@ -6061,17 +6180,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); - dev_info(&pf->pdev->dev, - "MDD TX event is for this function 0x%08x, requesting PF reset.\n", - reg); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); - dev_info(&pf->pdev->dev, - "MDD RX event is for this function 0x%08x, requesting PF reset.\n", - reg); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } /* Queue belongs to the PF, initiate a reset */ @@ -6088,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i); + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i); + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); } if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { @@ -7086,6 +7203,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; + pf->fdir_pf_active_filters = 0; + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) @@ -7352,7 +7474,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, - .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck, + .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, #ifdef CONFIG_I40E_VXLAN .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, @@ -7421,14 +7543,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); - /* The following two steps are necessary to prevent reception - * of tagged packets - by default the NVM loads a MAC-VLAN - * filter that will accept any tagged packet. This is to - * prevent that during normal operations until a specific - * VLAN tag filter has been set. + /* The following steps are necessary to prevent reception + * of tagged packets - some older NVM configurations load a + * default MAC-VLAN filter that accepts any tagged packet, + * which must be replaced by a normal filter.
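The i40e_handle_mdd_event() hunks above decode a latched event register into event, queue and function fields with mask-and-shift pairs before clearing it. The standalone sketch below only illustrates that decode; the MDET_* layout is invented for the example and does not correspond to the real I40E_GL_MDET_* definitions in i40e_register.h.

#include <stdint.h>
#include <stdio.h>

#define MDET_VALID_MASK   0x80000000u	/* bit 31: an event is latched */
#define MDET_EVENT_MASK   0x0ff00000u	/* example field positions only */
#define MDET_EVENT_SHIFT  20
#define MDET_QUEUE_MASK   0x000fff00u
#define MDET_QUEUE_SHIFT  8
#define MDET_FUNC_MASK    0x000000ffu
#define MDET_FUNC_SHIFT   0

static void decode_mdet(uint32_t reg)
{
	unsigned int event, queue, func;

	if (!(reg & MDET_VALID_MASK))
		return;			/* nothing latched, nothing to report */

	event = (reg & MDET_EVENT_MASK) >> MDET_EVENT_SHIFT;
	queue = (reg & MDET_QUEUE_MASK) >> MDET_QUEUE_SHIFT;
	func  = (reg & MDET_FUNC_MASK)  >> MDET_FUNC_SHIFT;

	printf("malicious driver event 0x%02x on queue %u of function 0x%02x\n",
	       event, queue, func);
}

int main(void)
{
	decode_mdet(0x80a04203u);	/* a fabricated register value */
	return 0;
}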
*/ - i40e_rm_default_mac_filter(vsi, mac_addr); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + i40e_add_filter(vsi, mac_addr, + I40E_VLAN_ANY, false, true); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -7644,7 +7766,22 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) f_count++; if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - i40e_aq_mac_address_write(&vsi->back->hw, + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, f->macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + } + + i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, f->macaddr, NULL); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 949a9a01778b9c17aad87a78af9a933ac191856a..0988b5c1fe87d080e950f146c4c0c0b8ae231189 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -52,10 +52,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); /* debug function for adminq */ -void i40e_debug_aq(struct i40e_hw *hw, - enum i40e_debug_mask mask, - void *desc, - void *buffer); +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 369848e107f8ed37952b4849ed632655f598b6ba..3195d82e49425369951185ccc1e664a8485ea716 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -224,15 +224,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } - return err ? -EOPNOTSUPP : 0; } @@ -276,10 +280,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, tcp->source = fd_data->src_port; if (add) { + pf->fd_tcp_rule++; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; } + } else { + pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? 
+ (pf->fd_tcp_rule - 1) : 0; + if (pf->fd_tcp_rule == 0) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + } } fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; @@ -287,12 +299,17 @@ if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } return err ? -EOPNOTSUPP : 0; @@ -355,13 +372,18 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } } @@ -443,8 +465,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { - dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", - rx_desc->wb.qword0.hi_dword.fd_id); + if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + pf->fd_add_err++; + /* store the current atr filter count */ + pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); /* filter programming failed most likely due to table full */ fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); @@ -454,29 +482,21 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, * FD ATR/SB and then re-enable it when there is room.
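The bookkeeping added above (fd_add_err, fd_atr_cnt) feeds two thresholds: the table-full check that follows in i40e_fd_handle_status() stops accepting new rules once the table is within a small margin of full, while i40e_fdir_check_and_reenable() only re-enables once a larger head room is free again. The standalone sketch below shows that hysteresis in isolation; the margin values are illustrative, not the driver's I40E_FDIR_BUFFER_* constants.

#include <stdbool.h>
#include <stdio.h>

#define FD_BUFFER_FULL_MARGIN 10	/* stop adding rules this close to the limit */
#define FD_BUFFER_HEAD_ROOM   32	/* require this much space before re-enabling */

static bool fd_should_auto_disable(unsigned int programmed, unsigned int capacity)
{
	/* mirrors: fcnt_prog >= (fcnt_avail - FULL_MARGIN) */
	return programmed + FD_BUFFER_FULL_MARGIN >= capacity;
}

static bool fd_can_reenable(unsigned int programmed, unsigned int capacity)
{
	/* mirrors: fcnt_prog < (fcnt_avail - HEAD_ROOM) */
	return programmed + FD_BUFFER_HEAD_ROOM < capacity;
}

int main(void)
{
	unsigned int capacity = 128;

	for (unsigned int prog = 90; prog <= 128; prog += 10)
		printf("prog=%3u disable=%d reenable=%d\n", prog,
		       fd_should_auto_disable(prog, capacity),
		       fd_can_reenable(prog, capacity));
	return 0;
}

Keeping the re-enable threshold well below the disable threshold prevents the two paths from toggling on every filter that is programmed or evicted.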
*/ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { - /* Turn off ATR first */ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !(pf->auto_disable_flags & - I40E_FLAG_FD_ATR_ENABLED)) { - dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); - pf->auto_disable_flags |= - I40E_FLAG_FD_ATR_ENABLED; - pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; - } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; } } else { - dev_info(&pdev->dev, "FD filter programming error\n"); + dev_info(&pdev->dev, + "FD filter programming failed due to incorrect filter parameters\n"); } } else if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", + dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", rx_desc->wb.qword0.hi_dword.fd_id); } } @@ -587,6 +607,7 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { u32 tx_pending = i40e_get_tx_pending(tx_ring); + struct i40e_pf *pf = tx_ring->vsi->back; bool ret = false; clear_check_for_tx_hang(tx_ring); @@ -603,10 +624,17 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * pending but without time to complete it yet. */ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - tx_pending) { + (tx_pending >= I40E_MIN_DESC_PENDING)) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && + (tx_pending < I40E_MIN_DESC_PENDING) && + (tx_pending > 0)) { + if (I40E_DEBUG_FLOW & pf->hw.debug_mask) + dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", + tx_pending, tx_ring->queue_index); + pf->tx_sluggish_count++; } else { /* update completed stats and disarm the hang check */ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; @@ -674,7 +702,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buf->skb); + dev_consume_skb_any(tx_buf->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1213,7 +1241,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); - skb->encapsulation = ipv4_tunnel || ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; /* Rx csum enabled and ip headers found? 
*/ @@ -1287,6 +1314,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, } skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; return; @@ -2024,6 +2052,47 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } +/** + * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40e_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +#ifdef I40E_FCOE +int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#else +static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#endif +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -2167,8 +2236,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - writel(i, tx_ring->tail); + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); return; @@ -2189,47 +2262,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; } -/** - * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -/** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -#ifdef I40E_FCOE -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#else -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#endif -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - /** * i40e_xmit_descriptor_count - calculate number of tx descriptors needed * @skb: send buffer @@ -2344,8 +2376,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 73f4fa4256974ed68479e5f6bebd44ee5d50d6bc..d7a625a6a14f54fa1b79456df9de7d14f9ff1809 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -121,6 +121,7 @@ enum i40e_dyn_idx_t { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3ac6a0d2f1433d8bff3323a28f77af063a2ddb89..4eeed267e4b71ce9d242d3a71fccd5ad38600c8f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -73,7 +73,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, { struct i40e_pf *pf = vf->pf; - return qid < pf->vsi[vsi_id]->num_queue_pairs; + return qid < pf->vsi[vsi_id]->alloc_queue_pairs; } /** @@ -350,6 +350,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.prefena = 1; + rx_ctx.l2tsel = 1; /* clear the context in the HMC */ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); @@ -468,7 +469,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); /* map PF queues to VF queues */ - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { + for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); @@ -477,7 +478,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VSI */ for (j = 0; j < 7; j++) { - if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) { + if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { reg = 0x07FF07FF; /* unused */ } else { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, @@ -584,7 +585,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); if (ret) goto error_alloc; - total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs; + total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, 
&vf->vf_caps); /* store the total qps number for the runtime @@ -706,35 +707,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); } - -/** - * i40e_vfs_are_assigned - * @pf: pointer to the pf structure - * - * Determine if any VFs are assigned to VMs - **/ -static bool i40e_vfs_are_assigned(struct i40e_pf *pf) -{ - struct pci_dev *pdev = pf->pdev; - struct pci_dev *vfdev; - - /* loop through all the VFs to see if we own any that are assigned */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL); - while (vfdev) { - /* if we don't own it we don't care */ - if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) { - /* if it is assigned we cannot release it */ - if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - return true; - } - - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - I40E_DEV_ID_VF, - vfdev); - } - - return false; -} #ifdef CONFIG_PCI_IOV /** @@ -842,7 +814,7 @@ void i40e_free_vfs(struct i40e_pf *pf) * assigned. Setting the number of VFs to 0 through sysfs is caught * before this function ever gets called. */ - if (!i40e_vfs_are_assigned(pf)) { + if (!pci_vfs_assigned(pf->pdev)) { pci_disable_sriov(pf->pdev); /* Acknowledge VFLR for all VFS. Without this, VFs will fail to * work correctly when SR-IOV gets re-enabled. @@ -979,7 +951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (num_vfs) return i40e_pci_sriov_enable(pdev, num_vfs); - if (!i40e_vfs_are_assigned(pf)) { + if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -1123,7 +1095,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_index]->num_queue_pairs; + pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; memcpy(vfres->vsi_res[i].default_mac_addr, vf->default_lan_addr.addr, ETH_ALEN); i++; @@ -1209,6 +1181,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_virtchnl_vsi_queue_config_info *qci = (struct i40e_virtchnl_vsi_queue_config_info *)msg; struct i40e_virtchnl_queue_pair_info *qpi; + struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id; i40e_status aq_ret = 0; int i; @@ -1242,6 +1215,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } + /* set vsi num_queue_pairs in use to num configured by vf */ + pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; error_param: /* send the response to the vf */ @@ -2094,7 +2069,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); - ret = 0; error_param: return ret; @@ -2419,7 +2393,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) * * Enable or disable VF spoof checking **/ -int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable) +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 
63e7e0d81ad22754622695dabc1a0475885477ee..0adc61e1052db9dabdcbea3d8930ec709e1868ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -122,7 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); -int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable); +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 00300603361464b4e9f3b20da7774a1069bdf071..f206be9178422afc01a955f481f8e523cbb3093c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -788,7 +788,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* bump the tail */ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -842,7 +843,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, + buff_size); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -938,7 +940,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, hw->aq.nvm_busy = false; i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 4ea90bf239bb204ed5603b66680413191f46bf66..9525605519645eb241f5473bcdd345a0afe21a09 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -75,13 +75,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer + * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. 
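The buf_len parameter threaded through the adminq hunks above exists so that i40evf_debug_aq(), shown in the following hunk, can clamp the dump length: the descriptor's datalen comes from firmware and may exceed the buffer the caller actually mapped. A minimal user-space sketch of that clamp, with a hypothetical debug_dump() in place of the driver helper:

#include <stdint.h>
#include <stdio.h>

static void debug_dump(const uint8_t *buf, uint16_t datalen, uint16_t buf_len)
{
	uint16_t len = datalen;

	if (buf_len < len)	/* never trust datalen past the real buffer size */
		len = buf_len;

	for (uint16_t i = 0; i < len; i++)
		printf("%02x%s", buf[i], (i % 16 == 15) ? "\n" : " ");
	printf("\n");
}

int main(void)
{
	uint8_t buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

	/* firmware claims 32 bytes, but only 8 are backed by memory */
	debug_dump(buf, 32, sizeof(buf));
	return 0;
}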
**/ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer) + void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); u8 *aq_buffer = (u8 *)buffer; u32 data[4]; u32 i = 0; @@ -105,7 +107,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { + if (buf_len < len) + len = buf_len; + for (i = 0; i < len; i++) { data[((i % 16) / 4)] |= ((u32)aq_buffer[i]) << (8 * (i % 4)); if ((i % 16) == 15) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 849edcc2e398f7bfdaaea5eaf078ba714977ca5f..9173834825ac4cc1bbe5c3ecbc85e77db693c76f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -53,10 +53,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, bool i40evf_asq_done(struct i40e_hw *hw); /* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, - enum i40e_debug_mask mask, - void *desc, - void *buffer); +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 95a3ec236b4951ab7166637159393df569da97fb..04c7c1557a0c770ab0f256d3012cd3b9be70240e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -163,11 +163,13 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * pending but without time to complete it yet. */ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - tx_pending) { + (tx_pending >= I40E_MIN_DESC_PENDING)) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else { + } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || + !(tx_pending < I40E_MIN_DESC_PENDING) || + !(tx_pending > 0)) { /* update completed stats and disarm the hang check */ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); @@ -744,7 +746,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); - skb->encapsulation = ipv4_tunnel || ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; /* Rx csum enabled and ip headers found? 
*/ @@ -818,6 +819,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, } skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; return; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 8bc6858163b0f1b668e56d3c0d682574cc4582d5..f6dcf9dd9290d95554c7505f4bbd9f5832185597 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -121,6 +121,7 @@ enum i40e_dyn_idx_t { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 38429fae4fcfa2ccb1db770e7e735e27303b8c2d..c51bc7a33bc50898dedf7f1fdfaa5100c45e501a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "0.9.40" +#define DRV_VERSION "1.0.5" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 236a6183a86523a3aef0430b3a575081b32ba7b5..051ea94bdcd3e8046181b361d8985c51d15ea19c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -2548,11 +2548,13 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) /** * igb_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE based on setting in dev_spec structure. * **/ -s32 igb_set_eee_i350(struct e1000_hw *hw) +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) { u32 ipcnfg, eeer; @@ -2566,7 +2568,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) if (!(hw->dev_spec._82575.eee_disable)) { u32 eee_su = rd32(E1000_EEE_SU); - ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); @@ -2593,11 +2604,13 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) /** * igb_set_eee_i354 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE legacy mode based on setting in dev_spec structure. 
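The igb_set_eee_i350()/igb_set_eee_i354() changes above replace an unconditional "advertise both speeds" write with per-speed set-or-clear logic driven by the new adv1G/adv100M flags. A small standalone sketch of that pattern, using illustrative bit values rather than the real E1000_IPCNFG_*/E1000_EEE_ADV_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EEE_ADV_100M  (1u << 0)
#define EEE_ADV_1G    (1u << 1)

static uint32_t eee_build_adv(uint32_t reg, bool adv1g, bool adv100m)
{
	if (adv100m)
		reg |= EEE_ADV_100M;	/* advertise 100M EEE */
	else
		reg &= ~EEE_ADV_100M;

	if (adv1g)
		reg |= EEE_ADV_1G;	/* advertise 1G EEE */
	else
		reg &= ~EEE_ADV_1G;

	return reg;
}

int main(void)
{
	/* start from a register that already advertises both speeds */
	uint32_t reg = EEE_ADV_100M | EEE_ADV_1G;

	printf("100M only: 0x%x\n", eee_build_adv(reg, false, true));
	printf("1G only:   0x%x\n", eee_build_adv(reg, true, false));
	return 0;
}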
* **/ -s32 igb_set_eee_i354(struct e1000_hw *hw) +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = 0; @@ -2636,8 +2649,16 @@ s32 igb_set_eee_i354(struct e1000_hw *hw) if (ret_val) goto out; - phy_data |= E1000_EEE_ADV_100_SUPPORTED | - E1000_EEE_ADV_1000_SUPPORTED; + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, phy_data); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index b407c55738fadf0a333ee7947fedd2c0c31638a1..2154aea7aa7e70c9d5083bc3631a37f885d48bd4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -263,8 +263,8 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); u16 igb_rxpbs_adjust_82580(u32 data); s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); -s32 igb_set_eee_i350(struct e1000_hw *); -s32 igb_set_eee_i354(struct e1000_hw *); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index ce55ea5d750cd7edb69d90a6f6ff9a13e8e47f3f..2003b3756ba2d7321f8735b302f467852ed85fdc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -265,11 +265,6 @@ struct e1000_hw_stats { u64 b2ogprc; }; -struct e1000_phy_stats { - u32 idle_errors; - u32 receive_errors; -}; - struct e1000_host_mng_dhcp_cookie { u32 signature; u8 status; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 06102d1f7c0362208118ec99dcaba0e6db89eeb3..82d891e183b18f3e2e675dc6778b85bf555e67cc 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -403,7 +403,6 @@ struct igb_adapter { struct e1000_hw hw; struct e1000_hw_stats stats; struct e1000_phy_info phy_info; - struct e1000_phy_stats phy_stats; u32 test_icr; struct igb_ring test_tx_ring; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c737d1f4083829ae0eed6e7ecff1d29b5c75239e..02cfd3b14762cbf884ef1b0f4586735f14bee3c9 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2675,6 +2675,7 @@ static int igb_set_eee(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; s32 ret_val; if ((hw->mac.type < e1000_i350) || @@ -2701,12 +2702,14 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } - if (edata->advertised & - ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { dev_err(&adapter->pdev->dev, - "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); return -EINVAL; } + adv100m_eee = !!(edata->advertised 
& ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); } else if (!edata->eee_enabled) { dev_err(&adapter->pdev->dev, @@ -2718,10 +2721,6 @@ static int igb_set_eee(struct net_device *netdev, if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { hw->dev_spec._82575.eee_disable = !edata->eee_enabled; adapter->flags |= IGB_FLAG_EEE; - if (hw->mac.type == e1000_i350) - igb_set_eee_i350(hw); - else - igb_set_eee_i354(hw); /* reset link */ if (netif_running(netdev)) @@ -2730,6 +2729,17 @@ static int igb_set_eee(struct net_device *netdev, igb_reset(adapter); } + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cb14bbdfb0562df213253d18c83eaf78c6a63f2c..ae59c0b108c55bf2d802a9e14ed6c92b19758635 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -58,7 +58,7 @@ #define MAJ 5 #define MIN 2 -#define BUILD 13 +#define BUILD 15 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -2012,10 +2012,10 @@ void igb_reset(struct igb_adapter *adapter) case e1000_i350: case e1000_i210: case e1000_i211: - igb_set_eee_i350(hw); + igb_set_eee_i350(hw, true, true); break; case e1000_i354: - igb_set_eee_i354(hw); + igb_set_eee_i354(hw, true, true); break; default: break; @@ -2619,7 +2619,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case e1000_i210: case e1000_i211: /* Enable EEE for internal copper PHY devices */ - err = igb_set_eee_i350(hw); + err = igb_set_eee_i350(hw, true, true); if ((!err) && (!hw->dev_spec._82575.eee_disable)) { adapter->eee_advert = @@ -2630,7 +2630,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case e1000_i354: if ((rd32(E1000_CTRL_EXT) & E1000_CTRL_EXT_LINK_MODE_SGMII)) { - err = igb_set_eee_i354(hw); + err = igb_set_eee_i354(hw, true, true); if ((!err) && (!hw->dev_spec._82575.eee_disable)) { adapter->eee_advert = @@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
*/ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + static void igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) @@ -4915,13 +4950,17 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_ring->next_to_use = i; - writel(i, tx_ring->tail); + /* Make sure there is space in the ring for the next send. */ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } return; dma_error: @@ -4941,41 +4980,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_ring->next_to_use = i; } -static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - struct net_device *netdev = tx_ring->netdev; - - netif_stop_subqueue(netdev, tx_ring->queue_index); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. - */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (igb_desc_unused(tx_ring) < size) - return -EBUSY; - - /* A reprieve! */ - netif_wake_subqueue(netdev, tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp2); - tx_ring->tx_stats.restart_queue2++; - u64_stats_update_end(&tx_ring->tx_syncp2); - - return 0; -} - -static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - if (igb_desc_unused(tx_ring) >= size) - return 0; - return __igb_maybe_stop_tx(tx_ring, size); -} - netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) { @@ -5046,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, igb_tx_map(tx_ring, first, hdr_len); - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -5205,14 +5206,11 @@ void igb_update_stats(struct igb_adapter *adapter, struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 reg, mpc; - u16 phy_tmp; int i; u64 bytes, packets; unsigned int start; u64 _bytes, _packets; -#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - /* Prevent stats update while adapter is being reset, or if the pci * connection is down. 
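The igb_tx_map()/__igb_maybe_stop_tx() rework above combines two ideas: check ring headroom right after queueing a frame, using a stop-queue/barrier/re-check sequence so a racing cleanup cannot strand the queue, and defer the tail (doorbell) write while the stack signals xmit_more. The sketch below is a single-threaded, user-space analogue under those assumptions; the ring, queue state and doorbell are simulated, and C11 atomics stand in for netif_stop_subqueue(), smp_mb() and writel().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	int unused;		/* free descriptors */
	atomic_bool stopped;	/* queue state as seen by the stack */
};

static int maybe_stop_tx(struct ring *r, int needed)
{
	if (r->unused >= needed)
		return 0;

	atomic_store(&r->stopped, true);		/* stop the queue */
	atomic_thread_fence(memory_order_seq_cst);	/* order vs. the cleanup path */

	if (r->unused < needed)				/* still no room: stay stopped */
		return -1;

	atomic_store(&r->stopped, false);		/* a reprieve: restart */
	return 0;
}

static void xmit(struct ring *r, bool xmit_more)
{
	r->unused--;			/* consume one descriptor */
	maybe_stop_tx(r, 4);		/* DESC_NEEDED-style headroom check */

	/* only ring the doorbell when batching ends or the queue stopped */
	if (!xmit_more || atomic_load(&r->stopped))
		printf("doorbell: tail written (unused=%d)\n", r->unused);
	else
		printf("doorbell deferred (unused=%d)\n", r->unused);
}

int main(void)
{
	struct ring r = { .unused = 6, .stopped = false };

	xmit(&r, true);		/* batched: no tail write yet */
	xmit(&r, true);
	xmit(&r, false);	/* end of batch: tail written */
	return 0;
}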
*/ @@ -5373,15 +5371,6 @@ void igb_update_stats(struct igb_adapter *adapter, /* Tx Dropped needs to be maintained elsewhere */ - /* Phy Stats */ - if (hw->phy.media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && - (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - } - /* Management Stats */ adapter->stats.mgptc += rd32(E1000_MGTPTC); adapter->stats.mgprc += rd32(E1000_MGTPRC); @@ -6385,7 +6374,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buffer->skb); + dev_consume_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -6767,113 +6756,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, return true; } -/** - * igb_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max_len: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, and GRO offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. - **/ -static unsigned int igb_get_headlen(unsigned char *data, - unsigned int max_len) -{ - union { - unsigned char *network; - /* l2 headers */ - struct ethhdr *eth; - struct vlan_hdr *vlan; - /* l3 headers */ - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - __be16 protocol; - u8 nexthdr = 0; /* default to not TCP */ - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_len < ETH_HLEN) - return max_len; - - /* initialize network frame pointer */ - hdr.network = data; - - /* set first protocol and move network header forward */ - protocol = hdr.eth->h_proto; - hdr.network += ETH_HLEN; - - /* handle any vlan tag if present */ - if (protocol == htons(ETH_P_8021Q)) { - if ((hdr.network - data) > (max_len - VLAN_HLEN)) - return max_len; - - protocol = hdr.vlan->h_vlan_encapsulated_proto; - hdr.network += VLAN_HLEN; - } - - /* handle L3 protocols */ - if (protocol == htons(ETH_P_IP)) { - if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) - return max_len; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return hdr.network - data; - - /* record next protocol if header is present */ - if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) - nexthdr = hdr.ipv4->protocol; - } else if (protocol == htons(ETH_P_IPV6)) { - if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) - return max_len; - - /* record next protocol */ - nexthdr = hdr.ipv6->nexthdr; - hlen = sizeof(struct ipv6hdr); - } else { - return hdr.network - data; - } - - /* relocate pointer to start of L4 header */ - hdr.network += hlen; - - /* finally sort out TCP */ - if (nexthdr == IPPROTO_TCP) { - if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) - return max_len; - - /* access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[12] & 0xF0) >> 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return hdr.network - data; - - hdr.network += hlen; - } else if (nexthdr == IPPROTO_UDP) { - if ((hdr.network - data) > (max_len - 
sizeof(struct udphdr))) - return max_len; - - hdr.network += sizeof(struct udphdr); - } - - /* If everything has gone correctly hdr.network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((hdr.network - data) < max_len) - return hdr.network - data; - else - return max_len; -} - /** * igb_pull_tail - igb specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on @@ -6918,7 +6800,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring, /* we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ - pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ac9f2148cdc5ca6d20e81a43170a9550ff6ffce6..5032a602d5c98265d63037d6aac1e1bfc3188c76 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -307,7 +307,6 @@ enum ixgbe_ring_f_enum { #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 31 #define IXGBE_MAX_DCBMACVLANS 8 @@ -386,119 +385,87 @@ struct ixgbe_q_vector { char name[IFNAMSIZ + 9]; #ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define IXGBE_QV_STATE_IDLE 0 -#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ -#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ -#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) -#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED) -#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ -#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) -#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) - spinlock_t lock; + atomic_t state; #endif /* CONFIG_NET_RX_BUSY_POLL */ /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; + #ifdef CONFIG_NET_RX_BUSY_POLL +enum ixgbe_qv_state_t { + IXGBE_QV_STATE_IDLE = 0, + IXGBE_QV_STATE_NAPI, + IXGBE_QV_STATE_POLL, + IXGBE_QV_STATE_DISABLE +}; + static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) { - - spin_lock_init(&q_vector->lock); - q_vector->state = IXGBE_QV_STATE_IDLE; + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* called from the device poll routine to get ownership of a q_vector */ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBE_QV_LOCKED) { - WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); - q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; - rc = false; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_NAPI); #ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) q_vector->tx.ring->stats.yields++; #endif - } else { - /* we don't care if someone yielded */ - q_vector->state = IXGBE_QV_STATE_NAPI; - } - 
spin_unlock_bh(&q_vector->lock); - return rc; + + return rc == IXGBE_QV_STATE_IDLE; } /* returns true is someone tried to get the qv while napi had it */ -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) { - int rc = false; - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | - IXGBE_QV_STATE_NAPI_YIELD)); - - if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) - rc = true; - /* will reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* called from ixgbe_low_latency_poll() */ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if ((q_vector->state & IXGBE_QV_LOCKED)) { - q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; - rc = false; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS - q_vector->rx.ring->stats.yields++; + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; #endif - } else { - /* preserve yield marks */ - q_vector->state |= IXGBE_QV_STATE_POLL; - } - spin_unlock_bh(&q_vector->lock); - return rc; + return rc == IXGBE_QV_STATE_IDLE; } /* returns true if someone tried to get the qv while it was locked */ -static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) { - int rc = false; - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); - - if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) - rc = true; - /* will reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* true if a socket is polling, even if it did not get the lock */ static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) { - WARN_ON(!(q_vector->state & IXGBE_QV_OWNED)); - return q_vector->state & IXGBE_QV_USER_PEND; + return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; } /* false if QV is currently owned */ static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBE_QV_OWNED) - rc = false; - q_vector->state |= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - - return rc; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_DISABLE); + + return rc == IXGBE_QV_STATE_IDLE; } #else /* CONFIG_NET_RX_BUSY_POLL */ @@ -643,9 +610,7 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. 
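The ixgbe busy-poll rework above replaces a spinlock plus flag bits with a single atomic state word: each path (NAPI, busy poll, disable) claims the vector with a compare-and-swap from IDLE and releases it by storing IDLE back. The standalone sketch below uses C11 atomics in place of the kernel's atomic_cmpxchg()/atomic_set(); the enum names follow the hunk, but the code is illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum qv_state { QV_IDLE, QV_NAPI, QV_POLL, QV_DISABLE };

struct q_vector {
	atomic_int state;
};

/* try to claim the vector for one owner; fails if anyone else holds it */
static bool qv_lock(struct q_vector *qv, int owner)
{
	int expected = QV_IDLE;

	return atomic_compare_exchange_strong(&qv->state, &expected, owner);
}

static void qv_unlock(struct q_vector *qv)
{
	atomic_store(&qv->state, QV_IDLE);	/* release: back to idle */
}

int main(void)
{
	struct q_vector qv;

	atomic_init(&qv.state, QV_IDLE);

	printf("napi lock:  %d\n", qv_lock(&qv, QV_NAPI));	/* 1: claimed */
	printf("poll lock:  %d\n", qv_lock(&qv, QV_POLL));	/* 0: NAPI owns it */
	qv_unlock(&qv);
	printf("disable:    %d\n", qv_lock(&qv, QV_DISABLE));	/* 1: idle again */
	return 0;
}

Compared with the old scheme there is no yield bookkeeping: a contender simply fails the cmpxchg and tries again later, and the NAPI unlock path flushes any pending GRO work before dropping back to idle.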
*/ u32 flags; -#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) #define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) #define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) @@ -760,8 +725,6 @@ struct ixgbe_adapter { u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; - u16 bd_number; - u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e4100b5737b67a3cb862428e120e67d51bd1b7fd..3ce4a258f94534da25304c9138e0bf28fdd3c9ff 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1303,7 +1303,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } + { .reg = 0 } }; /* default 82598 register test */ @@ -1331,7 +1331,7 @@ static const struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } + { .reg = 0 } }; static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, @@ -2267,7 +2267,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) adapter->tx_itr_setting = adapter->rx_itr_setting; -#if IS_ENABLED(CONFIG_BQL) /* detect ITR changes that require update of TXDCTL.WTHRESH */ if ((adapter->tx_itr_setting != 1) && (adapter->tx_itr_setting < IXGBE_100K_ITR)) { @@ -2279,7 +2278,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, (tx_itr_prev < IXGBE_100K_ITR)) need_reset = true; } -#endif + /* check the old value and enable RSC if necessary */ need_reset |= ixgbe_update_rsc(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 2d9451e3968624db0b527dd00e9a106adfae16be..ce40c77381e9d957de1e06cc86ffabad8b6998ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -696,46 +696,83 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) ixgbe_set_rss_queues(adapter); } -static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) +/** + * ixgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) { - int vector_threshold; + struct ixgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); - /* We'll want at least 2 (vector_threshold): - * 1) TxQ[0] + RxQ[0] handler - * 2) Other (Link Status Change, etc.) + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. 
*/ - vector_threshold = MIN_MSIX_COUNT; + vectors = min_t(int, vectors, num_online_cpus()); - /* - * The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (vectors < 0) { - /* Can't allocate enough MSI-X interrupts? Oh well. - * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI-X interrupts\n"); + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; kfree(adapter->msix_entries); adapter->msix_entries = NULL; - } else { - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - vectors -= NON_Q_VECTORS; - adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + + return vectors; } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; } static void ixgbe_add_ring(struct ixgbe_ring *ring, @@ -807,6 +844,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ixgbe_poll, 64); napi_hash_add(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); + +#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; @@ -1049,46 +1091,20 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) **/ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - int vector, v_budget, err; + int err; - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - * The default is to use pairs of vectors. 
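Note: the rewritten ixgbe_acquire_msix_vectors() above now owns the whole sequence: size the request by queue count and online CPUs, cap it at the hardware maximum, allocate the msix_entry array, and let pci_enable_msix_range() negotiate anything between the minimum and the request (it returns the granted count, or a negative errno). A hedged kernel-style sketch of that shape follows; the structure and field names are assumptions, not the driver's.

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

struct my_drv {
	struct pci_dev *pdev;
	struct msix_entry *msix_entries;
	int num_q_vectors;
};

/* Sketch: one vector per queue (capped by CPUs and the hardware limit)
 * plus one for non-queue causes; minimum acceptable is two.
 */
static int acquire_msix_range(struct my_drv *drv, int num_queues,
			      int max_hw_vectors)
{
	int i, vectors = min_t(int, num_queues, num_online_cpus()) + 1;

	vectors = min_t(int, vectors, max_hw_vectors);

	drv->msix_entries = kcalloc(vectors, sizeof(*drv->msix_entries),
				    GFP_KERNEL);
	if (!drv->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		drv->msix_entries[i].entry = i;

	/* returns the count actually granted, or a negative errno */
	vectors = pci_enable_msix_range(drv->pdev, drv->msix_entries,
					2, vectors);
	if (vectors < 0) {
		kfree(drv->msix_entries);
		drv->msix_entries = NULL;
		return vectors;
	}

	drv->num_q_vectors = vectors - 1;	/* leave out the non-queue vector */
	return 0;
}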
- */ - v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); - v_budget = min_t(int, v_budget, num_online_cpus()); - v_budget += NON_Q_VECTORS; + /* We will try to get MSI-X interrupts first */ + if (!ixgbe_acquire_msix_vectors(adapter)) + return; - /* - * At the same time, hardware can only support a maximum of - * hw.mac->max_msix_vectors vectors. With features - * such as RSS and VMDq, we can easily surpass the number of Rx and Tx - * descriptor queues supported by our device. Thus, we cap it off in - * those rare cases where the cpu count also exceeds our vector limit. + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. */ - v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (adapter->msix_entries) { - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbe_acquire_msix_vectors(adapter, v_budget); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - return; - } - /* disable DCB if number of TCs exceeds 1 */ + /* Disable DCB unless we only have a single traffic class */ if (netdev_get_num_tc(adapter->netdev) > 1) { - e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); + e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); if (adapter->hw.mac.type == ixgbe_mac_82598EB) @@ -1098,26 +1114,30 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; } + adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - /* disable SR-IOV */ + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); - /* disable RSS */ + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); adapter->ring_feature[RING_F_RSS].limit = 1; + /* recalculate number of queues now that many features have been + * changed or disabled. + */ ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; err = pci_enable_msi(adapter->pdev); - if (err) { - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n", - err); - return; - } - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. 
Error: %d\n", + err); + else + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 87bd53fdd209152dc14a09fbee8101582237c6e8..d677b5a23b5869a71a8d9f04fa7f7d1e6762ea05 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -440,7 +440,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {IXGBE_TXDCTL(0), "TXDCTL"}, /* List Terminator */ - {} + { .name = NULL } }; @@ -1094,7 +1094,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buffer->skb); + dev_consume_skb_any(tx_buffer->skb); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1521,120 +1521,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) ixgbe_release_rx_desc(rx_ring, i); } -/** - * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE - * @data: pointer to the start of the headers - * @max_len: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. - **/ -static unsigned int ixgbe_get_headlen(unsigned char *data, - unsigned int max_len) -{ - union { - unsigned char *network; - /* l2 headers */ - struct ethhdr *eth; - struct vlan_hdr *vlan; - /* l3 headers */ - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - __be16 protocol; - u8 nexthdr = 0; /* default to not TCP */ - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_len < ETH_HLEN) - return max_len; - - /* initialize network frame pointer */ - hdr.network = data; - - /* set first protocol and move network header forward */ - protocol = hdr.eth->h_proto; - hdr.network += ETH_HLEN; - - /* handle any vlan tag if present */ - if (protocol == htons(ETH_P_8021Q)) { - if ((hdr.network - data) > (max_len - VLAN_HLEN)) - return max_len; - - protocol = hdr.vlan->h_vlan_encapsulated_proto; - hdr.network += VLAN_HLEN; - } - - /* handle L3 protocols */ - if (protocol == htons(ETH_P_IP)) { - if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) - return max_len; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return hdr.network - data; - - /* record next protocol if header is present */ - if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) - nexthdr = hdr.ipv4->protocol; - } else if (protocol == htons(ETH_P_IPV6)) { - if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) - return max_len; - - /* record next protocol */ - nexthdr = hdr.ipv6->nexthdr; - hlen = sizeof(struct ipv6hdr); -#ifdef IXGBE_FCOE - } else if (protocol == htons(ETH_P_FCOE)) { - if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) - return max_len; - hlen = FCOE_HEADER_LEN; -#endif - } else { - return hdr.network - data; - } - - /* relocate pointer to start of L4 header */ - hdr.network += hlen; - - /* finally sort out TCP/UDP */ - if (nexthdr == IPPROTO_TCP) { - if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) - return max_len; - - /* access doff as a u8 to avoid unaligned access 
on ia64 */ - hlen = (hdr.network[12] & 0xF0) >> 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return hdr.network - data; - - hdr.network += hlen; - } else if (nexthdr == IPPROTO_UDP) { - if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) - return max_len; - - hdr.network += sizeof(struct udphdr); - } - - /* - * If everything has gone correctly hdr.network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((hdr.network - data) < max_len) - return hdr.network - data; - else - return max_len; -} - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@ -1793,7 +1679,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, * we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@ -2191,9 +2077,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return total_rx_packets; } @@ -3099,11 +2982,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * to or less than the number of on chip descriptors, which is * currently 40. */ -#if IS_ENABLED(CONFIG_BQL) if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) -#else - if (!ring->q_vector || (ring->q_vector->itr < 8)) -#endif txdctl |= (1 << 16); /* WTHRESH = 1 */ else txdctl |= (8 << 16); /* WTHRESH = 8 */ @@ -5300,15 +5179,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); - int numa_node = -1; + int ring_node = -1; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; if (tx_ring->q_vector) - numa_node = tx_ring->q_vector->numa_node; + ring_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) @@ -5320,7 +5199,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - set_dev_node(dev, numa_node); + set_dev_node(dev, ring_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, @@ -5384,15 +5263,15 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); - int numa_node = -1; + int ring_node = -1; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; if (rx_ring->q_vector) - numa_node = rx_ring->q_vector->numa_node; + ring_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) @@ -5404,7 +5283,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 
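Note: the hand-rolled header parser removed above is replaced by the shared eth_get_headlen() helper, and the Rx path then copies only that many bytes into the skb linear area ("pull tail") so the stack sees complete headers while the payload stays in the page fragment. A hedged sketch of that copy step, using the helper as this kernel era exposes it (eth_get_headlen(data, max_len); later kernels add a netdev argument); the function name below is illustrative.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Sketch: move just the recognized headers of frag 0 into skb->data. */
static void pull_headers_to_linear(struct sk_buff *skb, unsigned int max_hdr)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va = skb_frag_address(frag);
	unsigned int pull_len = eth_get_headlen(va, max_hdr);

	/* align the copy to sizeof(long) so memcpy stays on its fast path */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* account only for the bytes that now live in the linear area */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}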
rx_ring->size = ALIGN(rx_ring->size, 4096); - set_dev_node(dev, numa_node); + set_dev_node(dev, ring_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, @@ -6319,25 +6198,55 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) ixgbe_ping_all_vfs(adapter); } +static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + int i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + /** * ixgbe_watchdog_flush_tx - flush queues on link down * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) { - int i; - int some_tx_pending = 0; - if (!netif_carrier_ok(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - if (tx_ring->next_to_use != tx_ring->next_to_clean) { - some_tx_pending = 1; - break; - } - } - - if (some_tx_pending) { + if (ixgbe_ring_tx_pending(adapter) || + ixgbe_vf_tx_pending(adapter)) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. @@ -6837,6 +6746,36 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + if (likely(ixgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ixgbe_maybe_stop_tx(tx_ring, size); +} + #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) @@ -6958,8 +6897,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_ring->next_to_use = i; - /* notify HW of packet */ - ixgbe_write_tail(tx_ring, i); + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + /* notify HW of packet */ + ixgbe_write_tail(tx_ring, i); + } return; dma_error: @@ -7067,32 +7010,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, input, common, ring->queue_index); } -static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(ixgbe_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - if (likely(ixgbe_desc_unused(tx_ring) >= size)) - return 0; - return __ixgbe_maybe_stop_tx(tx_ring, size); -} - static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -7187,9 +7104,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, - &adapter->state))) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; @@ -7261,8 +7179,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ ixgbe_tx_map(tx_ring, first, hdr_len); - ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -7735,39 +7651,13 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], const unsigned char *addr, u16 flags) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); - - /* Hardware does not support aging addresses so if a - * ndm_state is given only allow permanent addresses - */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - pr_info("%s: FDB only supports static addresses\n", - ixgbe_driver_name); - return -EINVAL; - } - + /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; - - if (netdev_uc_count(dev) < rar_uc_entries) - err = dev_uc_add_excl(dev, addr); - else - err = -ENOMEM; - } else if (is_multicast_ether_addr(addr)) { - err = dev_mc_add_excl(dev, addr); - } else { - err = -EINVAL; + if 
(IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; } - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, @@ -7830,9 +7720,17 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_rx_pools; unsigned int limit; int pool, err; + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * then the available number of pools. + */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + #ifdef CONFIG_RPS if (vdev->num_rx_queues != vdev->num_tx_queues) { netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", @@ -8080,7 +7978,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_adapter *adapter = NULL; struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - static int cards_found; int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; @@ -8166,8 +8063,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); - adapter->bd_number = cards_found; - /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -8451,7 +8346,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbe_add_sanmac_netdev(netdev); e_dev_info("%s\n", ixgbe_default_device_descr); - cards_found++; #ifdef CONFIG_IXGBE_HWMON if (ixgbe_sysfs_init(adapter)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 11f02ea78c4a3cabd05ec64b57942645d3818b2b..d47b19f27c355c96ea006c004afa38efac3dcfe7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -445,8 +445,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { s32 status = 0; - u32 time_out; - u32 max_time_out = 10; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; @@ -514,25 +512,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= MDIO_AN_STAT1_COMPLETE; - if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { - break; - } - } - - if (time_out == max_time_out) { - hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); - return IXGBE_ERR_LINK_SETUP; - } - return status; } @@ -657,8 +636,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { s32 status; - u32 time_out; - u32 max_time_out = 10; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; ixgbe_link_speed speed; @@ -724,24 +701,6 @@ s32 ixgbe_setup_phy_link_tnx(struct 
ixgbe_hw *hw) hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, - MDIO_MMD_AN, - &autoneg_reg); - - autoneg_reg &= MDIO_AN_STAT1_COMPLETE; - if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) - break; - } - - if (time_out == max_time_out) { - hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); - return IXGBE_ERR_LINK_SETUP; - } - return status; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c14d4d89672ff7009168ecf14f98e444e9126ce5..706fc69aa0c51d3824d7c7e8ed0d47ff8f7adbe9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -250,13 +250,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) if (err) return err; - /* While the SR-IOV capability structure reports total VFs to be - * 64 we limit the actual number that can be allocated to 63 so - * that some transmit/receive resources can be reserved to the - * PF. The PCI bus driver already checks for other values out of - * range. + /* While the SR-IOV capability structure reports total VFs to be 64, + * we have to limit the actual number allocated based on two factors. + * First, we reserve some transmit/receive resources for the PF. + * Second, VMDQ also uses the same pools that SR-IOV does. We need to + * account for this, so that we don't accidentally allocate more VFs + * than we have available pools. The PCI bus driver already checks for + * other values out of range. */ - if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) return -EPERM; adapter->num_vfs = num_vfs; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index e6b07c2a01fe3cfdc6458a73646a4754feab8026..dfd55d83bc0335ad46defa9f22923c5125e256d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2194,6 +2194,8 @@ enum { #define IXGBE_VFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) /* Translated register #defines */ +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) @@ -2202,6 +2204,11 @@ enum { #define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, IXGBE_FDIR_PBALLOC_64K = 1, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d420f124633f046d5b61f6c357656c2a3b13b9f0..cc0e5b7ff041e58752dfa62728bb08eab5d11a5a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -523,7 +523,7 @@ static const struct ixgbevf_reg_test reg_test_vf[] = { { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { 0, 0, 0, 0 } + { .reg = 0 } }; static const u32 register_test_patterns[] = { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a0a1de9ce2389fdcc15ebad8f19a26b456d12447..ba96cb5b886d2880f83ebad953f75b05c71a3276 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -385,7 +385,6 @@ struct ixgbevf_adapter { /* structs defined in ixgbe_vf.h */ struct ixgbe_hw hw; u16 msg_enable; - u16 bd_number; /* Interrupt Throttle Rate */ u32 eitr_param; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c22a00c3621ab96b79d53033a0ef4283016a5185..030a219c85e363802644f1302dbf56c0ac709c8b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3464,7 +3464,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbevf_adapter *adapter = NULL; struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; - static int cards_found; int err, pci_using_dac; err = pci_enable_device(pdev); @@ -3525,8 +3524,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - adapter->bd_number = cards_found; - /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -3601,7 +3598,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_dbg(hw, "MAC: %d\n", hw->mac.type); hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); - cards_found++; return 0; err_register: diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 4d44d64ae3870c42dd62bb4d1cdfeb96fe9122b9..9cddd56d02c39305a69e13a25c2de88b4d92ccbb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -434,6 +434,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, if (!(links_reg & IXGBE_LINKS_UP)) goto out; + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before 
the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index fd4b6aecf6ee85d8f135cd6491582b417e1f3f1a..2dad4d5047bae724510dbd94d72d0fe4dbb006d8 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -633,7 +633,6 @@ ltq_etop_init(struct net_device *dev) int err; bool random_mac = false; - ether_setup(dev); dev->watchdog_timeo = 10 * HZ; err = ltq_etop_hw_init(dev); if (err) diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 1b4fc7c639e6842ec8498a28d8e41ccb2402660e..b3b72ad92d4a9de406e327d93d10cc1e1ef7d8cd 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -64,7 +64,7 @@ config MVPP2 config PXA168_ETH tristate "Marvell pxa168 ethernet support" - depends on CPU_PXA168 + depends on (CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST) && HAS_IOMEM select PHYLIB ---help--- This driver supports the pxa168 Ethernet ports. diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 8f5aa7c62b18f41f8eaa1b31e209687059921a47..c3b209cd06609fb85ffe3283d8c31028da8e6a60 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -22,27 +22,30 @@ * along with this program; if not, see . */ -#include -#include -#include -#include -#include -#include #include +#include #include +#include +#include #include -#include -#include +#include +#include +#include +#include #include -#include -#include +#include +#include +#include #include -#include -#include +#include +#include +#include #include +#include +#include + #include #include -#include #define DRIVER_NAME "pxa168-eth" #define DRIVER_VERSION "0.3" @@ -58,6 +61,8 @@ #define PORT_COMMAND 0x0410 #define PORT_STATUS 0x0418 #define HTPR 0x0428 +#define MAC_ADDR_LOW 0x0430 +#define MAC_ADDR_HIGH 0x0438 #define SDMA_CONFIG 0x0440 #define SDMA_CMD 0x0448 #define INT_CAUSE 0x0450 @@ -161,7 +166,7 @@ /* Bit definitions for Port status */ #define PORT_SPEED_100 (1 << 0) #define FULL_DUPLEX (1 << 1) -#define FLOW_CONTROL_ENABLED (1 << 2) +#define FLOW_CONTROL_DISABLED (1 << 2) #define LINK_UP (1 << 3) /* Bit definitions for work to be done */ @@ -191,6 +196,7 @@ struct tx_desc { struct pxa168_eth_private { int port_num; /* User Ethernet port number */ + int phy_addr; int rx_resource_err; /* Rx ring resource error flag */ @@ -296,7 +302,7 @@ static void abort_dma(struct pxa168_eth_private *pep) } while (max_retries-- > 0 && delay <= 0); if (max_retries <= 0) - printk(KERN_ERR "%s : DMA Stuck\n", __func__); + netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); } static int ethernet_phy_get(struct pxa168_eth_private *pep) @@ -507,9 +513,10 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep, if (i == HOP_NUMBER) { if (!del) { - printk(KERN_INFO "%s: table section is full, need to " - "move to 16kB implementation?\n", - __FILE__); + netdev_info(pep->dev, + "%s: table section is full, need to " + "move to 16kB implementation?\n", + __FILE__); return -ENOSPC; } else return 0; @@ -600,16 +607,42 @@ static void pxa168_eth_set_rx_mode(struct net_device *dev) 
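Note: the ixgbevf hunk above re-samples the link register a few times because SFP+ and DA links on 82599 can take up to 500 usecs before the status is trustworthy. The bounded poll below is the generic shape of that check as a hedged sketch; read_links() and LINK_UP_BIT are placeholders for the driver's MMIO read of IXGBE_VFLINKS and its link-up bit.

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#define LINK_UP_BIT	BIT(30)		/* illustrative bit position */

static bool link_really_up(u32 (*read_links)(void *ctx), void *ctx)
{
	int i;

	if (!(read_links(ctx) & LINK_UP_BIT))
		return false;

	/* link claims to be up; confirm it stays up while settling */
	for (i = 0; i < 5; i++) {
		udelay(100);
		if (!(read_links(ctx) & LINK_UP_BIT))
			return false;
	}
	return true;
}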
update_hash_table_mac_address(pep, NULL, ha->addr); } +static void pxa168_eth_get_mac_address(struct net_device *dev, + unsigned char *addr) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH); + unsigned int mac_l = rdl(pep, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; struct pxa168_eth_private *pep = netdev_priv(dev); unsigned char oldMac[ETH_ALEN]; + u32 mac_h, mac_l; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + mac_h = dev->dev_addr[0] << 24; + mac_h |= dev->dev_addr[1] << 16; + mac_h |= dev->dev_addr[2] << 8; + mac_h |= dev->dev_addr[3]; + mac_l = dev->dev_addr[4] << 8; + mac_l |= dev->dev_addr[5]; + wrl(pep, MAC_ADDR_HIGH, mac_h); + wrl(pep, MAC_ADDR_LOW, mac_l); + netif_addr_lock_bh(dev); update_hash_table_mac_address(pep, oldMac, dev->dev_addr); netif_addr_unlock_bh(dev); @@ -726,7 +759,7 @@ static int txq_reclaim(struct net_device *dev, int force) if (cmd_sts & TX_ERROR) { if (net_ratelimit()) - printk(KERN_ERR "%s: Error in TX\n", dev->name); + netdev_err(dev, "Error in TX\n"); dev->stats.tx_errors++; } dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); @@ -743,8 +776,7 @@ static void pxa168_eth_tx_timeout(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - printk(KERN_INFO "%s: TX timeout desc_count %d\n", - dev->name, pep->tx_desc_count); + netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count); schedule_work(&pep->tx_timeout_task); } @@ -814,9 +846,8 @@ static int rxq_process(struct net_device *dev, int budget) if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) - printk(KERN_ERR - "%s: Rx pkt on multiple desc\n", - dev->name); + netdev_err(dev, + "Rx pkt on multiple desc\n"); } if (cmd_sts & RX_ERROR) stats->rx_errors++; @@ -871,7 +902,7 @@ static void handle_link_event(struct pxa168_eth_private *pep) port_status = rdl(pep, PORT_STATUS); if (!(port_status & LINK_UP)) { if (netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); netif_carrier_off(dev); txq_reclaim(dev, 1); } @@ -883,10 +914,9 @@ static void handle_link_event(struct pxa168_eth_private *pep) speed = 10; duplex = (port_status & FULL_DUPLEX) ? 1 : 0; - fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", fc ? "en" : "dis"); + fc = (port_status & FLOW_CONTROL_DISABLED) ? 0 : 1; + netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, duplex ? "full" : "half", fc ? "en" : "dis"); if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } @@ -1039,9 +1069,8 @@ static void rxq_deinit(struct net_device *dev) } } if (pep->rx_desc_count) - printk(KERN_ERR - "Error in freeing Rx Ring. %d skb's still\n", - pep->rx_desc_count); + netdev_err(dev, "Error in freeing Rx Ring. 
%d skb's still\n", + pep->rx_desc_count); /* Free RX ring */ if (pep->p_rx_desc_area) dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, @@ -1280,15 +1309,15 @@ static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) int val; if (smi_wait_ready(pep)) { - printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); /* now wait for the data to be valid */ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { - printk(KERN_WARNING - "pxa168_eth: SMI bus read not valid\n"); + netdev_warn(pep->dev, + "pxa168_eth: SMI bus read not valid\n"); return -ENODEV; } msleep(10); @@ -1303,7 +1332,7 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, struct pxa168_eth_private *pep = bus->priv; if (smi_wait_ready(pep)) { - printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } @@ -1311,7 +1340,7 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, SMI_OP_W | (value & 0xffff)); if (smi_wait_ready(pep)) { - printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n"); + netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } @@ -1361,24 +1390,25 @@ static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) return phydev; } -static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) +static void phy_init(struct pxa168_eth_private *pep) { struct phy_device *phy = pep->phy; phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII); - if (speed == 0) { + if (pep->pd && pep->pd->speed != 0) { + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->speed = pep->pd->speed; + phy->duplex = pep->pd->duplex; + } else { phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; phy->supported &= PHY_BASIC_FEATURES; phy->advertising = phy->supported | ADVERTISED_Autoneg; - } else { - phy->autoneg = AUTONEG_DISABLE; - phy->advertising = 0; - phy->speed = speed; - phy->duplex = duplex; } + phy_start_aneg(phy); } @@ -1386,11 +1416,13 @@ static int ethernet_phy_setup(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - if (pep->pd->init) + if (pep->pd && pep->pd->init) pep->pd->init(); - pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); + + pep->phy = phy_scan(pep, pep->phy_addr & 0x1f); if (pep->phy != NULL) - phy_init(pep, pep->pd->speed, pep->pd->duplex); + phy_init(pep); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); return 0; @@ -1425,23 +1457,23 @@ static void pxa168_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops pxa168_ethtool_ops = { - .get_settings = pxa168_get_settings, - .set_settings = pxa168_set_settings, - .get_drvinfo = pxa168_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ts_info = ethtool_op_get_ts_info, + .get_settings = pxa168_get_settings, + .set_settings = pxa168_set_settings, + .get_drvinfo = pxa168_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops pxa168_eth_netdev_ops = { - .ndo_open = pxa168_eth_open, - .ndo_stop = pxa168_eth_stop, - .ndo_start_xmit = pxa168_eth_start_xmit, - .ndo_set_rx_mode = pxa168_eth_set_rx_mode, - .ndo_set_mac_address = pxa168_eth_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = 
pxa168_eth_do_ioctl, - .ndo_change_mtu = pxa168_eth_change_mtu, - .ndo_tx_timeout = pxa168_eth_tx_timeout, + .ndo_open = pxa168_eth_open, + .ndo_stop = pxa168_eth_stop, + .ndo_start_xmit = pxa168_eth_start_xmit, + .ndo_set_rx_mode = pxa168_eth_set_rx_mode, + .ndo_set_mac_address = pxa168_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = pxa168_eth_do_ioctl, + .ndo_change_mtu = pxa168_eth_change_mtu, + .ndo_tx_timeout = pxa168_eth_tx_timeout, }; static int pxa168_eth_probe(struct platform_device *pdev) @@ -1450,17 +1482,18 @@ static int pxa168_eth_probe(struct platform_device *pdev) struct net_device *dev = NULL; struct resource *res; struct clk *clk; + struct device_node *np; + const unsigned char *mac_addr = NULL; int err; printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); - clk = clk_get(&pdev->dev, "MFUCLK"); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { - printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n", - DRIVER_NAME); + dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); return -ENODEV; } - clk_enable(clk); + clk_prepare_enable(clk); dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); if (!dev) { @@ -1477,8 +1510,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) err = -ENODEV; goto err_netdev; } - pep->base = ioremap(res->start, resource_size(res)); - if (pep->base == NULL) { + pep->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pep->base)) { err = -ENOMEM; goto err_netdev; } @@ -1492,19 +1525,42 @@ static int pxa168_eth_probe(struct platform_device *pdev) INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); - printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); - eth_hw_addr_random(dev); + if (pdev->dev.of_node) + mac_addr = of_get_mac_address(pdev->dev.of_node); - pep->pd = dev_get_platdata(&pdev->dev); - pep->rx_ring_size = NUM_RX_DESCS; - if (pep->pd->rx_queue_size) - pep->rx_ring_size = pep->pd->rx_queue_size; + if (mac_addr && is_valid_ether_addr(mac_addr)) { + ether_addr_copy(dev->dev_addr, mac_addr); + } else { + /* try reading the mac address, if set by the bootloader */ + pxa168_eth_get_mac_address(dev, dev->dev_addr); + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_info(&pdev->dev, "Using random mac address\n"); + eth_hw_addr_random(dev); + } + } + pep->rx_ring_size = NUM_RX_DESCS; pep->tx_ring_size = NUM_TX_DESCS; - if (pep->pd->tx_queue_size) - pep->tx_ring_size = pep->pd->tx_queue_size; - pep->port_num = pep->pd->port_number; + pep->pd = dev_get_platdata(&pdev->dev); + if (pep->pd) { + if (pep->pd->rx_queue_size) + pep->rx_ring_size = pep->pd->rx_queue_size; + + if (pep->pd->tx_queue_size) + pep->tx_ring_size = pep->pd->tx_queue_size; + + pep->port_num = pep->pd->port_number; + pep->phy_addr = pep->pd->phy_addr; + } else if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, "port-id", + &pep->port_num); + + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (np) + of_property_read_u32(np, "reg", &pep->phy_addr); + } + /* Hardware supports only 3 ports */ BUG_ON(pep->port_num > 2); netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); @@ -1605,6 +1661,12 @@ static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) #define pxa168_eth_suspend NULL #endif +static const struct of_device_id pxa168_eth_of_match[] = { + { .compatible = "marvell,pxa168-eth" }, + { }, +}; +MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); + static struct platform_driver pxa168_eth_driver = { .probe = pxa168_eth_probe, 
.remove = pxa168_eth_remove, @@ -1612,8 +1674,9 @@ static struct platform_driver pxa168_eth_driver = { .resume = pxa168_eth_resume, .suspend = pxa168_eth_suspend, .driver = { - .name = DRIVER_NAME, - }, + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(pxa168_eth_of_match), + }, }; module_platform_driver(pxa168_eth_driver); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 24b242277ea1c5f1144c7b5e0c042922c44f97d6..264eab7d3b26e0cb53774ae160ecdbe235be1f7b 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -1107,7 +1107,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__xm_phy_read(hw, port, reg, &v)) - pr_warning("%s: phy read timed out\n", hw->dev[port]->name); + pr_warn("%s: phy read timed out\n", hw->dev[port]->name); return v; } @@ -1903,7 +1903,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) return 0; } - pr_warning("%s: phy write timeout\n", hw->dev[port]->name); + pr_warn("%s: phy write timeout\n", hw->dev[port]->name); return -EIO; } @@ -1931,7 +1931,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__gm_phy_read(hw, port, reg, &v)) - pr_warning("%s: phy read timeout\n", hw->dev[port]->name); + pr_warn("%s: phy read timeout\n", hw->dev[port]->name); return v; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index dba48a5ce7ab411c5ef2ec4f13e0296723853c47..bd3366267039bc233e526acf3926d4f8b6fb5638 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2814,7 +2814,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) default: if (net_ratelimit()) - pr_warning("unknown status opcode 0x%x\n", opcode); + pr_warn("unknown status opcode 0x%x\n", opcode); } } while (hw->st_idx != idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 923c4878461e8a1c1242034b618473efba584060..b16e1b95566f4db9708540742c4002233a429cb2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -580,8 +580,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, err = context->result; if (err) { - mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", - op, context->fw_status); + /* Since we do not want to have this error message always + * displayed at driver start when there are ConnectX2 HCAs + * on the host, we deprecate the error message for this + * specific command/input_mod/opcode_mod/fw-status to be debug. 
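Note: the pxa168 probe changes above make platform data optional: the MAC comes from DT via of_get_mac_address(), falling back to whatever the bootloader left in MAC_ADDR_HIGH/LOW or a random address, and the port number plus PHY address come from the "port-id" property and the "phy-handle" node. A hedged sketch of that DT branch, with error handling trimmed and the helper name invented for illustration; property names follow the diff.

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

/* Sketch: fill MAC address and port/phy numbers from DT, with fallbacks. */
static void probe_from_dt(struct platform_device *pdev, struct net_device *dev,
			  u32 *port_num, u32 *phy_addr)
{
	struct device_node *np = pdev->dev.of_node;
	const unsigned char *mac = of_get_mac_address(np);
	struct device_node *phy_np;

	if (mac && is_valid_ether_addr(mac))
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);	/* or read back the bootloader's value */

	of_property_read_u32(np, "port-id", port_num);

	phy_np = of_parse_phandle(np, "phy-handle", 0);
	if (phy_np) {
		of_property_read_u32(phy_np, "reg", phy_addr);
		of_node_put(phy_np);
	}
}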
+ */ + if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && + op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) + mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + else + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 35ff2925110a3f690098e62ab73a3321965be17f..ae83da9cd18a3f870ab51bc3ff9cb207b6f0432e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -112,6 +112,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* port statistics */ "tso_packets", + "xmit_more", "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", "rx_csum_good", "rx_csum_none", "tx_chksum_offload", @@ -1266,6 +1267,48 @@ static u32 mlx4_en_get_priv_flags(struct net_device *dev) return priv->pflags; } +static int mlx4_en_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->prof->inline_thold; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int val, ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val < MIN_PKT_LEN || val > MAX_INLINE) + ret = -EINVAL; + else + priv->prof->inline_thold = val; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, @@ -1296,6 +1339,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_ts_info = mlx4_en_get_ts_info, .set_priv_flags = mlx4_en_set_priv_flags, .get_priv_flags = mlx4_en_get_priv_flags, + .get_tunable = mlx4_en_get_tunable, + .set_tunable = mlx4_en_set_tunable, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 3626fdf4cb5ddd491d82b3e3496bfd8994546b0e..2091ae88615d3fde397da0e1e04f442c96012483 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -78,27 +78,24 @@ MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, #define MAX_PFC_TX 0xff #define MAX_PFC_RX 0xff -int en_print(const char *level, const struct mlx4_en_priv *priv, - const char *format, ...) +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) 
{ va_list args; struct va_format vaf; - int i; va_start(args, format); vaf.fmt = format; vaf.va = &args; if (priv->registered) - i = printk("%s%s: %s: %pV", - level, DRV_NAME, priv->dev->name, &vaf); + printk("%s%s: %s: %pV", + level, DRV_NAME, priv->dev->name, &vaf); else - i = printk("%s%s: %s: Port %d: %pV", - level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), - priv->port, &vaf); + printk("%s%s: %s: Port %d: %pV", + level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), + priv->port, &vaf); va_end(args); - - return i; } void mlx4_en_update_loopback_state(struct net_device *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index abddcf8c40aa120c4d4ade2822ccef68d00ea434..f3032fec8fce03767580bd555a4760e8d7faba1e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2459,6 +2459,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index c2cfb05e72905cc6961ddc773547e1d09ae26bbf..0a0261d128b9b200369b2fd34d602681ea25c363 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -150,14 +150,19 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.tx_chksum_offload = 0; priv->port_stats.queue_stopped = 0; priv->port_stats.wake_queue = 0; + priv->port_stats.tso_packets = 0; + priv->port_stats.xmit_more = 0; for (i = 0; i < priv->tx_ring_num; i++) { - stats->tx_packets += priv->tx_ring[i]->packets; - stats->tx_bytes += priv->tx_ring[i]->bytes; - priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; - priv->port_stats.queue_stopped += - priv->tx_ring[i]->queue_stopped; - priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue; + const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; + + stats->tx_packets += ring->packets; + stats->tx_bytes += ring->bytes; + priv->port_stats.tx_chksum_offload += ring->tx_csum; + priv->port_stats.queue_stopped += ring->queue_stopped; + priv->port_stats.wake_queue += ring->wake_queue; + priv->port_stats.tso_packets += ring->tso_packets; + priv->port_stats.xmit_more += ring->xmit_more; } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9c909d23f14c4eda34ce8f03215b76aa29639894..a33048ee9621a360060fed128d6f048faf53ea42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, skb_copy_to_linear_data(skb, va, length); skb->tail += length; } else { + unsigned int pull_len; + /* Move relevant fragments to skb */ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, skb, length); @@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, } skb_shinfo(skb)->nr_frags = used_frags; + pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); /* Copy headers into the skb linear buffer */ - memcpy(skb->data, va, HEADER_COPY_SIZE); - skb->tail += HEADER_COPY_SIZE; + memcpy(skb->data, va, pull_len); + skb->tail += pull_len; /* Skip 
headers in first fragment */ - skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; + skb_shinfo(skb)->frags[0].page_offset += pull_len; /* Adjust size of first fragment */ - skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); - skb->data_len = length - HEADER_COPY_SIZE; + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); + skb->data_len = length - pull_len; } return skb; } @@ -668,7 +671,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -769,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->ip_summed = CHECKSUM_UNNECESSARY; if (l2_tunnel) - gro_skb->encapsulation = 1; + gro_skb->csum_level = 1; if ((cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { @@ -823,8 +826,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); - if (l2_tunnel) - skb->encapsulation = 1; + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_level = 1; if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, @@ -855,7 +858,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; if (++polled == budget) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index dae3da6d8dd08979e4a4ba3f0f3c7413f10198cd..34c137878545fc672dad1a3d86e11c034c0ac368 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -65,10 +66,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->size = size; ring->size_mask = size - 1; ring->stride = stride; - ring->inline_thold = priv->prof->inline_thold; tmp = size * sizeof(struct mlx4_en_tx_info); - ring->tx_info = vmalloc_node(tmp, node); + ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); if (!ring->tx_info) { ring->tx_info = vmalloc(tmp); if (!ring->tx_info) { @@ -151,7 +151,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, kfree(ring->bounce_buf); ring->bounce_buf = NULL; err_info: - vfree(ring->tx_info); + kvfree(ring->tx_info); ring->tx_info = NULL; err_ring: kfree(ring); @@ -174,7 +174,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; - vfree(ring->tx_info); + kvfree(ring->tx_info); ring->tx_info = NULL; kfree(ring); *pring = NULL; @@ -191,12 +191,12 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->prod = 0; ring->cons = 0xffffffff; ring->last_nr_txbb = 1; - ring->poll_cnt = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = ring->qp.qpn << 8; + ring->doorbell_qpn = 
cpu_to_be32(ring->qp.qpn << 8); + ring->mr_key = cpu_to_be32(mdev->mr.key); mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); @@ -259,38 +259,45 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u8 owner, u64 timestamp) { - struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; - struct sk_buff *skb = tx_info->skb; - struct skb_frag_struct *frag; void *end = ring->buf + ring->buf_size; - int frags = skb_shinfo(skb)->nr_frags; + struct sk_buff *skb = tx_info->skb; + int nr_maps = tx_info->nr_maps; int i; - struct skb_shared_hwtstamps hwts; - if (timestamp) { - mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp); + /* We do not touch skb here, so prefetch skb->users location + * to speedup consume_skb() + */ + prefetchw(&skb->users); + + if (unlikely(timestamp)) { + struct skb_shared_hwtstamps hwts; + + mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp); skb_tstamp_tx(skb, &hwts); } /* Optimize the common case when there are no wraparounds */ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { if (!tx_info->inl) { - if (tx_info->linear) { + if (tx_info->linear) dma_unmap_single(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); - ++data; - } - - for (i = 0; i < frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else dma_unmap_page(priv->ddev, - (dma_addr_t) be64_to_cpu(data[i].addr), - skb_frag_size(frag), PCI_DMA_TODEVICE); + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); } } } else { @@ -299,27 +306,29 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, data = ring->buf + ((void *)data - end); } - if (tx_info->linear) { + if (tx_info->linear) dma_unmap_single(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); - ++data; - } - - for (i = 0; i < frags; i++) { + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; /* Check for wraparound before unmapping */ if ((void *) data >= end) data = ring->buf; - frag = &skb_shinfo(skb)->frags[i]; dma_unmap_page(priv->ddev, - (dma_addr_t) be64_to_cpu(data->addr), - skb_frag_size(frag), PCI_DMA_TODEVICE); - ++data; + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); } } } - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return tx_info->nr_txbb; } @@ -377,13 +386,19 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, u64 timestamp = 0; int done = 0; int budget = priv->tx_work_limit; + u32 last_nr_txbb; + u32 ring_cons; if (!priv->port_up) return true; + netdev_txq_bql_complete_prefetchw(ring->tx_queue); + index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; - ring_index = ring->cons & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); + ring_cons = ACCESS_ONCE(ring->cons); 
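Note: mlx4_en_create_tx_ring() above now tries a node-local kmalloc_node() with __GFP_NOWARN first and only falls back to vmalloc(), and the matching frees become kvfree(), which handles either origin. A hedged sketch of that allocation pattern; the function names are illustrative.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch: prefer physically contiguous, node-local memory; fall back to
 * vmalloc for large or fragmented cases. kvfree() frees either kind.
 */
static void *alloc_ring_info(size_t size, int node)
{
	void *p = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, node);

	if (!p)
		p = vmalloc(size);
	return p;
}

static void free_ring_info(void *p)
{
	kvfree(p);
}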
+ ring_index = ring_cons & size_mask; stamp_index = ring_index; /* Process all completed CQEs */ @@ -408,19 +423,19 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, new_index = be16_to_cpu(cqe->wqe_index) & size_mask; do { - txbbs_skipped += ring->last_nr_txbb; - ring_index = (ring_index + ring->last_nr_txbb) & size_mask; + txbbs_skipped += last_nr_txbb; + ring_index = (ring_index + last_nr_txbb) & size_mask; if (ring->tx_info[ring_index].ts_requested) timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - ring->last_nr_txbb = mlx4_en_free_tx_desc( + last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index, - !!((ring->cons + txbbs_skipped) & + !!((ring_cons + txbbs_skipped) & ring->size), timestamp); mlx4_en_stamp_wqe(priv, ring, stamp_index, - !!((ring->cons + txbbs_stamp) & + !!((ring_cons + txbbs_stamp) & ring->size)); stamp_index = ring_index; txbbs_stamp = txbbs_skipped; @@ -430,7 +445,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, ++cons_index; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; } @@ -441,7 +456,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, mcq->cons_index = cons_index; mlx4_cq_set_ci(mcq); wmb(); - ring->cons += txbbs_skipped; + + /* we want to dirty this cache line once */ + ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; + ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* @@ -512,30 +531,35 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, return ring->buf + index * TXBB_SIZE; } -static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag) +/* Decide if skb can be inlined in tx descriptor to avoid dma mapping + * + * It seems strange we do not simply use skb_copy_bits(). 
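mlx4_en_process_tx_cq() now snapshots ring->cons and ring->last_nr_txbb into locals at entry and writes them back exactly once at the end, so the cache line shared with the transmit path is loaded and dirtied only once per poll. A rough sketch of the pattern, with ONCE() standing in for ACCESS_ONCE() and illustrative names:

#include <stdint.h>

struct tx_ring_head {
	uint32_t cons;          /* consumer index, also read by xmit path */
	uint32_t last_nr_txbb;  /* size of the last freed descriptor      */
};

#define ONCE(x) (*(volatile typeof(x) *)&(x))

static void process_completions(struct tx_ring_head *r, uint32_t ndesc,
				uint32_t (*free_desc)(uint32_t index))
{
	uint32_t cons = ONCE(r->cons);           /* one load  */
	uint32_t last = ONCE(r->last_nr_txbb);   /* one load  */
	uint32_t i;

	for (i = 0; i < ndesc; i++) {
		cons += last;
		last = free_desc(cons);
	}

	ONCE(r->last_nr_txbb) = last;            /* one store */
	ONCE(r->cons) = cons;                    /* one store */
}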
+ * This would allow to inline all skbs iff skb->len <= inline_thold + * + * Note that caller already checked skb was not a gso packet + */ +static bool is_inline(int inline_thold, const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + void **pfrag) { void *ptr; - if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { - if (skb_shinfo(skb)->nr_frags == 1) { - ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]); - if (unlikely(!ptr)) - return 0; - - if (pfrag) - *pfrag = ptr; + if (skb->len > inline_thold || !inline_thold) + return false; - return 1; - } else if (unlikely(skb_shinfo(skb)->nr_frags)) - return 0; - else - return 1; + if (shinfo->nr_frags == 1) { + ptr = skb_frag_address_safe(&shinfo->frags[0]); + if (unlikely(!ptr)) + return false; + *pfrag = ptr; + return true; } - - return 0; + if (shinfo->nr_frags) + return false; + return true; } -static int inline_size(struct sk_buff *skb) +static int inline_size(const struct sk_buff *skb) { if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) <= MLX4_INLINE_ALIGN) @@ -546,18 +570,23 @@ static int inline_size(struct sk_buff *skb) sizeof(struct mlx4_wqe_inline_seg), 16); } -static int get_real_size(struct sk_buff *skb, struct net_device *dev, - int *lso_header_size) +static int get_real_size(const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + struct net_device *dev, + int *lso_header_size, + bool *inline_ok, + void **pfrag) { struct mlx4_en_priv *priv = netdev_priv(dev); int real_size; - if (skb_is_gso(skb)) { + if (shinfo->gso_size) { + *inline_ok = false; if (skb->encapsulation) *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); else *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); - real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + + real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + ALIGN(*lso_header_size + 4, DS_SIZE); if (unlikely(*lso_header_size != skb_headlen(skb))) { /* We add a segment for the skb linear buffer only if @@ -572,20 +601,28 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev, } } else { *lso_header_size = 0; - if (!is_inline(priv->prof->inline_thold, skb, NULL)) - real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; - else + *inline_ok = is_inline(priv->prof->inline_thold, skb, + shinfo, pfrag); + + if (*inline_ok) real_size = inline_size(skb); + else + real_size = CTRL_SIZE + + (shinfo->nr_frags + 1) * DS_SIZE; } return real_size; } -static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, - int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) +static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, + const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + int real_size, u16 *vlan_tag, + int tx_ind, void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + unsigned int hlen = skb_headlen(skb); if (skb->len <= spc) { if (likely(skb->len >= MIN_PKT_LEN)) { @@ -595,19 +632,19 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk memset(((void *)(inl + 1)) + skb->len, 0, MIN_PKT_LEN - skb->len); } - skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); - if (skb_shinfo(skb)->nr_frags) - memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, - skb_frag_size(&skb_shinfo(skb)->frags[0])); + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen, fragptr, + 
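The reworked is_inline() above makes the decision explicit: only packets at or below the inline threshold qualify, at most one page fragment is allowed, and that fragment must have a usable CPU address so it can be copied into the descriptor. A standalone sketch with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

struct small_pkt {
	size_t len;        /* total length                                   */
	int    nr_frags;   /* page fragments attached                        */
	void  *frag0;      /* address of frag 0, may be NULL (e.g. highmem)  */
};

static bool can_inline(const struct small_pkt *p, size_t inline_thold,
		       void **pfrag)
{
	if (!inline_thold || p->len > inline_thold)
		return false;
	if (p->nr_frags > 1)
		return false;
	if (p->nr_frags == 1) {
		if (!p->frag0)
			return false;   /* no CPU pointer, cannot memcpy it */
		*pfrag = p->frag0;
	}
	return true;
}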
skb_frag_size(&shinfo->frags[0])); } else { inl->byte_count = cpu_to_be32(1 << 31 | spc); - if (skb_headlen(skb) <= spc) { - skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); - if (skb_headlen(skb) < spc) { - memcpy(((void *)(inl + 1)) + skb_headlen(skb), - fragptr, spc - skb_headlen(skb)); - fragptr += spc - skb_headlen(skb); + if (hlen <= spc) { + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (hlen < spc) { + memcpy(((void *)(inl + 1)) + hlen, + fragptr, spc - hlen); + fragptr += spc - hlen; } inl = (void *) (inl + 1) + spc; memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); @@ -615,10 +652,11 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk skb_copy_from_linear_data(skb, inl + 1, spc); inl = (void *) (inl + 1) + spc; skb_copy_from_linear_data_offset(skb, spc, inl + 1, - skb_headlen(skb) - spc); - if (skb_shinfo(skb)->nr_frags) - memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, - fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); + hlen - spc); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen - spc, + fragptr, + skb_frag_size(&shinfo->frags[0])); } wmb(); @@ -642,15 +680,16 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, return fallback(dev, skb) % rings_p_up + up * rings_p_up; } -static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) +static void mlx4_bf_copy(void __iomem *dst, const void *src, + unsigned int bytecnt) { __iowrite64_copy(dst, src, bytecnt / 8); } netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { + struct skb_shared_info *shinfo = skb_shinfo(skb); struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; struct device *ddev = priv->ddev; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; @@ -663,15 +702,26 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) u32 index, bf_index; __be32 op_own; u16 vlan_tag = 0; - int i; + int i_frag; int lso_header_size; - void *fragptr; + void *fragptr = NULL; bool bounce = false; + bool send_doorbell; + bool stop_queue; + bool inline_ok; + u32 ring_cons; if (!priv->port_up) goto tx_drop; - real_size = get_real_size(skb, dev, &lso_header_size); + tx_ind = skb_get_queue_mapping(skb); + ring = priv->tx_ring[tx_ind]; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = ACCESS_ONCE(ring->cons); + + real_size = get_real_size(skb, shinfo, dev, &lso_header_size, + &inline_ok, &fragptr); if (unlikely(!real_size)) goto tx_drop; @@ -684,38 +734,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - tx_ind = skb->queue_mapping; - ring = priv->tx_ring[tx_ind]; if (vlan_tx_tag_present(skb)) vlan_tag = vlan_tx_tag_get(skb); - /* Check available TXBBs And 2K spare for prefetch */ - if (unlikely(((int)(ring->prod - ring->cons)) > - ring->size - HEADROOM - MAX_DESC_TXBBS)) { - /* every full Tx ring stops queue */ - netif_tx_stop_queue(ring->tx_queue); - ring->queue_stopped++; - /* If queue was emptied after the if, and before the - * stop_queue - need to wake the queue, or else it will remain - * stopped forever. - * Need a memory barrier to make sure ring->cons was not - * updated before queue was stopped. 
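mlx4_bf_copy() now takes a plain const void * source and forwards to __iowrite64_copy(), which copies 64-bit words, hence the bytecnt / 8. A simplified model of that word copy, with a volatile store loop standing in for the MMIO helper:

#include <stdint.h>
#include <stddef.h>

static void bf_copy(volatile uint64_t *dst, const uint64_t *src,
		    size_t bytecnt)
{
	size_t i;

	for (i = 0; i < bytecnt / 8; i++)
		dst[i] = src[i];     /* __iowrite64_copy() in the kernel */
}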
- */ - wmb(); - - if (unlikely(((int)(ring->prod - ring->cons)) <= - ring->size - HEADROOM - MAX_DESC_TXBBS)) { - netif_tx_wake_queue(ring->tx_queue); - ring->wake_queue++; - } else { - return NETDEV_TX_BUSY; - } - } + netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); /* Track current inflight packets for performance analysis */ AVG_PERF_COUNTER(priv->pstats.inflight_avg, - (u32) (ring->prod - ring->cons - 1)); + (u32)(ring->prod - ring_cons - 1)); /* Packet is good - grab an index and transmit it */ index = ring->prod & ring->size_mask; @@ -735,46 +762,48 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->skb = skb; tx_info->nr_txbb = nr_txbb; + data = &tx_desc->data; if (lso_header_size) data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4, DS_SIZE)); - else - data = &tx_desc->data; /* valid only for none inline segments */ tx_info->data_offset = (void *)data - (void *)tx_desc; + tx_info->inl = inline_ok; + tx_info->linear = (lso_header_size < skb_headlen(skb) && - !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0; + !inline_ok) ? 1 : 0; - data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + tx_info->nr_maps = shinfo->nr_frags + tx_info->linear; + data += tx_info->nr_maps - 1; - if (is_inline(ring->inline_thold, skb, &fragptr)) { - tx_info->inl = 1; - } else { - /* Map fragments */ - for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { - struct skb_frag_struct *frag; - dma_addr_t dma; + if (!tx_info->inl) { + dma_addr_t dma = 0; + u32 byte_count = 0; + + /* Map fragments if any */ + for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { + const struct skb_frag_struct *frag; - frag = &skb_shinfo(skb)->frags[i]; + frag = &shinfo->frags[i_frag]; + byte_count = skb_frag_size(frag); dma = skb_frag_dma_map(ddev, frag, - 0, skb_frag_size(frag), + 0, byte_count, DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) goto tx_drop_unmap; data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); + data->lkey = ring->mr_key; wmb(); - data->byte_count = cpu_to_be32(skb_frag_size(frag)); + data->byte_count = cpu_to_be32(byte_count); --data; } - /* Map linear part */ + /* Map linear part if needed */ if (tx_info->linear) { - u32 byte_count = skb_headlen(skb) - lso_header_size; - dma_addr_t dma; + byte_count = skb_headlen(skb) - lso_header_size; dma = dma_map_single(ddev, skb->data + lso_header_size, byte_count, @@ -783,29 +812,28 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop_unmap; data->addr = cpu_to_be64(dma); - data->lkey = cpu_to_be32(mdev->mr.key); + data->lkey = ring->mr_key; wmb(); data->byte_count = cpu_to_be32(byte_count); } - tx_info->inl = 0; + /* tx completion can avoid cache line miss for common cases */ + tx_info->map0_dma = dma; + tx_info->map0_byte_count = byte_count; } /* * For timestamping add flag to skb_shinfo and * set flag for further reference */ - if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && - skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_info->ts_requested = 0; + if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && + shinfo->tx_flags & SKBTX_HW_TSTAMP)) { + shinfo->tx_flags |= SKBTX_IN_PROGRESS; tx_info->ts_requested = 1; } /* Prepare ctrl segement apart opcode+ownership, which depends on * whether LSO is used */ - tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!vlan_tx_tag_present(skb); - tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; 
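In the mapping loop above each data segment is filled from the last fragment backwards, and a write barrier is issued before byte_count is stored, so the hardware never observes a segment whose count looks valid while its address or lkey is still stale. A small sketch of that ordering rule, with wmb() mapped to a compiler builtin and illustrative structure names:

#include <stdint.h>

struct data_seg {
	uint64_t addr;        /* big-endian in the real descriptor */
	uint32_t lkey;
	uint32_t byte_count;  /* written last                      */
};

#define wmb() __sync_synchronize()

static void fill_seg(struct data_seg *seg, uint64_t dma, uint32_t lkey,
		     uint32_t len)
{
	seg->addr = dma;
	seg->lkey = lkey;
	wmb();                      /* addr/lkey visible before ...      */
	seg->byte_count = len;      /* ... the segment is marked valid   */
}

static void fill_all(struct data_seg *last_seg, const uint64_t *dma,
		     const uint32_t *len, uint32_t lkey, int nr)
{
	struct data_seg *seg = last_seg;   /* points at the last segment */
	int i;

	/* walk from the last fragment toward the first */
	for (i = nr - 1; i >= 0; i--, seg--)
		fill_seg(seg, dma[i], lkey, len[i]);
}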
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | @@ -826,6 +854,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Handle LSO (TSO) packets */ if (lso_header_size) { + int i; + /* Mark opcode as LSO */ op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | ((ring->prod & ring->size) ? @@ -833,15 +863,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) /* Fill in the LSO prefix */ tx_desc->lso.mss_hdr_size = cpu_to_be32( - skb_shinfo(skb)->gso_size << 16 | lso_header_size); + shinfo->gso_size << 16 | lso_header_size); /* Copy headers; * note that we already verified that it is linear */ memcpy(tx_desc->lso.header, skb->data, lso_header_size); - priv->port_stats.tso_packets++; - i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + - !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); + ring->tso_packets++; + + i = ((skb->len - lso_header_size) / shinfo->gso_size) + + !!((skb->len - lso_header_size) % shinfo->gso_size); tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; ring->packets += i; } else { @@ -851,16 +882,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); ring->packets++; - } ring->bytes += tx_info->nr_bytes; netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); - if (tx_info->inl) { - build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); - tx_info->inl = 1; - } + if (tx_info->inl) + build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, + tx_ind, fragptr); if (skb->encapsulation) { struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); @@ -873,44 +902,85 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->prod += nr_txbb; /* If we used a bounce buffer then copy descriptor back into place */ - if (bounce) + if (unlikely(bounce)) tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); skb_tx_timestamp(skb); - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { - tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn); + /* Check available TXBBs And 2K spare for prefetch */ + stop_queue = (int)(ring->prod - ring_cons) > + ring->size - HEADROOM - MAX_DESC_TXBBS; + if (unlikely(stop_queue)) { + netif_tx_stop_queue(ring->tx_queue); + ring->queue_stopped++; + } + send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + real_size = (real_size / 16) & 0x3f; + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && + !vlan_tx_tag_present(skb) && send_doorbell) { + tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | + cpu_to_be32(real_size); op_own |= htonl((bf_index & 0xffff) << 8); - /* Ensure new descirptor hits memory - * before setting ownership of this descriptor to HW */ + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ wmb(); tx_desc->ctrl.owner_opcode = op_own; wmb(); - mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, - desc_size); + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); wmb(); ring->bf.offset ^= ring->bf.buf_size; } else { - /* Ensure new descirptor hits memory - * before setting ownership of this descriptor to HW */ + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + 
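The TSO branch computes the number of wire segments as a ceiling division of the payload by gso_size and then charges one extra replicated header per additional segment to nr_bytes. A tiny worked example of that accounting; the 54-byte header and 1448-byte MSS are just sample values:

#include <stdio.h>

static unsigned int tso_wire_bytes(unsigned int skb_len,
				   unsigned int hdr_len,
				   unsigned int mss)
{
	unsigned int payload = skb_len - hdr_len;
	unsigned int segs = payload / mss + !!(payload % mss); /* ceiling */

	return skb_len + (segs - 1) * hdr_len;
}

int main(void)
{
	/* 64 KB send, 54-byte headers, 1448-byte MSS -> 46 segments, 67966 B */
	printf("%u\n", tso_wire_bytes(65536, 54, 1448));
	return 0;
}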
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + !!vlan_tx_tag_present(skb); + tx_desc->ctrl.fence_size = real_size; + + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ wmb(); tx_desc->ctrl.owner_opcode = op_own; - wmb(); - iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + if (send_doorbell) { + wmb(); + iowrite32(ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); + } else { + ring->xmit_more++; + } } + if (unlikely(stop_queue)) { + /* If queue was emptied after the if (stop_queue) , and before + * the netif_tx_stop_queue() - need to wake the queue, + * or else it will remain stopped forever. + * Need a memory barrier to make sure ring->cons was not + * updated before queue was stopped. + */ + smp_rmb(); + + ring_cons = ACCESS_ONCE(ring->cons); + if (unlikely(((int)(ring->prod - ring_cons)) <= + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + } return NETDEV_TX_OK; tx_drop_unmap: en_err(priv, "DMA mapping error\n"); - for (i++; i < skb_shinfo(skb)->nr_frags; i++) { - data++; + while (++i_frag < shinfo->nr_frags) { + ++data; dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), PCI_DMA_TODEVICE); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2a004b347e1dd896f4b20c9cd36178c0e1f7bfb5..a49c9d11d8a58e5a8bcc5419133a7909204a0a41 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -101,21 +101,24 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not) mb(); } -static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) { /* (entry & (eq->nent - 1)) gives us a cyclic array */ - unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); - /* CX3 is capable of extending the EQE from 32 to 64 bytes. - * When this feature is enabled, the first (in the lower addresses) + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. */ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; } -static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) { - struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } @@ -459,8 +462,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) enum slave_port_gen_event gen_event; unsigned long flags; struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; - while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. 
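The send_doorbell logic above batches doorbells: the MMIO write is issued only when the stack reports no follow-up packet (xmit_more clear) or when the queue was just stopped; otherwise the ring's xmit_more counter is bumped and the next transmit carries the doorbell for both. A compact sketch of that rule with hypothetical names:

#include <stdbool.h>

struct txq_state {
	bool          queue_stopped;
	unsigned long xmit_more;     /* deferred-doorbell counter */
};

static void ring_doorbell(void)
{
	/* iowrite32()/BlueFlame copy in the real driver */
}

static void maybe_ring(struct txq_state *q, bool skb_xmit_more)
{
	bool send_doorbell = !skb_xmit_more || q->queue_stopped;

	if (send_doorbell)
		ring_doorbell();
	else
		q->xmit_more++;      /* hardware sees it with the next packet */
}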
@@ -894,8 +898,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. + */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); @@ -997,8 +1003,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox; int err; int i; - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 494753e44ae3a1324fac4a269f60868766926717..2e88a235e26b47b5fc85ad2a6c5ea4a494997e14 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -137,7 +137,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [8] = "Dynamic QP updates support", [9] = "Device managed flow steering IPoIB support", [10] = "TCP/IP offloads/flow-steering for VXLAN support", - [11] = "MAD DEMUX (Secure-Host) support" + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support" }; int i; @@ -557,6 +559,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -733,6 +736,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_rq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -974,8 +982,13 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, if (port < 0) return -EINVAL; - vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | - (port & 0xFF); + /* Protect against untrusted guests: enforce that this is the + * QUERY_PORT general query. 
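The QUERY_DEV_CAP change reads one byte at offset 0x7a and folds bit 6 (CQE stride) and bit 7 (EQE stride) into the flags2 capability word. A sketch of that decoding; the 1 << 12 / 1 << 13 values here simply mirror the new string-table indices and are illustrative, not the driver's enum values:

#include <stdint.h>

#define CAP2_CQE_STRIDE (1u << 12)   /* assumed position, for illustration */
#define CAP2_EQE_STRIDE (1u << 13)   /* assumed position, for illustration */

static uint32_t decode_stride_caps(uint8_t cacheline_field)
{
	uint32_t flags2 = 0;

	if (cacheline_field & (1 << 6))
		flags2 |= CAP2_CQE_STRIDE;
	if (cacheline_field & (1 << 7))
		flags2 |= CAP2_EQE_STRIDE;
	return flags2;
}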
+ */ + if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) + return -EINVAL; + + vhcr->in_modifier = port; err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, @@ -1376,6 +1389,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -1452,11 +1466,25 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0; + MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* User still need to know to support CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); @@ -1616,6 +1644,17 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, if (byte_field & 0x40) /* 64-bytes cqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 1fce03ebe5c4a27903256642abe6c3cf8fae0eb8..9b835aecac96c5aa87f3bbc7b256f2fa4da164b7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -178,6 +178,8 @@ struct mlx4_init_hca_param { u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 871e3a5bda38535c3dbe8c3649e335aaa2b4077c..90de6e1ad06e9eb0bf002d9c1f87a5d80019fffe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -104,7 +104,8 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 
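INIT_HCA and QUERY_HCA round-trip the stride through a single byte: the low bits carry log2(cqe_size) - 5 and the next nibble log2(eqe_size) - 5, so 64/128/256-byte entries encode as 1/2/3 and decode via the 0x3 / 0x30 masks defined above. A self-contained sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

#define CQE_SIZE_MASK_STRIDE 0x03
#define EQE_SIZE_MASK_STRIDE 0x30

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint8_t encode_stride(uint32_t eqe_size, uint32_t cqe_size)
{
	return (uint8_t)(((ilog2_u32(eqe_size) - 5) << 4) |
			  (ilog2_u32(cqe_size) - 5));
}

static void decode_stride(uint8_t field, uint32_t *eqe_size, uint32_t *cqe_size)
{
	*cqe_size = 1u << ((field & CQE_SIZE_MASK_STRIDE) + 5);
	*eqe_size = 1u << (((field & EQE_SIZE_MASK_STRIDE) >> 4) + 5);
}

int main(void)
{
	uint32_t e, c;

	decode_stride(encode_stride(128, 128), &e, &c);
	printf("eqe=%u cqe=%u\n", e, c);   /* both 128 */
	return 0;
}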
-#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" @@ -196,6 +197,40 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* FW not supporting or cancelled by user */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE_EQE enabled by FW to use bigger stride + * When FW has NCSI it may decide not to report 64B CQE/EQEs + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); + /* Changing the real data inside CQE size to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} + static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; @@ -390,6 +425,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } } if ((dev->caps.flags & @@ -397,6 +440,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + if (!mlx4_is_slave(dev)) + mlx4_enable_cqe_eqe_stride(dev); + return 0; } @@ -724,11 +770,22 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* User still need to know when CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); @@ -2202,115 +2259,18 @@ static void mlx4_free_ownership(struct mlx4_dev *dev) iounmap(owner); } -static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) +static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, + int total_vfs, int *nvfs, struct mlx4_priv *priv) { - struct mlx4_priv *priv; struct mlx4_dev *dev; + unsigned sum = 0; int err; int port; - int 
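mlx4_enable_cqe_eqe_stride() only turns the feature on when firmware reports both stride capabilities and the 64-byte CQE/EQE flags, and when the CPU cache line is 128 or 256 bytes; the large stride then replaces, rather than extends, the 64-byte mode. A condensed sketch of that policy:

static int want_large_stride(unsigned int cache_line,
			     int fw_has_cqe_stride, int fw_has_eqe_stride,
			     int fw_has_64b_cqe, int fw_has_64b_eqe)
{
	if (!fw_has_cqe_stride || !fw_has_eqe_stride)
		return 0;                         /* feature not offered   */
	if (!fw_has_64b_cqe || !fw_has_64b_eqe)
		return 0;                         /* e.g. NCSI firmware    */
	return cache_line == 128 || cache_line == 256;
}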
nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { - {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; - unsigned total_vfs = 0; - int sriov_initialized = 0; - unsigned int i; - - pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); - return err; - } - - /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS - * per port, we must limit the number of VFs to 63 (since their are - * 128 MACs) - */ - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; - total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { - nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; - if (nvfs[i] < 0) { - dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); - return -EINVAL; - } - } - for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; - i++) { - prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; - if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { - dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); - return -EINVAL; - } - } - if (total_vfs >= MLX4_MAX_NUM_VF) { - dev_err(&pdev->dev, - "Requested more VF's (%d) than allowed (%d)\n", - total_vfs, MLX4_MAX_NUM_VF - 1); - return -EINVAL; - } - - for (i = 0; i < MLX4_MAX_PORTS; i++) { - if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { - dev_err(&pdev->dev, - "Requested more VF's (%d) for port (%d) than allowed (%d)\n", - nvfs[i] + nvfs[2], i + 1, - MLX4_MAX_NUM_VF_P_PORT - 1); - return -EINVAL; - } - } - - - /* - * Check for BARs. - */ - if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && - !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", - pci_dev_data, pci_resource_flags(pdev, 0)); - err = -ENODEV; - goto err_disable_pdev; - } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing UAR, aborting\n"); - err = -ENODEV; - goto err_disable_pdev; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); - goto err_disable_pdev; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } + int i; + int existing_vfs = 0; - /* Allow large DMA segments, up to the firmware limit of 1 GB */ - dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); + dev = &priv->dev; - dev = pci_get_drvdata(pdev); - priv = mlx4_priv(dev); - dev->pdev = pdev; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); @@ -2324,28 +2284,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) dev->rev_id = pdev->revision; dev->numa_node = dev_to_node(&pdev->dev); + /* Detect if this device is a virtual function */ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { - /* When acting as pf, we 
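The num_vfs / probe_vfs module parameters accept one, two or three comma-separated values, and param_map decides which per-port slot each value fills: a single value means dual-port VFs, while two or three values fill port 1, port 2 and the dual-port slot in order. A runnable sketch of that spreading, using sample values only:

#include <stdio.h>

#define MAX_PORTS 2

static const int param_map[MAX_PORTS + 1][MAX_PORTS + 1] = {
	{2, 0, 0},   /* argc == 1: single value -> dual-port slot      */
	{0, 1, 2},   /* argc == 2: port 1, port 2                      */
	{0, 1, 2},   /* argc == 3: port 1, port 2, dual-port           */
};

int main(void)
{
	int args[] = {4, 2, 1};     /* e.g. num_vfs=4,2,1 (sample input) */
	int argc = 3;
	int nvfs[MAX_PORTS + 1] = {0, 0, 0};
	unsigned int total = 0;
	int i;

	for (i = 0; i < MAX_PORTS + 1 && i < argc; i++) {
		nvfs[param_map[argc - 1][i]] = args[i];
		total += args[i];
	}
	printf("port1=%d port2=%d dual=%d total=%u\n",
	       nvfs[0], nvfs[1], nvfs[2], total);
	return 0;
}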
normally skip vfs unless explicitly - * requested to probe them. */ - if (total_vfs) { - unsigned vfs_offset = 0; - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && - vfs_offset + nvfs[i] < extended_func_num(pdev); - vfs_offset += nvfs[i], i++) - ; - if (i == sizeof(nvfs)/sizeof(nvfs[0])) { - err = -ENODEV; - goto err_free_dev; - } - if ((extended_func_num(pdev) - vfs_offset) - > prb_vf[i]) { - mlx4_warn(dev, "Skipping virtual function:%d\n", - extended_func_num(pdev)); - err = -ENODEV; - goto err_free_dev; - } - } mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); dev->flags |= MLX4_FLAG_SLAVE; } else { @@ -2355,11 +2296,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) err = mlx4_get_ownership(dev); if (err) { if (err < 0) - goto err_free_dev; + return err; else { mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); - err = -EINVAL; - goto err_free_dev; + return -EINVAL; } } @@ -2371,21 +2311,28 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) GFP_KERNEL); if (NULL == dev->dev_vfs) { mlx4_err(dev, "Failed to allocate memory for VFs\n"); - err = 0; + err = -ENOMEM; + goto err_free_own; } else { atomic_inc(&pf_loading); - err = pci_enable_sriov(pdev, total_vfs); + existing_vfs = pci_num_vf(pdev); + if (existing_vfs) { + err = 0; + if (existing_vfs != total_vfs) + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", + existing_vfs, total_vfs); + } else { + err = pci_enable_sriov(pdev, total_vfs); + } if (err) { mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", err); atomic_dec(&pf_loading); - err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); dev->flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER; dev->num_vfs = total_vfs; - sriov_initialized = 1; } } } @@ -2401,7 +2348,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) err = mlx4_reset(dev); if (err) { mlx4_err(dev, "Failed to reset HCA, aborting\n"); - goto err_rel_own; + goto err_sriov; } } @@ -2451,34 +2398,46 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { - unsigned sum = 0; - err = mlx4_multi_func_init(dev); - if (err) { - mlx4_err(dev, "Failed to init master mfunc interface, aborting\n"); + int ib_ports = 0; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + if (ib_ports && + (num_vfs_argc > 1 || probe_vfs_argc > 1)) { + mlx4_err(dev, + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); + err = -EINVAL; + goto err_close; + } + if (dev->caps.num_ports < 2 && + num_vfs_argc > 1) { + err = -EINVAL; + mlx4_err(dev, + "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", + dev->caps.num_ports); goto err_close; } - if (sriov_initialized) { - int ib_ports = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - ib_ports++; + memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs)); - if (ib_ports && - (num_vfs_argc > 1 || probe_vfs_argc > 1)) { - mlx4_err(dev, - "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); - err = -EINVAL; - goto err_master_mfunc; - } - for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { - unsigned j; - for (j = 0; j < nvfs[i]; ++sum, ++j) { - dev->dev_vfs[sum].min_port = - i < 2 ? 
i + 1 : 1; - dev->dev_vfs[sum].n_ports = i < 2 ? 1 : - dev->caps.num_ports; - } + for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) { + unsigned j; + + for (j = 0; j < dev->nvfs[i]; ++sum, ++j) { + dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; + dev->dev_vfs[sum].n_ports = i < 2 ? 1 : + dev->caps.num_ports; } } + + /* In master functions, the communication channel + * must be initialized after obtaining its address from fw + */ + err = mlx4_multi_func_init(dev); + if (err) { + mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); + goto err_close; + } } err = mlx4_alloc_eq_table(dev); @@ -2499,7 +2458,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (!mlx4_is_slave(dev)) { err = mlx4_init_steering(dev); if (err) - goto err_free_eq; + goto err_disable_msix; } err = mlx4_setup_hca(dev); @@ -2559,6 +2518,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (!mlx4_is_slave(dev)) mlx4_clear_steering(dev); +err_disable_msix: + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + err_free_eq: mlx4_free_eq_table(dev); @@ -2575,9 +2538,6 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) } err_close: - if (dev->flags & MLX4_FLAG_MSI_X) - pci_disable_msix(pdev); - mlx4_close_hca(dev); err_mfunc: @@ -2588,20 +2548,154 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) mlx4_cmd_cleanup(dev); err_sriov: - if (dev->flags & MLX4_FLAG_SRIOV) + if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) pci_disable_sriov(pdev); -err_rel_own: - if (!mlx4_is_slave(dev)) - mlx4_free_ownership(dev); - if (mlx4_is_master(dev) && dev->num_vfs) atomic_dec(&pf_loading); kfree(priv->dev.dev_vfs); -err_free_dev: - kfree(priv); +err_free_own: + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + return err; +} + +static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, + struct mlx4_priv *priv) +{ + int err; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { + {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; + unsigned total_vfs = 0; + unsigned int i; + + pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS + * per port, we must limit the number of VFs to 63 (since their are + * 128 MACs) + */ + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { + nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; + if (nvfs[i] < 0) { + dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + i++) { + prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; + if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { + dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + if (total_vfs >= MLX4_MAX_NUM_VF) { + dev_err(&pdev->dev, + "Requested more VF's (%d) than allowed (%d)\n", + total_vfs, MLX4_MAX_NUM_VF - 1); + err = -EINVAL; + goto err_disable_pdev; + } + + for (i = 0; i < MLX4_MAX_PORTS; i++) { + if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + dev_err(&pdev->dev, + "Requested more VF's (%d) for port (%d) than allowed 
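The loop above hands out per-VF port assignments from dev->nvfs: slots 0 and 1 produce single-port VFs anchored on port 1 or 2, and slot 2 produces dual-port VFs that start at port 1 and span all physical ports. A standalone sketch with sample counts:

#include <stdio.h>

struct vf_info {
	int min_port;
	int n_ports;
};

int main(void)
{
	int nvfs[3] = {2, 1, 1};      /* port1-only, port2-only, dual-port */
	int num_ports = 2;
	struct vf_info vfs[8];
	int i, j, sum = 0;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < nvfs[i]; j++, sum++) {
			vfs[sum].min_port = i < 2 ? i + 1 : 1;
			vfs[sum].n_ports  = i < 2 ? 1 : num_ports;
		}
	}
	for (i = 0; i < sum; i++)
		printf("vf%d: min_port=%d n_ports=%d\n",
		       i, vfs[i].min_port, vfs[i].n_ports);
	return 0;
}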
(%d)\n", + nvfs[i] + nvfs[2], i + 1, + MLX4_MAX_NUM_VF_P_PORT - 1); + err = -EINVAL; + goto err_disable_pdev; + } + } + + /* Check for BARs. */ + if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && + !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + pci_dev_data, pci_resource_flags(pdev, 0)); + err = -ENODEV; + goto err_disable_pdev; + } + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing UAR, aborting\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + + /* Allow large DMA segments, up to the firmware limit of 1 GB */ + dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); + /* Detect if this device is a virtual function */ + if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { + /* When acting as pf, we normally skip vfs unless explicitly + * requested to probe them. + */ + if (total_vfs) { + unsigned vfs_offset = 0; + + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset += nvfs[i], i++) + ; + if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + err = -ENODEV; + goto err_release_regions; + } + if ((extended_func_num(pdev) - vfs_offset) + > prb_vf[i]) { + dev_warn(&pdev->dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_release_regions; + } + } + } + + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + if (err) + goto err_release_regions; + return 0; err_release_regions: pci_release_regions(pdev); @@ -2616,6 +2710,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx4_priv *priv; struct mlx4_dev *dev; + int ret; printk_once(KERN_INFO "%s", mlx4_version); @@ -2624,28 +2719,38 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; dev = &priv->dev; + dev->pdev = pdev; pci_set_drvdata(pdev, dev); priv->pci_dev_data = id->driver_data; - return __mlx4_init_one(pdev, id->driver_data); + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) + kfree(priv); + + return ret; } -static void __mlx4_remove_one(struct pci_dev *pdev) +static void mlx4_unload_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); int pci_dev_data; int p; + int active_vfs = 0; if (priv->removed) return; pci_dev_data = priv->pci_dev_data; - /* in SRIOV it is not allowed to unload the pf's - * driver while there are alive vf's */ - if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) - pr_warn("Removing PF when there are assigned VF's !!!\n"); + /* Disabling SR-IOV is not allowed while there are active vf's */ + if (mlx4_is_master(dev)) { + 
active_vfs = mlx4_how_many_lives_vf(dev); + if (active_vfs) { + pr_warn("Removing PF when there are active VF's !!\n"); + pr_warn("Will not disable SR-IOV.\n"); + } + } mlx4_stop_sense(dev); mlx4_unregister_device(dev); @@ -2688,7 +2793,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev) if (dev->flags & MLX4_FLAG_MSI_X) pci_disable_msix(pdev); - if (dev->flags & MLX4_FLAG_SRIOV) { + if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); dev->num_vfs = 0; @@ -2704,8 +2809,6 @@ static void __mlx4_remove_one(struct pci_dev *pdev) kfree(dev->caps.qp1_proxy); kfree(dev->dev_vfs); - pci_release_regions(pdev); - pci_disable_device(pdev); memset(priv, 0, sizeof(*priv)); priv->pci_dev_data = pci_dev_data; priv->removed = 1; @@ -2716,7 +2819,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); - __mlx4_remove_one(pdev); + mlx4_unload_one(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); kfree(priv); pci_set_drvdata(pdev, NULL); } @@ -2725,11 +2830,22 @@ int mlx4_restart_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); - int pci_dev_data; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int pci_dev_data, err, total_vfs; pci_dev_data = priv->pci_dev_data; - __mlx4_remove_one(pdev); - return __mlx4_init_one(pdev, pci_dev_data); + total_vfs = dev->num_vfs; + memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); + + mlx4_unload_one(pdev); + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); + if (err) { + mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", + __func__, pci_name(pdev), err); + return err; + } + + return err; } static const struct pci_device_id mlx4_pci_table[] = { @@ -2783,7 +2899,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table); static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { - __mlx4_remove_one(pdev); + mlx4_unload_one(pdev); return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; @@ -2795,7 +2911,7 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) struct mlx4_priv *priv = mlx4_priv(dev); int ret; - ret = __mlx4_init_one(pdev, priv->pci_dev_data); + ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv); return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } @@ -2809,7 +2925,7 @@ static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, - .shutdown = __mlx4_remove_one, + .shutdown = mlx4_unload_one, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index b508c7887ef84999c92bc4e959134cf0bf6ffaf3..de10dbb2e6ed519e958f6f4d77b75bdd91d3076f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -285,6 +285,9 @@ struct mlx4_icm_table { #define MLX4_MPT_STATUS_SW 0xF0 #define MLX4_MPT_STATUS_HW 0x00 +#define MLX4_CQE_SIZE_MASK_STRIDE 0x3 +#define MLX4_EQE_SIZE_MASK_STRIDE 0x30 + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 3de41be49425ce23c19477db2e0c19fca4f395ac..8fef65840b3b5e4176d512b4a2e36d510a1da311 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -216,13 +216,16 @@ enum cq_type { struct mlx4_en_tx_info { struct sk_buff *skb; - u32 nr_txbb; - u32 nr_bytes; - u8 linear; - u8 data_offset; - u8 inl; - u8 ts_requested; -}; + dma_addr_t map0_dma; + u32 map0_byte_count; + u32 nr_txbb; + u32 nr_bytes; + u8 linear; + u8 data_offset; + u8 inl; + u8 ts_requested; + u8 nr_maps; +} ____cacheline_aligned_in_smp; #define MLX4_EN_BIT_DESC_OWN 0x80000000 @@ -253,39 +256,46 @@ struct mlx4_en_rx_alloc { }; struct mlx4_en_tx_ring { + /* cache line used and dirtied in tx completion + * (mlx4_en_free_tx_buf()) + */ + u32 last_nr_txbb; + u32 cons; + unsigned long wake_queue; + + /* cache line used and dirtied in mlx4_en_xmit() */ + u32 prod ____cacheline_aligned_in_smp; + unsigned long bytes; + unsigned long packets; + unsigned long tx_csum; + unsigned long tso_packets; + unsigned long xmit_more; + struct mlx4_bf bf; + unsigned long queue_stopped; + + /* Following part should be mostly read */ + cpumask_t affinity_mask; + struct mlx4_qp qp; struct mlx4_hwq_resources wqres; - u32 size ; /* number of TXBBs */ - u32 size_mask; - u16 stride; - u16 cqn; /* index of port CQ associated with this ring */ - u32 prod; - u32 cons; - u32 buf_size; - u32 doorbell_qpn; - void *buf; - u16 poll_cnt; - struct mlx4_en_tx_info *tx_info; - u8 *bounce_buf; - u8 queue_index; - cpumask_t affinity_mask; - u32 last_nr_txbb; - struct mlx4_qp qp; - struct mlx4_qp_context context; - int qpn; - enum mlx4_qp_state qp_state; - struct mlx4_srq dummy; - unsigned long bytes; - unsigned long packets; - unsigned long tx_csum; - unsigned long queue_stopped; - unsigned long wake_queue; - struct mlx4_bf bf; - bool bf_enabled; - bool bf_alloced; - struct netdev_queue *tx_queue; - int hwtstamp_tx_type; - int inline_thold; -}; + u32 size; /* number of TXBBs */ + u32 size_mask; + u16 stride; + u16 cqn; /* index of port CQ associated with this ring */ + u32 buf_size; + __be32 doorbell_qpn; + __be32 mr_key; + void *buf; + struct mlx4_en_tx_info *tx_info; + u8 *bounce_buf; + struct mlx4_qp_context context; + int qpn; + enum mlx4_qp_state qp_state; + u8 queue_index; + bool bf_enabled; + bool bf_alloced; + struct netdev_queue *tx_queue; + int hwtstamp_tx_type; +} ____cacheline_aligned_in_smp; struct mlx4_en_rx_desc { /* actual number of entries depends on rx ring stride */ @@ -426,6 +436,7 @@ struct mlx4_en_pkt_stats { struct mlx4_en_port_stats { unsigned long tso_packets; + unsigned long xmit_more; unsigned long queue_stopped; unsigned long wake_queue; unsigned long tx_timeout; @@ -433,7 +444,7 @@ struct mlx4_en_port_stats { unsigned long rx_chksum_good; unsigned long rx_chksum_none; unsigned long tx_chksum_offload; -#define NUM_PORT_STATS 8 +#define NUM_PORT_STATS 9 }; struct mlx4_en_perf_stats { @@ -542,6 +553,7 @@ struct mlx4_en_priv { unsigned max_mtu; int base_qpn; int cqe_factor; + int cqe_size; struct mlx4_en_rss_map rss_map; __be32 ctrl_flags; @@ -612,6 +624,11 @@ struct mlx4_mac_entry { struct rcu_head rcu; }; +static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) +{ + return buf + idx * cqe_sz; +} + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) { @@ -836,8 +853,8 @@ extern const struct ethtool_ops 
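The mlx4_en_tx_ring reshuffle groups the fields dirtied by TX completion, the fields dirtied by mlx4_en_xmit(), and the mostly-read configuration into separate cache lines to avoid false sharing between the two CPUs involved. A toy illustration of the layout idea, with a plain aligned(64) attribute standing in for ____cacheline_aligned_in_smp:

#include <stdint.h>

#define CACHE_ALIGNED __attribute__((aligned(64)))

struct demo_tx_ring {
	/* dirtied by the TX completion path */
	uint32_t cons;
	uint32_t last_nr_txbb;

	/* dirtied by the xmit path, forced onto its own cache line */
	uint32_t prod CACHE_ALIGNED;
	unsigned long bytes;
	unsigned long packets;

	/* mostly read by both paths */
	uint32_t size CACHE_ALIGNED;
	uint32_t size_mask;
	void *buf;
} CACHE_ALIGNED;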
mlx4_en_ethtool_ops; */ __printf(3, 4) -int en_print(const char *level, const struct mlx4_en_priv *priv, - const char *format, ...); +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...); #define en_dbg(mlevel, priv, format, ...) \ do { \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 65a7da69e2ac82f17903754f16b4d80fe1824d20..368c6c5ea014d2a1c8adc8284269452ac915faab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -357,60 +357,24 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_2ERR_QP: return "2ERR_QP"; - case MLX5_CMD_OP_RTS2SQD_QP: - return "RTS2SQD_QP"; - - case MLX5_CMD_OP_SQD2RTS_QP: - return "SQD2RTS_QP"; - case MLX5_CMD_OP_2RST_QP: return "2RST_QP"; case MLX5_CMD_OP_QUERY_QP: return "QUERY_QP"; - case MLX5_CMD_OP_CONF_SQP: - return "CONF_SQP"; - case MLX5_CMD_OP_MAD_IFC: return "MAD_IFC"; case MLX5_CMD_OP_INIT2INIT_QP: return "INIT2INIT_QP"; - case MLX5_CMD_OP_SUSPEND_QP: - return "SUSPEND_QP"; - - case MLX5_CMD_OP_UNSUSPEND_QP: - return "UNSUSPEND_QP"; - - case MLX5_CMD_OP_SQD2SQD_QP: - return "SQD2SQD_QP"; - - case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET: - return "ALLOC_QP_COUNTER_SET"; - - case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET: - return "DEALLOC_QP_COUNTER_SET"; - - case MLX5_CMD_OP_QUERY_QP_COUNTER_SET: - return "QUERY_QP_COUNTER_SET"; - case MLX5_CMD_OP_CREATE_PSV: return "CREATE_PSV"; case MLX5_CMD_OP_DESTROY_PSV: return "DESTROY_PSV"; - case MLX5_CMD_OP_QUERY_PSV: - return "QUERY_PSV"; - - case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE: - return "QUERY_SIG_RULE_TABLE"; - - case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE: - return "QUERY_BLOCK_SIZE_TABLE"; - case MLX5_CMD_OP_CREATE_SRQ: return "CREATE_SRQ"; @@ -1538,16 +1502,9 @@ static const char *cmd_status_str(u8 status) } } -int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +static int cmd_status_to_err(u8 status) { - if (!hdr->status) - return 0; - - pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", - cmd_status_str(hdr->status), hdr->status, - be32_to_cpu(hdr->syndrome)); - - switch (hdr->status) { + switch (status) { case MLX5_CMD_STAT_OK: return 0; case MLX5_CMD_STAT_INT_ERR: return -EIO; case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; @@ -1567,3 +1524,33 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) default: return -EIO; } } + +/* this will be available till all the commands use set/get macros */ +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(hdr->status), hdr->status, + be32_to_cpu(hdr->syndrome)); + + return cmd_status_to_err(hdr->status); +} + +int mlx5_cmd_status_to_err_v2(void *ptr) +{ + u32 syndrome; + u8 status; + + status = be32_to_cpu(*(__be32 *)ptr) >> 24; + if (!status) + return 0; + + syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(status), status, syndrome); + + return cmd_status_to_err(status); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 4e8bd0b34bb059d6fc2c78d1a6a61c4796ca870e..ed53291468f3226e644aa039025a962aff0b0fe0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -198,7 +198,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) int eqes_found = 0; int set_ci = 
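mlx5_cmd_status_to_err_v2() treats the output box as raw words: the status lives in the top byte of the first big-endian 32-bit word and the syndrome in the second word. A minimal sketch of that parsing, with ntohl() standing in for be32_to_cpu() and the errno mapping elided:

#include <stdint.h>
#include <arpa/inet.h>

static int parse_outbox_status(const void *out, uint8_t *status,
			       uint32_t *syndrome)
{
	const uint32_t *words = out;

	*status = ntohl(words[0]) >> 24;   /* top byte of first word */
	*syndrome = ntohl(words[1]);       /* second word            */
	return *status ? -1 : 0;           /* real code maps status to errno */
}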
0; u32 cqn; - u32 srqn; + u32 rsn; u8 port; while ((eqe = next_eqe_sw(eq))) { @@ -224,18 +224,18 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_PATH_MIG_FAILED: case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; mlx5_core_dbg(dev, "event %s(%d) arrived\n", eqe_type_str(eqe->type), eqe->type); - mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff, - eqe->type); + mlx5_rsc_event(dev, rsn, eqe->type); break; case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: - srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", - eqe_type_str(eqe->type), eqe->type, srqn); - mlx5_srq_event(dev, srqn, eqe->type); + eqe_type_str(eqe->type), eqe->type, rsn); + mlx5_srq_event(dev, rsn, eqe->type); break; case MLX5_EVENT_TYPE_CMD: @@ -468,7 +468,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES, - dev->caps.max_vf + 1, + dev->caps.gen.max_vf + 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", &dev->priv.uuari.uars[0]); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index f012658b6a927baf1fca6a2d39d806346c61a1a6..087c4c797debdc90a7cec13aacf3b78f5cf04190 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -64,86 +64,9 @@ int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) return err; } -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, - struct mlx5_caps *caps) +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) { - struct mlx5_cmd_query_hca_cap_mbox_out *out; - struct mlx5_cmd_query_hca_cap_mbox_in in; - struct mlx5_query_special_ctxs_mbox_out ctx_out; - struct mlx5_query_special_ctxs_mbox_in ctx_in; - int err; - u16 t16; - - out = kzalloc(sizeof(*out), GFP_KERNEL); - if (!out) - return -ENOMEM; - - memset(&in, 0, sizeof(in)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); - in.hdr.opmod = cpu_to_be16(0x1); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - goto out_out; - - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out_out; - } - - - caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; - caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; - caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; - caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); - caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); - caps->flags = be64_to_cpu(out->hca_cap.flags); - caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); - caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; - caps->num_ports = out->hca_cap.num_ports & 0xf; - caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; - if (caps->num_ports > MLX5_MAX_PORTS) { - mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n", - caps->num_ports, MLX5_MAX_PORTS); - err = -EINVAL; - goto out_out; - } - caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; - caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; - caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; - caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; - caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; - caps->log_max_mcg = out->hca_cap.log_max_mcg; - caps->max_qp_mcg 
= be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; - caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); - caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); - caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; - t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); - if (t16 & 0x8000) { - caps->bf_reg_size = 1 << (t16 & 0x1f); - caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE; - } else { - caps->bf_reg_size = 0; - caps->bf_regs_per_page = 0; - } - caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1); - - memset(&ctx_in, 0, sizeof(ctx_in)); - memset(&ctx_out, 0, sizeof(ctx_out)); - ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in), - &ctx_out, sizeof(ctx_out)); - if (err) - goto out_out; - - if (ctx_out.hdr.status) - err = mlx5_cmd_status_to_err(&ctx_out.hdr); - - caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey); - -out_out: - kfree(out); - - return err; + return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); } int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f2716cc1f51df8eba8da25cd9ec415de55f05df3..3d8e8e489b2ddb3539a4a9c89b784974c833e50e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "mlx5_core.h" #define DRIVER_NAME "mlx5_core" @@ -207,11 +208,11 @@ static void release_bar(struct pci_dev *pdev) static int mlx5_enable_msix(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; - int num_eqs = 1 << dev->caps.log_max_eq; + int num_eqs = 1 << dev->caps.gen.log_max_eq; int nvec; int i; - nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; nvec = min_t(int, nvec, num_eqs); if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; @@ -250,91 +251,205 @@ struct mlx5_reg_host_endianess { #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) enum { - MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | - CAP_MASK(MLX5_CAP_OFF_DCT, 1), + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + MLX5_DEV_CAP_FLAG_DCT, }; +static u16 to_fw_pkey_sz(u32 size) +{ + switch (size) { + case 128: + return 0; + case 256: + return 1; + case 512: + return 2; + case 1024: + return 3; + case 2048: + return 4; + case 4096: + return 5; + default: + pr_warn("invalid pkey table size %d\n", size); + return 0; + } +} + /* selectively copy writable fields clearing any reserved area */ -static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from) +static void copy_rw_fields(void *to, struct mlx5_caps *from) { + __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); u64 v64; - to->log_max_qp = from->log_max_qp & 0x1f; - to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; - to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; - to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; - to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; - to->log_max_atomic_size_qp = from->log_max_atomic_size_qp; - to->log_max_atomic_size_dc = from->log_max_atomic_size_dc; - v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK; - to->flags = cpu_to_be64(v64); + MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, 
from->gen.log_max_ra_req_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); + MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc); + MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); + v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; + *flags_off = cpu_to_be64(v64); } -enum { - HCA_CAP_OPMOD_GET_MAX = 0, - HCA_CAP_OPMOD_GET_CUR = 1, -}; +static u16 get_pkey_table_size(int pkey) +{ + if (pkey > MLX5_MAX_LOG_PKEY_TABLE) + return 0; -static int handle_hca_cap(struct mlx5_core_dev *dev) + return MLX5_MIN_PKEY_TABLE_SIZE << pkey; +} + +static void fw2drv_caps(struct mlx5_caps *caps, void *out) +{ + struct mlx5_general_caps *gen = &caps->gen; + + gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); + gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); + gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); + gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); + gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); + gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); + gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); + gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); + gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); + gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); + gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); + gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); + gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); + gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size); + gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); + gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); + gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); + gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); + gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); + gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); + gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); + gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); + gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); + gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); + gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); + pr_debug("flags = 0x%llx\n", gen->flags); + gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz); + gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); + gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf); + gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); + gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); + gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); + gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); + gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); + gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); + gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd); + gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); +} + +static const char *caps_opmod_str(u16 opmod) { - struct 
mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; - struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; - struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; - struct mlx5_cmd_set_hca_cap_mbox_out set_out; - u64 flags; + switch (opmod) { + case HCA_CAP_OPMOD_GET_MAX: + return "GET_MAX"; + case HCA_CAP_OPMOD_GET_CUR: + return "GET_CUR"; + default: + return "Invalid"; + } +} + +int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, + u16 opmod) +{ + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; + int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *out; int err; - memset(&query_ctx, 0, sizeof(query_ctx)); - query_out = kzalloc(sizeof(*query_out), GFP_KERNEL); - if (!query_out) + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto query_ex; - set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL); - if (!set_ctx) { - err = -ENOMEM; + err = mlx5_cmd_status_to_err_v2(out); + if (err) { + mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); goto query_ex; } + mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); + fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + +query_ex: + kfree(out); + return err; +} - query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); - query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR); - err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), - query_out, sizeof(*query_out)); +static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) +{ + u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; + int err; + + memset(out, 0, sizeof(out)); + + MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); if (err) - goto query_ex; + return err; - err = mlx5_cmd_status_to_err(&query_out->hdr); - if (err) { - mlx5_core_warn(dev, "query hca cap failed, %d\n", err); + err = mlx5_cmd_status_to_err_v2(out); + + return err; +} + +static int handle_hca_cap(struct mlx5_core_dev *dev) +{ + void *set_ctx = NULL; + struct mlx5_profile *prof = dev->profile; + struct mlx5_caps *cur_caps = NULL; + struct mlx5_caps *max_caps = NULL; + int err = -ENOMEM; + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) goto query_ex; - } - copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); + max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); + if (!max_caps) + goto query_ex; - if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) - set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; + cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); + if (!cur_caps) + goto query_ex; - flags = be64_to_cpu(query_out->hca_cap.flags); - /* disable checksum */ - flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; - - set_ctx->hca_cap.flags = cpu_to_be64(flags); - memset(&set_out, 0, sizeof(set_out)); - set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); - set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); - err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx), - &set_out, sizeof(set_out)); - if (err) { - mlx5_core_warn(dev, "set hca cap failed, %d\n", err); + err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + if (err) goto query_ex; - } - err = mlx5_cmd_status_to_err(&set_out.hdr); + err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); if (err) goto query_ex; + /* we limit 
the size of the pkey table to 128 entries for now */ + cur_caps->gen.pkey_table_size = 128; + + if (prof->mask & MLX5_PROF_MASK_QP_SIZE) + cur_caps->gen.log_max_qp = prof->log_max_qp; + + /* disable checksum */ + cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), + cur_caps); + err = set_caps(dev, set_ctx, set_sz); + query_ex: - kfree(query_out); + kfree(cur_caps); + kfree(max_caps); kfree(set_ctx); return err; @@ -782,6 +897,7 @@ static void remove_one(struct pci_dev *pdev) static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ + { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ { 0, } }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 8145b4668229d6a483e9b36fdc457387a6c6ab41..5261a2b0da436b3a4359787c11dbc3686ee70689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -39,28 +39,53 @@ #include "mlx5_core.h" -void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) +static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, + u32 rsn) { struct mlx5_qp_table *table = &dev->priv.qp_table; - struct mlx5_core_qp *qp; + struct mlx5_core_rsc_common *common; spin_lock(&table->lock); - qp = radix_tree_lookup(&table->tree, qpn); - if (qp) - atomic_inc(&qp->refcount); + common = radix_tree_lookup(&table->tree, rsn); + if (common) + atomic_inc(&common->refcount); spin_unlock(&table->lock); - if (!qp) { - mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); - return; + if (!common) { + mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", + rsn); + return NULL; } + return common; +} - qp->event(qp, event_type); +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) +{ + if (atomic_dec_and_test(&common->refcount)) + complete(&common->free); +} + +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) +{ + struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); + struct mlx5_core_qp *qp; + + if (!common) + return; + + switch (common->res) { + case MLX5_RES_QP: + qp = (struct mlx5_core_qp *)common; + qp->event(qp, event_type); + break; + + default: + mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); + } - if (atomic_dec_and_test(&qp->refcount)) - complete(&qp->free); + mlx5_core_put_rsc(common); } int mlx5_core_create_qp(struct mlx5_core_dev *dev, @@ -92,6 +117,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); + qp->common.res = MLX5_RES_QP; spin_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, qp->qpn, qp); spin_unlock_irq(&table->lock); @@ -106,9 +132,9 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, qp->qpn); qp->pid = current->pid; - atomic_set(&qp->refcount, 1); + atomic_set(&qp->common.refcount, 1); atomic_inc(&dev->num_qps); - init_completion(&qp->free); + init_completion(&qp->common.free); return 0; @@ -138,9 +164,8 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, radix_tree_delete(&table->tree, qp->qpn); spin_unlock_irqrestore(&table->lock, flags); - if (atomic_dec_and_test(&qp->refcount)) - complete(&qp->free); - wait_for_completion(&qp->free); + mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); + wait_for_completion(&qp->common.free); memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); @@ -184,13 +209,10 @@ int 
mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, - [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP, }, [MLX5_QP_STATE_SQD] = { [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, - [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP, - [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP, }, [MLX5_QP_STATE_SQER] = { [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 68f5d9c77c7b13b9038cf5bc4e920e09ac3c33a9..0a6348cefc0137999577dedf341bef369e6b0166 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -174,11 +174,11 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) for (i = 0; i < tot_uuars; i++) { bf = &uuari->bfs[i]; - bf->buf_size = dev->caps.bf_reg_size / 2; + bf->buf_size = dev->caps.gen.bf_reg_size / 2; bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + MLX5_BF_OFFSET; bf->need_lock = need_uuar_lock(i); spin_lock_init(&bf->lock); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 2f12c88c66abfa1cd0bb0e099e4a33a761327fd6..bde1b70f473b31efd8c66dfeaccb28a77f1ab239 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -511,7 +511,6 @@ static int moxart_mac_probe(struct platform_device *pdev) goto init_fail; } - ether_setup(ndev); ndev->netdev_ops = &moxart_netdev_ops; netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); ndev->priv_flags |= IFF_UNICAST_FLT; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 4f40d7b8629e3a103c34e081a616d8d900e9093d..cc0485e3c6210997c532fd4cee51946e93983104 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3537,7 +3537,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); - strncpy(buf, dev->name, IFNAMSIZ); + strlcpy(buf, dev->name, IFNAMSIZ); flush_work(&vdev->reset_task); diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 31eb911e4763c691bb288912a8e48900cf986552..8176c8a1cc6a93b146724b62377b2f059c608306 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -315,8 +315,6 @@ static int netx_eth_enable(struct net_device *ndev) unsigned int mac4321, mac65; int running, i; - ether_setup(ndev); - ndev->netdev_ops = &netx_eth_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(5000); diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 79645f74b3a84400865e6f148dc31ad3cbf3e887..379b7fbded784df52068b92f836c921bf4661559 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -943,7 +943,6 @@ static int w90p910_ether_setup(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); - ether_setup(dev); dev->netdev_ops = 
&w90p910_ether_netdev_ops; dev->ethtool_ops = &w90p910_ether_ethtool_ops; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 925b296d8ab84d8497d5a85b2bd5dda8b0c08aa9..f39cae620f61568688c79415c6dbf9ebbf96ff90 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1481,7 +1481,7 @@ static int phy_init(struct net_device *dev) } /* phy vendor specific configuration */ - if ((np->phy_oui == PHY_OUI_CICADA)) { + if (np->phy_oui == PHY_OUI_CICADA) { if (init_cicada(dev, np, phyinterface)) { netdev_info(dev, "%s: phy init failed\n", pci_name(np->pci_dev)); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index a44a03c45014903a6410ebe2d665f3651ee30db2..66fd868152e579a4ba3483fd4726f0e9d9662436 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1377,9 +1377,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) goto err_out_iounmap; } - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(ndev); - /* Setup driver functions */ ndev->netdev_ops = &lpc_netdev_ops; ndev->ethtool_ops = &lpc_eth_ethtool_ops; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 2d6b148528ddf42f7b613dde08344ada5ad7431f..fa2db41e02f85d472d89b30ddd22be53bee85280 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -693,11 +693,11 @@ static void yellowfin_tx_timeout(struct net_device *dev) /* Note: these should be KERN_DEBUG. */ if (yellowfin_debug) { int i; - pr_warning(" Rx ring %p: ", yp->rx_ring); + pr_warn(" Rx ring %p: ", yp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) pr_cont(" %08x", yp->rx_ring[i].result_status); pr_cont("\n"); - pr_warning(" Tx ring %p: ", yp->tx_ring); + pr_warn(" Tx ring %p: ", yp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) pr_cont(" %04x /%08x", yp->tx_status[i].tx_errs, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 5ec5a2b0e9895e02a0dd778d55c79a9d210685ef..0b2a1ccd276dbd4c1e582ef635b6304c351d03d7 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1475,9 +1475,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u32 val; if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x " - "will not be enabled.\n", - module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); + pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", + module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index b84f5ea3d659ee599e02db455296c54bf10e5fbe..e56c1bb361412144e2fa60cc6b2b21b3ecb35245 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -39,8 +39,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 61 -#define QLCNIC_LINUX_VERSIONID "5.3.61" +#define _QLCNIC_LINUX_SUBVERSION 62 +#define QLCNIC_LINUX_VERSIONID "5.3.62" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -540,6 +540,8 @@ struct qlcnic_hardware_context { u8 
lb_mode; u16 vxlan_port; struct device *hwmon_dev; + u32 post_mode; + bool run_post; }; struct qlcnic_adapter_stats { @@ -2283,6 +2285,7 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 #define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 +#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 #define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 @@ -2307,6 +2310,7 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) bool status; status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_QLE8830) || (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 476e4998ef991693008818a4c559df1c0fc03e2d..840bf36b5e9d0d8ac5ddca14cc5e1afca26191f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -35,6 +35,35 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *); #define QLC_SKIP_INACTIVE_PCI_REGS 7 #define QLC_MAX_LEGACY_FUNC_SUPP 8 +/* 83xx Module type */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */ +#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive + * copper(compliant) + */ +#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting + * copper(compliant) + */ +#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper + * (legacy, best effort) + */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */ +#define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T*/ +#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper + * (legacy, best effort) + */ +#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */ + +/* Port types */ +#define QLC_83XX_10_CAPABLE BIT_8 +#define QLC_83XX_100_CAPABLE BIT_9 +#define QLC_83XX_1G_CAPABLE BIT_10 +#define QLC_83XX_10G_CAPABLE BIT_11 +#define QLC_83XX_AUTONEG_ENABLE BIT_15 + static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, {QLCNIC_CMD_CONFIG_INTRPT, 18, 34}, @@ -667,6 +696,7 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf, int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; int status; status = qlcnic_83xx_get_port_config(adapter); @@ -674,13 +704,20 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) dev_err(&adapter->pdev->dev, "Get Port Info failed\n"); } else { - if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config)) - adapter->ahw->port_type = QLCNIC_XGBE; - else - adapter->ahw->port_type = QLCNIC_GBE; - if (QLC_83XX_AUTONEG(adapter->ahw->port_config)) - adapter->ahw->link_autoneg = AUTONEG_ENABLE; + if (ahw->port_config & QLC_83XX_10G_CAPABLE) { + ahw->port_type = QLCNIC_XGBE; + } else if (ahw->port_config & QLC_83XX_10_CAPABLE || + ahw->port_config & QLC_83XX_100_CAPABLE || + ahw->port_config & QLC_83XX_1G_CAPABLE) { + ahw->port_type = QLCNIC_GBE; + } else { + ahw->port_type = QLCNIC_XGBE; + } + + 
if (QLC_83XX_AUTONEG(ahw->port_config)) + ahw->link_autoneg = AUTONEG_ENABLE; + } return status; } @@ -2664,7 +2701,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) QLC_83XX_FLASH_STATUS_READY) break; - msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY); + usleep_range(1000, 1100); } while (--retries); if (!retries) @@ -3176,22 +3213,33 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; - if (QLC_83XX_SFP_PRESENT(config)) { - switch (ahw->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ahw->supported_type = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - ahw->supported_type = PORT_TP; - break; - default: - ahw->supported_type = PORT_OTHER; - } + switch (QLC_83XX_SFP_MODULE_TYPE(config)) { + case QLC_83XX_MODULE_FIBRE_10GBASE_LRM: + case QLC_83XX_MODULE_FIBRE_10GBASE_LR: + case QLC_83XX_MODULE_FIBRE_10GBASE_SR: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_XGBE; + break; + case QLC_83XX_MODULE_FIBRE_1000BASE_SX: + case QLC_83XX_MODULE_FIBRE_1000BASE_LX: + case QLC_83XX_MODULE_FIBRE_1000BASE_CX: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_TP_1000BASE_T: + ahw->supported_type = PORT_TP; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP: + case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP: + case QLC_83XX_MODULE_DA_10GE_LEGACY_CP: + case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP: + ahw->supported_type = PORT_DA; + ahw->port_type = QLCNIC_XGBE; + break; + default: + ahw->supported_type = PORT_OTHER; + ahw->port_type = QLCNIC_XGBE; } if (config & 1) err = 1; @@ -3204,9 +3252,9 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, struct ethtool_cmd *ecmd) { + struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = 0; int status = 0; - struct qlcnic_hardware_context *ahw = adapter->ahw; if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { /* Get port configuration info */ @@ -3229,20 +3277,41 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ecmd->autoneg = AUTONEG_DISABLE; } - if (ahw->port_type == QLCNIC_XGBE) { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + ecmd->supported = (SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_Autoneg); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ahw->port_config & QLC_83XX_10_CAPABLE) + ecmd->advertising |= SUPPORTED_10baseT_Full; + if (ahw->port_config & QLC_83XX_100_CAPABLE) + ecmd->advertising |= SUPPORTED_100baseT_Full; + if (ahw->port_config & QLC_83XX_1G_CAPABLE) + ecmd->advertising |= SUPPORTED_1000baseT_Full; + if (ahw->port_config & QLC_83XX_10G_CAPABLE) + ecmd->advertising |= SUPPORTED_10000baseT_Full; + if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE) + ecmd->advertising |= ADVERTISED_Autoneg; } else { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); + switch 
(ahw->link_speed) { + case SPEED_10: + ecmd->advertising = SUPPORTED_10baseT_Full; + break; + case SPEED_100: + ecmd->advertising = SUPPORTED_100baseT_Full; + break; + case SPEED_1000: + ecmd->advertising = SUPPORTED_1000baseT_Full; + break; + case SPEED_10000: + ecmd->advertising = SUPPORTED_10000baseT_Full; + break; + default: + break; + } + } switch (ahw->supported_type) { @@ -3258,6 +3327,12 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; break; + case PORT_DA: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + ecmd->transceiver = XCVR_EXTERNAL; + break; default: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -3272,35 +3347,60 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, struct ethtool_cmd *ecmd) { - int status = 0; + struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = adapter->ahw->port_config; + int status = 0; - if (ecmd->autoneg) - adapter->ahw->port_config |= BIT_15; - - switch (ethtool_cmd_speed(ecmd)) { - case SPEED_10: - adapter->ahw->port_config |= BIT_8; - break; - case SPEED_100: - adapter->ahw->port_config |= BIT_9; - break; - case SPEED_1000: - adapter->ahw->port_config |= BIT_10; - break; - case SPEED_10000: - adapter->ahw->port_config |= BIT_11; - break; - default: - return -EINVAL; + /* 83xx devices do not support Half duplex */ + if (ecmd->duplex == DUPLEX_HALF) { + netdev_info(adapter->netdev, + "Half duplex mode not supported\n"); + return -EINVAL; } + if (ecmd->autoneg) { + ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; + ahw->port_config |= (QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + } else { /* force speed */ + ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + ahw->port_config &= ~(QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_10_CAPABLE; + break; + case SPEED_100: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_100_CAPABLE; + break; + case SPEED_1000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_1G_CAPABLE; + break; + case SPEED_10000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE); + ahw->port_config |= QLC_83XX_10G_CAPABLE; + break; + default: + return -EINVAL; + } + } status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_info(&adapter->pdev->dev, - "Failed to Set Link Speed and autoneg.\n"); - adapter->ahw->port_config = config; + netdev_info(adapter->netdev, + "Failed to Set Link Speed and autoneg.\n"); + ahw->port_config = config; } + return status; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 2bf101a47d02db1afbd153e032aab728f2ecb159..f3346a3779d3c36c2f91d4703feb75dc500e39df 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -83,6 +83,7 @@ /* Firmware image definitions */ #define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 #define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" +#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin" #define QLC_84XX_FW_FILE_NAME "84xx_fw.bin" #define QLC_83XX_BOOT_FROM_FLASH 0 #define 
QLC_83XX_BOOT_FROM_FILE 0x12345678 @@ -360,7 +361,6 @@ enum qlcnic_83xx_states { #define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F) #define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16)) #define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10) -#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11) #define QLC_83XX_LINK_STATS(data) ((data) & BIT_0) #define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7) #define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 3172cdf591fe1622c263d00f36ee12216205c57a..2bb48d57e7a51856225b7cbb7fa7bb0ebca7e510 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2074,6 +2074,121 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } +/* POST FW related definations*/ +#define QLC_83XX_POST_SIGNATURE_REG 0x41602014 +#define QLC_83XX_POST_MODE_REG 0x41602018 +#define QLC_83XX_POST_FAST_MODE 0 +#define QLC_83XX_POST_MEDIUM_MODE 1 +#define QLC_83XX_POST_SLOW_MODE 2 + +/* POST Timeout values in milliseconds */ +#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690 +#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930 +#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500 + +/* POST result values */ +#define QLC_83XX_POST_PASS 0xfffffff0 +#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff +#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe +#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc +#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8 + +static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + struct device *dev = &adapter->pdev->dev; + int timeout, count, ret = 0; + u32 signature; + + /* Set timeout values with extra 2 seconds of buffer */ + switch (adapter->ahw->post_mode) { + case QLC_83XX_POST_FAST_MODE: + timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_MEDIUM_MODE: + timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_SLOW_MODE: + timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000; + break; + default: + return -EINVAL; + } + + strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + + ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev); + if (ret) { + dev_err(dev, "POST firmware can not be loaded, skipping POST\n"); + return 0; + } + + ret = qlcnic_83xx_copy_fw_file(adapter); + if (ret) + return ret; + + /* clear QLC_83XX_POST_SIGNATURE_REG register */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0); + + /* Set POST mode */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG, + adapter->ahw->post_mode); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FILE); + + qlcnic_83xx_start_hw(adapter); + + count = 0; + do { + msleep(100); + count += 100; + + signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG); + if (signature == QLC_83XX_POST_PASS) + break; + } while (timeout > count); + + if (timeout <= count) { + dev_err(dev, "POST timed out, signature = 0x%08x\n", signature); + return -EIO; + } + + switch (signature) { + case QLC_83XX_POST_PASS: + dev_info(dev, "POST passed, Signature = 0x%08x\n", signature); + break; + case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case 
QLC_83XX_POST_DDR_TEST_FAIL: + dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_FLASH_TEST_FAIL: + dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + default: + dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + } + + return ret; +} + static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; @@ -2118,8 +2233,27 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) if (qlcnic_83xx_copy_bootloader(adapter)) return err; + + /* Check if POST needs to be run */ + if (adapter->ahw->run_post) { + err = qlcnic_83xx_run_post(adapter); + if (err) + return err; + + /* No need to run POST in next reset sequence */ + adapter->ahw->run_post = false; + + /* Again reset the adapter to load regular firmware */ + qlcnic_83xx_stop_hw(adapter); + qlcnic_83xx_init_hw(adapter); + + err = qlcnic_83xx_copy_bootloader(adapter); + if (err) + return err; + } + /* Boot either flash image or firmware image from host file system */ - if (qlcnic_load_fw_file) { + if (qlcnic_load_fw_file == 1) { if (qlcnic_83xx_load_fw_image_from_host(adapter)) return err; } else { @@ -2283,6 +2417,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) fw_info = ahw->fw_info; switch (pdev->device) { case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; @@ -2327,6 +2462,25 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) adapter->rx_mac_learn = false; ahw->msix_supported = !!qlcnic_use_msi_x; + /* Check if POST needs to be run */ + switch (qlcnic_load_fw_file) { + case 2: + ahw->post_mode = QLC_83XX_POST_FAST_MODE; + ahw->run_post = true; + break; + case 3: + ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE; + ahw->run_post = true; + break; + case 4: + ahw->post_mode = QLC_83XX_POST_SLOW_MODE; + ahw->run_post = true; + break; + default: + ahw->run_post = false; + break; + } + qlcnic_83xx_init_rings(adapter); err = qlcnic_83xx_init_mailbox_work(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 03cd4c3d7835a855fca6796a252a6902031280bb..69b46c051cc0c299feb3db576dca2056463f3ba5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) } return -EIO; } - msleep(1); + usleep_range(1000, 1500); } if (id_reg) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index c4262c23ed7c77b009f6c5cebf233a54ebfd685b..be41e4c77b657285841c610c1f30a701dc2c87ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -537,7 +537,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); - msleep(1); + usleep_range(1000, 1500); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 
0); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); @@ -1198,7 +1198,7 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter) flashaddr += 8; } } - msleep(1); + usleep_range(1000, 1500); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); @@ -1295,7 +1295,7 @@ void qlcnic_request_firmware(struct qlcnic_adapter *adapter) rc = qlcnic_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); - msleep(1); + usleep_range(1000, 1500); goto next; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index e45bf09af0c9fe4dbe9cdc629370af792ab88c01..18e5de72e9b4c2c9b95848bf444597251942039e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1753,7 +1753,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, if (qlcnic_encap_length(sts_data[1]) && skb->ip_summed == CHECKSUM_UNNECESSARY) { - skb->encapsulation = 1; + skb->csum_level = 1; adapter->stats.encap_rx_csummed++; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index cf08b2de071e4d602745d2dd5c7bc6e7b3b530df..f5e29f7bdae39b77eed8abc0b8e73360ceeda698 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -52,7 +52,7 @@ MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3= POST in medium mode, 4=POST in slow mode)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -111,6 +111,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), @@ -227,6 +228,11 @@ static const struct qlcnic_board_info qlcnic_boards[] = { { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, 0x0, 0x0, "8300 Series 1/10GbE Controller" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE8830, + 0x0, + 0x0, + "8830 Series 1/10GbE Controller" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, @@ -1131,6 +1137,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) *bar = QLCNIC_82XX_BAR0_LENGTH; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: @@ -2474,6 +2481,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: qlcnic_83xx_register_map(ahw); break; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 3e96f269150d197253fdcdf3066c1a2da5b62985..6c904a6cad2a177036b42190cffb17a25e194708 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ 
b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1922,7 +1922,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc->p.skb = NULL; skb_reserve(skb, NET_IP_ALIGN); } - while (length > 0) { + do { lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); size = (length < rx_ring->lbq_buf_size) ? length : rx_ring->lbq_buf_size; @@ -1939,7 +1939,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb->truesize += size; length -= size; i++; - } + } while (length > 0); ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, &hlen); __pskb_pull_tail(skb, hlen); diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..f3a47147937d46b3a7aa5955b6e9729d17597a99 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -0,0 +1,30 @@ +# +# Qualcomm network device configuration +# + +config NET_VENDOR_QUALCOMM + bool "Qualcomm devices" + default y + depends on SPI_MASTER && OF_GPIO + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Qualcomm cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_QUALCOMM + +config QCA7000 + tristate "Qualcomm Atheros QCA7000 support" + depends on SPI_MASTER && OF_GPIO + ---help--- + This SPI protocol driver supports the Qualcomm Atheros QCA7000. + + To compile this driver as a module, choose M here. The module + will be called qcaspi. + +endif # NET_VENDOR_QUALCOMM diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9da2d75db7006a4755602fd602a75a528c827f53 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Qualcomm network device drivers. +# + +obj-$(CONFIG_QCA7000) += qcaspi.o +qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c new file mode 100644 index 0000000000000000000000000000000000000000..f0066fbb44a65996cafcfab3717a0d9bdd7b4476 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -0,0 +1,149 @@ +/* + * + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* This module implements the Qualcomm Atheros SPI protocol for + * kernel-based SPI device. 
+ */ + +#include +#include +#include +#include +#include + +#include "qca_7k.h" + +void +qcaspi_spi_error(struct qcaspi *qca) +{ + if (qca->sync != QCASPI_SYNC_READY) + return; + + netdev_err(qca->net_dev, "spi error\n"); + qca->sync = QCASPI_SYNC_UNKNOWN; + qca->stats.spi_err++; +} + +int +qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) +{ + __be16 rx_data; + __be16 tx_data; + struct spi_transfer *transfer; + struct spi_message *msg; + int ret; + + tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); + + if (qca->legacy_mode) { + msg = &qca->spi_msg1; + transfer = &qca->spi_xfer1; + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + spi_sync(qca->spi_dev, msg); + } else { + msg = &qca->spi_msg2; + transfer = &qca->spi_xfer2[0]; + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + } + transfer->tx_buf = NULL; + transfer->rx_buf = &rx_data; + transfer->len = QCASPI_CMD_LEN; + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + else + *result = be16_to_cpu(rx_data); + + return ret; +} + +int +qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) +{ + __be16 tx_data[2]; + struct spi_transfer *transfer; + struct spi_message *msg; + int ret; + + tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); + tx_data[1] = cpu_to_be16(value); + + if (qca->legacy_mode) { + msg = &qca->spi_msg1; + transfer = &qca->spi_xfer1; + transfer->tx_buf = &tx_data[0]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + spi_sync(qca->spi_dev, msg); + } else { + msg = &qca->spi_msg2; + transfer = &qca->spi_xfer2[0]; + transfer->tx_buf = &tx_data[0]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + } + transfer->tx_buf = &tx_data[1]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + + return ret; +} + +int +qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) +{ + __be16 tx_data; + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + tx_data = cpu_to_be16(cmd); + transfer->len = sizeof(tx_data); + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h new file mode 100644 index 0000000000000000000000000000000000000000..1cad851ee507fb5d6a8a5c42da45caa0e6e6a105 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_7k.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Qualcomm Atheros SPI register definition. + * + * This module is designed to define the Qualcomm Atheros SPI + * register placeholders. + */ + +#ifndef _QCA_7K_H +#define _QCA_7K_H + +#include + +#include "qca_spi.h" + +#define QCA7K_SPI_READ (1 << 15) +#define QCA7K_SPI_WRITE (0 << 15) +#define QCA7K_SPI_INTERNAL (1 << 14) +#define QCA7K_SPI_EXTERNAL (0 << 14) + +#define QCASPI_CMD_LEN 2 +#define QCASPI_HW_PKT_LEN 4 +#define QCASPI_HW_BUF_LEN 0xC5B + +/* SPI registers; */ +#define SPI_REG_BFR_SIZE 0x0100 +#define SPI_REG_WRBUF_SPC_AVA 0x0200 +#define SPI_REG_RDBUF_BYTE_AVA 0x0300 +#define SPI_REG_SPI_CONFIG 0x0400 +#define SPI_REG_SPI_STATUS 0x0500 +#define SPI_REG_INTR_CAUSE 0x0C00 +#define SPI_REG_INTR_ENABLE 0x0D00 +#define SPI_REG_RDBUF_WATERMARK 0x1200 +#define SPI_REG_WRBUF_WATERMARK 0x1300 +#define SPI_REG_SIGNATURE 0x1A00 +#define SPI_REG_ACTION_CTRL 0x1B00 + +/* SPI_CONFIG register definition; */ +#define QCASPI_SLAVE_RESET_BIT (1 << 6) + +/* INTR_CAUSE/ENABLE register definition. */ +#define SPI_INT_WRBUF_BELOW_WM (1 << 10) +#define SPI_INT_CPU_ON (1 << 6) +#define SPI_INT_ADDR_ERR (1 << 3) +#define SPI_INT_WRBUF_ERR (1 << 2) +#define SPI_INT_RDBUF_ERR (1 << 1) +#define SPI_INT_PKT_AVLBL (1 << 0) + +void qcaspi_spi_error(struct qcaspi *qca); +int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result); +int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value); +int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd); + +#endif /* _QCA_7K_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..8e28234dddad3e8b58f6377ea21cc637f27ea8ac --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains debugging routines for use in the QCA7K driver. 
+ */ + +#include +#include +#include +#include + +#include "qca_7k.h" +#include "qca_debug.h" + +#define QCASPI_MAX_REGS 0x20 + +static const u16 qcaspi_spi_regs[] = { + SPI_REG_BFR_SIZE, + SPI_REG_WRBUF_SPC_AVA, + SPI_REG_RDBUF_BYTE_AVA, + SPI_REG_SPI_CONFIG, + SPI_REG_SPI_STATUS, + SPI_REG_INTR_CAUSE, + SPI_REG_INTR_ENABLE, + SPI_REG_RDBUF_WATERMARK, + SPI_REG_WRBUF_WATERMARK, + SPI_REG_SIGNATURE, + SPI_REG_ACTION_CTRL +}; + +/* The order of these strings must match the order of the fields in + * struct qcaspi_stats + * See qca_spi.h + */ +static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { + "Triggered resets", + "Device resets", + "Reset timeouts", + "Read errors", + "Write errors", + "Read buffer errors", + "Write buffer errors", + "Out of memory", + "Write buffer misses", + "Transmit ring full", + "SPI errors", +}; + +#ifdef CONFIG_DEBUG_FS + +static int +qcaspi_info_show(struct seq_file *s, void *what) +{ + struct qcaspi *qca = s->private; + + seq_printf(s, "RX buffer size : %lu\n", + (unsigned long)qca->buffer_size); + + seq_puts(s, "TX ring state : "); + + if (qca->txr.skb[qca->txr.head] == NULL) + seq_puts(s, "empty"); + else if (qca->txr.skb[qca->txr.tail]) + seq_puts(s, "full"); + else + seq_puts(s, "in use"); + + seq_puts(s, "\n"); + + seq_printf(s, "TX ring size : %u\n", + qca->txr.size); + + seq_printf(s, "Sync state : %u (", + (unsigned int)qca->sync); + switch (qca->sync) { + case QCASPI_SYNC_UNKNOWN: + seq_puts(s, "QCASPI_SYNC_UNKNOWN"); + break; + case QCASPI_SYNC_RESET: + seq_puts(s, "QCASPI_SYNC_RESET"); + break; + case QCASPI_SYNC_READY: + seq_puts(s, "QCASPI_SYNC_READY"); + break; + default: + seq_puts(s, "INVALID"); + break; + } + seq_puts(s, ")\n"); + + seq_printf(s, "IRQ : %d\n", + qca->spi_dev->irq); + seq_printf(s, "INTR REQ : %u\n", + qca->intr_req); + seq_printf(s, "INTR SVC : %u\n", + qca->intr_svc); + + seq_printf(s, "SPI max speed : %lu\n", + (unsigned long)qca->spi_dev->max_speed_hz); + seq_printf(s, "SPI mode : %x\n", + qca->spi_dev->mode); + seq_printf(s, "SPI chip select : %u\n", + (unsigned int)qca->spi_dev->chip_select); + seq_printf(s, "SPI legacy mode : %u\n", + (unsigned int)qca->legacy_mode); + seq_printf(s, "SPI burst length : %u\n", + (unsigned int)qca->burst_len); + + return 0; +} + +static int +qcaspi_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, qcaspi_info_show, inode->i_private); +} + +static const struct file_operations qcaspi_info_ops = { + .open = qcaspi_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void +qcaspi_init_device_debugfs(struct qcaspi *qca) +{ + struct dentry *device_root; + + device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL); + qca->device_root = device_root; + + if (IS_ERR(device_root) || !device_root) { + pr_warn("failed to create debugfs directory for %s\n", + dev_name(&qca->net_dev->dev)); + return; + } + debugfs_create_file("info", S_IFREG | S_IRUGO, device_root, qca, + &qcaspi_info_ops); +} + +void +qcaspi_remove_device_debugfs(struct qcaspi *qca) +{ + debugfs_remove_recursive(qca->device_root); +} + +#else /* CONFIG_DEBUG_FS */ + +void +qcaspi_init_device_debugfs(struct qcaspi *qca) +{ +} + +void +qcaspi_remove_device_debugfs(struct qcaspi *qca) +{ +} + +#endif + +static void +qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p) +{ + struct qcaspi *qca = netdev_priv(dev); + + strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver)); + strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version)); + 
strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version)); + strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev), + sizeof(p->bus_info)); +} + +static int +qcaspi_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->transceiver = XCVR_INTERNAL; + cmd->supported = SUPPORTED_10baseT_Half; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = DUPLEX_HALF; + cmd->port = PORT_OTHER; + cmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void +qcaspi_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) +{ + struct qcaspi *qca = netdev_priv(dev); + struct qcaspi_stats *st = &qca->stats; + + memcpy(data, st, ARRAY_SIZE(qcaspi_gstrings_stats) * sizeof(u64)); +} + +static void +qcaspi_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &qcaspi_gstrings_stats, + sizeof(qcaspi_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int +qcaspi_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(qcaspi_gstrings_stats); + default: + return -EINVAL; + } +} + +static int +qcaspi_get_regs_len(struct net_device *dev) +{ + return sizeof(u32) * QCASPI_MAX_REGS; +} + +static void +qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct qcaspi *qca = netdev_priv(dev); + u32 *regs_buff = p; + unsigned int i; + + regs->version = 1; + memset(regs_buff, 0, sizeof(u32) * QCASPI_MAX_REGS); + + for (i = 0; i < ARRAY_SIZE(qcaspi_spi_regs); i++) { + u16 offset, value; + + qcaspi_read_register(qca, qcaspi_spi_regs[i], &value); + offset = qcaspi_spi_regs[i] >> 8; + regs_buff[offset] = value; + } +} + +static void +qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +{ + struct qcaspi *qca = netdev_priv(dev); + + ring->rx_max_pending = 4; + ring->tx_max_pending = TX_RING_MAX_LEN; + ring->rx_pending = 4; + ring->tx_pending = qca->txr.count; +} + +static int +qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +{ + struct qcaspi *qca = netdev_priv(dev); + + if ((ring->rx_pending) || + (ring->rx_mini_pending) || + (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(dev)) + qcaspi_netdev_close(dev); + + qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); + qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); + + if (netif_running(dev)) + qcaspi_netdev_open(dev); + + return 0; +} + +static const struct ethtool_ops qcaspi_ethtool_ops = { + .get_drvinfo = qcaspi_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = qcaspi_get_settings, + .get_ethtool_stats = qcaspi_get_ethtool_stats, + .get_strings = qcaspi_get_strings, + .get_sset_count = qcaspi_get_sset_count, + .get_regs_len = qcaspi_get_regs_len, + .get_regs = qcaspi_get_regs, + .get_ringparam = qcaspi_get_ringparam, + .set_ringparam = qcaspi_set_ringparam, +}; + +void qcaspi_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &qcaspi_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..46a785844421c9cd6f0012d80e5207fe985a13c8 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_debug.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. 
+ * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains debugging routines for use in the QCA7K driver. + */ + +#ifndef _QCA_DEBUG_H +#define _QCA_DEBUG_H + +#include "qca_spi.h" + +void qcaspi_init_device_debugfs(struct qcaspi *qca); + +void qcaspi_remove_device_debugfs(struct qcaspi *qca); + +void qcaspi_set_ethtool_ops(struct net_device *dev); + +#endif /* _QCA_DEBUG_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_framing.c b/drivers/net/ethernet/qualcomm/qca_framing.c new file mode 100644 index 0000000000000000000000000000000000000000..faa924c85e29364672e47ed392a4033257189e8d --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_framing.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2011, 2012, Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Atheros ethernet framing. Every Ethernet frame is surrounded + * by an atheros frame while transmitted over a serial channel; + */ + +#include + +#include "qca_framing.h" + +u16 +qcafrm_create_header(u8 *buf, u16 length) +{ + __le16 len; + + if (!buf) + return 0; + + len = cpu_to_le16(length); + + buf[0] = 0xAA; + buf[1] = 0xAA; + buf[2] = 0xAA; + buf[3] = 0xAA; + buf[4] = len & 0xff; + buf[5] = (len >> 8) & 0xff; + buf[6] = 0; + buf[7] = 0; + + return QCAFRM_HEADER_LEN; +} + +u16 +qcafrm_create_footer(u8 *buf) +{ + if (!buf) + return 0; + + buf[0] = 0x55; + buf[1] = 0x55; + return QCAFRM_FOOTER_LEN; +} + +/* Gather received bytes and try to extract a full ethernet frame by + * following a simple state machine. + * + * Return: QCAFRM_GATHER No ethernet frame fully received yet. + * QCAFRM_NOHEAD Header expected but not found. + * QCAFRM_INVLEN Atheros frame length is invalid + * QCAFRM_NOTAIL Footer expected but not found. 
+ * > 0 Number of byte in the fully received + * Ethernet frame + */ + +s32 +qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte) +{ + s32 ret = QCAFRM_GATHER; + u16 len; + + switch (handle->state) { + case QCAFRM_HW_LEN0: + case QCAFRM_HW_LEN1: + /* by default, just go to next state */ + handle->state--; + + if (recv_byte != 0x00) { + /* first two bytes of length must be 0 */ + handle->state = QCAFRM_HW_LEN0; + } + break; + case QCAFRM_HW_LEN2: + case QCAFRM_HW_LEN3: + handle->state--; + break; + /* 4 bytes header pattern */ + case QCAFRM_WAIT_AA1: + case QCAFRM_WAIT_AA2: + case QCAFRM_WAIT_AA3: + case QCAFRM_WAIT_AA4: + if (recv_byte != 0xAA) { + ret = QCAFRM_NOHEAD; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state--; + } + break; + /* 2 bytes length. */ + /* Borrow offset field to hold length for now. */ + case QCAFRM_WAIT_LEN_BYTE0: + handle->offset = recv_byte; + handle->state = QCAFRM_WAIT_LEN_BYTE1; + break; + case QCAFRM_WAIT_LEN_BYTE1: + handle->offset = handle->offset | (recv_byte << 8); + handle->state = QCAFRM_WAIT_RSVD_BYTE1; + break; + case QCAFRM_WAIT_RSVD_BYTE1: + handle->state = QCAFRM_WAIT_RSVD_BYTE2; + break; + case QCAFRM_WAIT_RSVD_BYTE2: + len = handle->offset; + if (len > buf_len || len < QCAFRM_ETHMINLEN) { + ret = QCAFRM_INVLEN; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state = (enum qcafrm_state)(len + 1); + /* Remaining number of bytes. */ + handle->offset = 0; + } + break; + default: + /* Receiving Ethernet frame itself. */ + buf[handle->offset] = recv_byte; + handle->offset++; + handle->state--; + break; + case QCAFRM_WAIT_551: + if (recv_byte != 0x55) { + ret = QCAFRM_NOTAIL; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state = QCAFRM_WAIT_552; + } + break; + case QCAFRM_WAIT_552: + if (recv_byte != 0x55) { + ret = QCAFRM_NOTAIL; + handle->state = QCAFRM_HW_LEN0; + } else { + ret = handle->offset; + /* Frame is fully received. */ + handle->state = QCAFRM_HW_LEN0; + } + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_framing.h new file mode 100644 index 0000000000000000000000000000000000000000..5d965959c978efa7d0c07094b948652e87e5dad0 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_framing.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2011, 2012, Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros + * frame while transmitted over a serial channel. 
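+ *
+ * For illustration (derived from qcafrm_create_header() and
+ * qcafrm_create_footer()), a frame carrying N payload bytes is wrapped
+ * for transmission as:
+ *
+ *   AA AA AA AA <len LSB> <len MSB> 00 00 <N payload bytes> 55 55
+ *
+ * where the two length bytes hold N in little-endian order.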
+ */ + +#ifndef _QCA_FRAMING_H +#define _QCA_FRAMING_H + +#include +#include +#include + +/* Frame is currently being received */ +#define QCAFRM_GATHER 0 + +/* No header byte while expecting it */ +#define QCAFRM_NOHEAD (QCAFRM_ERR_BASE - 1) + +/* No tailer byte while expecting it */ +#define QCAFRM_NOTAIL (QCAFRM_ERR_BASE - 2) + +/* Frame length is invalid */ +#define QCAFRM_INVLEN (QCAFRM_ERR_BASE - 3) + +/* Frame length is invalid */ +#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4) + +/* Min/Max Ethernet MTU */ +#define QCAFRM_ETHMINMTU 46 +#define QCAFRM_ETHMAXMTU 1500 + +/* Min/Max frame lengths */ +#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN) +#define QCAFRM_ETHMAXLEN (QCAFRM_ETHMAXMTU + VLAN_ETH_HLEN) + +/* QCA7K header len */ +#define QCAFRM_HEADER_LEN 8 + +/* QCA7K footer len */ +#define QCAFRM_FOOTER_LEN 2 + +/* QCA7K Framing. */ +#define QCAFRM_ERR_BASE -1000 + +enum qcafrm_state { + QCAFRM_HW_LEN0 = 0x8000, + QCAFRM_HW_LEN1 = QCAFRM_HW_LEN0 - 1, + QCAFRM_HW_LEN2 = QCAFRM_HW_LEN1 - 1, + QCAFRM_HW_LEN3 = QCAFRM_HW_LEN2 - 1, + + /* Waiting first 0xAA of header */ + QCAFRM_WAIT_AA1 = QCAFRM_HW_LEN3 - 1, + + /* Waiting second 0xAA of header */ + QCAFRM_WAIT_AA2 = QCAFRM_WAIT_AA1 - 1, + + /* Waiting third 0xAA of header */ + QCAFRM_WAIT_AA3 = QCAFRM_WAIT_AA2 - 1, + + /* Waiting fourth 0xAA of header */ + QCAFRM_WAIT_AA4 = QCAFRM_WAIT_AA3 - 1, + + /* Waiting Byte 0-1 of length (litte endian) */ + QCAFRM_WAIT_LEN_BYTE0 = QCAFRM_WAIT_AA4 - 1, + QCAFRM_WAIT_LEN_BYTE1 = QCAFRM_WAIT_AA4 - 2, + + /* Reserved bytes */ + QCAFRM_WAIT_RSVD_BYTE1 = QCAFRM_WAIT_AA4 - 3, + QCAFRM_WAIT_RSVD_BYTE2 = QCAFRM_WAIT_AA4 - 4, + + /* The frame length is used as the state until + * the end of the Ethernet frame + * Waiting for first 0x55 of footer + */ + QCAFRM_WAIT_551 = 1, + + /* Waiting for second 0x55 of footer */ + QCAFRM_WAIT_552 = QCAFRM_WAIT_551 - 1 +}; + +/* Structure to maintain the frame decoding during reception. */ + +struct qcafrm_handle { + /* Current decoding state */ + enum qcafrm_state state; + + /* Offset in buffer (borrowed for length too) */ + s16 offset; + + /* Frame length as kept by this module */ + u16 len; +}; + +u16 qcafrm_create_header(u8 *buf, u16 len); + +u16 qcafrm_create_footer(u8 *buf); + +static inline void qcafrm_fsm_init(struct qcafrm_handle *handle) +{ + handle->state = QCAFRM_HW_LEN0; +} + +/* Gather received bytes and try to extract a full Ethernet frame + * by following a simple state machine. + * + * Return: QCAFRM_GATHER No Ethernet frame fully received yet. + * QCAFRM_NOHEAD Header expected but not found. + * QCAFRM_INVLEN QCA7K frame length is invalid + * QCAFRM_NOTAIL Footer expected but not found. + * > 0 Number of byte in the fully received + * Ethernet frame + */ + +s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte); + +#endif /* _QCA_FRAMING_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c new file mode 100644 index 0000000000000000000000000000000000000000..2c811f66d5acc47da86407222c4eea21d14954d0 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -0,0 +1,991 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This module implements the Qualcomm Atheros SPI protocol for + * kernel-based SPI device; it is essentially an Ethernet-to-SPI + * serial converter; + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qca_7k.h" +#include "qca_debug.h" +#include "qca_framing.h" +#include "qca_spi.h" + +#define MAX_DMA_BURST_LEN 5000 + +/* Modules parameters */ +#define QCASPI_CLK_SPEED_MIN 1000000 +#define QCASPI_CLK_SPEED_MAX 16000000 +#define QCASPI_CLK_SPEED 8000000 +static int qcaspi_clkspeed; +module_param(qcaspi_clkspeed, int, 0); +MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000."); + +#define QCASPI_BURST_LEN_MIN 1 +#define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN +static int qcaspi_burst_len = MAX_DMA_BURST_LEN; +module_param(qcaspi_burst_len, int, 0); +MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."); + +#define QCASPI_PLUGGABLE_MIN 0 +#define QCASPI_PLUGGABLE_MAX 1 +static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; +module_param(qcaspi_pluggable, int, 0); +MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); + +#define QCASPI_MTU QCAFRM_ETHMAXMTU +#define QCASPI_TX_TIMEOUT (1 * HZ) +#define QCASPI_QCA7K_REBOOT_TIME_MS 1000 + +static void +start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause) +{ + *intr_cause = 0; + + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause); + netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause); +} + +static void +end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause) +{ + u16 intr_enable = (SPI_INT_CPU_ON | + SPI_INT_PKT_AVLBL | + SPI_INT_RDBUF_ERR | + SPI_INT_WRBUF_ERR); + + qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable); + netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause); +} + +static u32 +qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) +{ + __be16 cmd; + struct spi_message *msg = &qca->spi_msg2; + struct spi_transfer *transfer = &qca->spi_xfer2[0]; + int ret; + + cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); + transfer->tx_buf = &cmd; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + transfer->tx_buf = src; + transfer->rx_buf = NULL; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) +{ + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + transfer->tx_buf = src; + transfer->rx_buf = NULL; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != len)) { + 
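+		/* Either spi_sync() failed or fewer bytes than requested were
+		 * clocked out; treat both as an SPI error and report zero
+		 * bytes written.
+		 */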
qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) +{ + struct spi_message *msg = &qca->spi_msg2; + __be16 cmd; + struct spi_transfer *transfer = &qca->spi_xfer2[0]; + int ret; + + cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); + transfer->tx_buf = &cmd; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + transfer->tx_buf = NULL; + transfer->rx_buf = dst; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) +{ + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + transfer->tx_buf = NULL; + transfer->rx_buf = dst; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static int +qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb) +{ + u32 count; + u32 written; + u32 offset; + u32 len; + + len = skb->len; + + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len); + if (qca->legacy_mode) + qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); + + offset = 0; + while (len) { + count = len; + if (count > qca->burst_len) + count = qca->burst_len; + + if (qca->legacy_mode) { + written = qcaspi_write_legacy(qca, + skb->data + offset, + count); + } else { + written = qcaspi_write_burst(qca, + skb->data + offset, + count); + } + + if (written != count) + return -1; + + offset += count; + len -= count; + } + + return 0; +} + +static int +qcaspi_transmit(struct qcaspi *qca) +{ + struct net_device_stats *n_stats = &qca->net_dev->stats; + u16 available = 0; + u32 pkt_len; + u16 new_head; + u16 packets = 0; + + if (qca->txr.skb[qca->txr.head] == NULL) + return 0; + + qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available); + + while (qca->txr.skb[qca->txr.head]) { + pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; + + if (available < pkt_len) { + if (packets == 0) + qca->stats.write_buf_miss++; + break; + } + + if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) { + qca->stats.write_err++; + return -1; + } + + packets++; + n_stats->tx_packets++; + n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len; + available -= pkt_len; + + /* remove the skb from the queue */ + /* XXX After inconsistent lock states netif_tx_lock() + * has been replaced by netif_tx_lock_bh() and so on. + */ + netif_tx_lock_bh(qca->net_dev); + dev_kfree_skb(qca->txr.skb[qca->txr.head]); + qca->txr.skb[qca->txr.head] = NULL; + qca->txr.size -= pkt_len; + new_head = qca->txr.head + 1; + if (new_head >= qca->txr.count) + new_head = 0; + qca->txr.head = new_head; + if (netif_queue_stopped(qca->net_dev)) + netif_wake_queue(qca->net_dev); + netif_tx_unlock_bh(qca->net_dev); + } + + return 0; +} + +static int +qcaspi_receive(struct qcaspi *qca) +{ + struct net_device *net_dev = qca->net_dev; + struct net_device_stats *n_stats = &net_dev->stats; + u16 available = 0; + u32 bytes_read; + u8 *cp; + + /* Allocate rx SKB if we don't have one available. */ + if (!qca->rx_skb) { + qca->rx_skb = netdev_alloc_skb(net_dev, + net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + netdev_dbg(net_dev, "out of RX resources\n"); + qca->stats.out_of_mem++; + return -1; + } + } + + /* Read the packet size. 
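+	 * SPI_REG_RDBUF_BYTE_AVA reports how many bytes the QCA7000 has
+	 * buffered for the host to read.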
*/ + qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); + netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", + available); + + if (available == 0) { + netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n"); + return -1; + } + + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available); + + if (qca->legacy_mode) + qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); + + while (available) { + u32 count = available; + + if (count > qca->burst_len) + count = qca->burst_len; + + if (qca->legacy_mode) { + bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer, + count); + } else { + bytes_read = qcaspi_read_burst(qca, qca->rx_buffer, + count); + } + + netdev_dbg(net_dev, "available: %d, byte read: %d\n", + available, bytes_read); + + if (bytes_read) { + available -= bytes_read; + } else { + qca->stats.read_err++; + return -1; + } + + cp = qca->rx_buffer; + + while ((bytes_read--) && (qca->rx_skb)) { + s32 retcode; + + retcode = qcafrm_fsm_decode(&qca->frm_handle, + qca->rx_skb->data, + skb_tailroom(qca->rx_skb), + *cp); + cp++; + switch (retcode) { + case QCAFRM_GATHER: + case QCAFRM_NOHEAD: + break; + case QCAFRM_NOTAIL: + netdev_dbg(net_dev, "no RX tail\n"); + n_stats->rx_errors++; + n_stats->rx_dropped++; + break; + case QCAFRM_INVLEN: + netdev_dbg(net_dev, "invalid RX length\n"); + n_stats->rx_errors++; + n_stats->rx_dropped++; + break; + default: + qca->rx_skb->dev = qca->net_dev; + n_stats->rx_packets++; + n_stats->rx_bytes += retcode; + skb_put(qca->rx_skb, retcode); + qca->rx_skb->protocol = eth_type_trans( + qca->rx_skb, qca->rx_skb->dev); + qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_rx_ni(qca->rx_skb); + qca->rx_skb = netdev_alloc_skb(net_dev, + net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + netdev_dbg(net_dev, "out of RX resources\n"); + n_stats->rx_errors++; + qca->stats.out_of_mem++; + break; + } + } + } + } + + return 0; +} + +/* Check that tx ring stores only so much bytes + * that fit into the internal QCA buffer. + */ + +static int +qcaspi_tx_ring_has_space(struct tx_ring *txr) +{ + if (txr->skb[txr->tail]) + return 0; + + return (txr->size + QCAFRM_ETHMAXLEN < QCASPI_HW_BUF_LEN) ? 1 : 0; +} + +/* Flush the tx ring. This function is only safe to + * call from the qcaspi_spi_thread. + */ + +static void +qcaspi_flush_tx_ring(struct qcaspi *qca) +{ + int i; + + /* XXX After inconsistent lock states netif_tx_lock() + * has been replaced by netif_tx_lock_bh() and so on. + */ + netif_tx_lock_bh(qca->net_dev); + for (i = 0; i < TX_RING_MAX_LEN; i++) { + if (qca->txr.skb[i]) { + dev_kfree_skb(qca->txr.skb[i]); + qca->txr.skb[i] = NULL; + qca->net_dev->stats.tx_dropped++; + } + } + qca->txr.tail = 0; + qca->txr.head = 0; + qca->txr.size = 0; + netif_tx_unlock_bh(qca->net_dev); +} + +static void +qcaspi_qca7k_sync(struct qcaspi *qca, int event) +{ + u16 signature = 0; + u16 spi_config; + u16 wrbuf_space = 0; + static u16 reset_count; + + if (event == QCASPI_EVENT_CPUON) { + /* Read signature twice, if not valid + * go back to unknown state. 
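+		 * Only the value returned by the second read is trusted.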
+ */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + qca->sync = QCASPI_SYNC_UNKNOWN; + netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n"); + } else { + /* ensure that the WRBUF is empty */ + qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, + &wrbuf_space); + if (wrbuf_space != QCASPI_HW_BUF_LEN) { + netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n"); + qca->sync = QCASPI_SYNC_UNKNOWN; + } else { + netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n"); + qca->sync = QCASPI_SYNC_READY; + return; + } + } + } + + switch (qca->sync) { + case QCASPI_SYNC_READY: + /* Read signature, if not valid go to unknown state. */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + qca->sync = QCASPI_SYNC_UNKNOWN; + netdev_dbg(qca->net_dev, "sync: bad signature, restart\n"); + /* don't reset right away */ + return; + } + break; + case QCASPI_SYNC_UNKNOWN: + /* Read signature, if not valid stay in unknown state */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n"); + return; + } + + /* TODO: use GPIO to reset QCA7000 in legacy mode*/ + netdev_dbg(qca->net_dev, "sync: resetting device.\n"); + qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config); + spi_config |= QCASPI_SLAVE_RESET_BIT; + qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config); + + qca->sync = QCASPI_SYNC_RESET; + qca->stats.trig_reset++; + reset_count = 0; + break; + case QCASPI_SYNC_RESET: + reset_count++; + netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n", + reset_count); + if (reset_count >= QCASPI_RESET_TIMEOUT) { + /* reset did not seem to take place, try again */ + qca->sync = QCASPI_SYNC_UNKNOWN; + qca->stats.reset_timeout++; + netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n"); + } + break; + } +} + +static int +qcaspi_spi_thread(void *data) +{ + struct qcaspi *qca = data; + u16 intr_cause = 0; + + netdev_info(qca->net_dev, "SPI thread created\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if ((qca->intr_req == qca->intr_svc) && + (qca->txr.skb[qca->txr.head] == NULL) && + (qca->sync == QCASPI_SYNC_READY)) + schedule(); + + set_current_state(TASK_RUNNING); + + netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n", + qca->intr_req - qca->intr_svc, + qca->txr.skb[qca->txr.head]); + + qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); + + if (qca->sync != QCASPI_SYNC_READY) { + netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n", + (unsigned int)qca->sync); + netif_stop_queue(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + msleep(QCASPI_QCA7K_REBOOT_TIME_MS); + } + + if (qca->intr_svc != qca->intr_req) { + qca->intr_svc = qca->intr_req; + start_spi_intr_handling(qca, &intr_cause); + + if (intr_cause & SPI_INT_CPU_ON) { + qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + + /* not synced. 
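+				 * The device did not come back in sync after
+				 * CPU-on, so skip the rest of this iteration.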
*/ + if (qca->sync != QCASPI_SYNC_READY) + continue; + + qca->stats.device_reset++; + netif_wake_queue(qca->net_dev); + netif_carrier_on(qca->net_dev); + } + + if (intr_cause & SPI_INT_RDBUF_ERR) { + /* restart sync */ + netdev_dbg(qca->net_dev, "===> rdbuf error!\n"); + qca->stats.read_buf_err++; + qca->sync = QCASPI_SYNC_UNKNOWN; + continue; + } + + if (intr_cause & SPI_INT_WRBUF_ERR) { + /* restart sync */ + netdev_dbg(qca->net_dev, "===> wrbuf error!\n"); + qca->stats.write_buf_err++; + qca->sync = QCASPI_SYNC_UNKNOWN; + continue; + } + + /* can only handle other interrupts + * if sync has occured + */ + if (qca->sync == QCASPI_SYNC_READY) { + if (intr_cause & SPI_INT_PKT_AVLBL) + qcaspi_receive(qca); + } + + end_spi_intr_handling(qca, intr_cause); + } + + if (qca->sync == QCASPI_SYNC_READY) + qcaspi_transmit(qca); + } + set_current_state(TASK_RUNNING); + netdev_info(qca->net_dev, "SPI thread exit\n"); + + return 0; +} + +static irqreturn_t +qcaspi_intr_handler(int irq, void *data) +{ + struct qcaspi *qca = data; + + qca->intr_req++; + if (qca->spi_thread && + qca->spi_thread->state != TASK_RUNNING) + wake_up_process(qca->spi_thread); + + return IRQ_HANDLED; +} + +int +qcaspi_netdev_open(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + int ret = 0; + + if (!qca) + return -EINVAL; + + qca->intr_req = 1; + qca->intr_svc = 0; + qca->sync = QCASPI_SYNC_UNKNOWN; + qcafrm_fsm_init(&qca->frm_handle); + + qca->spi_thread = kthread_run((void *)qcaspi_spi_thread, + qca, "%s", dev->name); + + if (IS_ERR(qca->spi_thread)) { + netdev_err(dev, "%s: unable to start kernel thread.\n", + QCASPI_DRV_NAME); + return PTR_ERR(qca->spi_thread); + } + + ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0, + dev->name, qca); + if (ret) { + netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n", + QCASPI_DRV_NAME, qca->spi_dev->irq, ret); + kthread_stop(qca->spi_thread); + return ret; + } + + netif_start_queue(qca->net_dev); + + return 0; +} + +int +qcaspi_netdev_close(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + netif_stop_queue(dev); + + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + free_irq(qca->spi_dev->irq, qca); + + kthread_stop(qca->spi_thread); + qca->spi_thread = NULL; + qcaspi_flush_tx_ring(qca); + + return 0; +} + +static netdev_tx_t +qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 frame_len; + u8 *ptmp; + struct qcaspi *qca = netdev_priv(dev); + u16 new_tail; + struct sk_buff *tskb; + u8 pad_len = 0; + + if (skb->len < QCAFRM_ETHMINLEN) + pad_len = QCAFRM_ETHMINLEN - skb->len; + + if (qca->txr.skb[qca->txr.tail]) { + netdev_warn(qca->net_dev, "queue was unexpectedly full!\n"); + netif_stop_queue(qca->net_dev); + qca->stats.ring_full++; + return NETDEV_TX_BUSY; + } + + if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) || + (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) { + tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN, + QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC); + if (!tskb) { + netdev_dbg(qca->net_dev, "could not allocate tx_buff\n"); + qca->stats.out_of_mem++; + return NETDEV_TX_BUSY; + } + dev_kfree_skb(skb); + skb = tskb; + } + + frame_len = skb->len + pad_len; + + ptmp = skb_push(skb, QCAFRM_HEADER_LEN); + qcafrm_create_header(ptmp, frame_len); + + if (pad_len) { + ptmp = skb_put(skb, pad_len); + memset(ptmp, 0, pad_len); + } + + ptmp = skb_put(skb, QCAFRM_FOOTER_LEN); + qcafrm_create_footer(ptmp); + + netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n", + skb->len); + + qca->txr.size += skb->len + 
QCASPI_HW_PKT_LEN; + + new_tail = qca->txr.tail + 1; + if (new_tail >= qca->txr.count) + new_tail = 0; + + qca->txr.skb[qca->txr.tail] = skb; + qca->txr.tail = new_tail; + + if (!qcaspi_tx_ring_has_space(&qca->txr)) { + netif_stop_queue(qca->net_dev); + qca->stats.ring_full++; + } + + dev->trans_start = jiffies; + + if (qca->spi_thread && + qca->spi_thread->state != TASK_RUNNING) + wake_up_process(qca->spi_thread); + + return NETDEV_TX_OK; +} + +static void +qcaspi_netdev_tx_timeout(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", + jiffies, jiffies - dev->trans_start); + qca->net_dev->stats.tx_errors++; + /* wake the queue if there is room */ + if (qcaspi_tx_ring_has_space(&qca->txr)) + netif_wake_queue(dev); +} + +static int +qcaspi_netdev_init(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + dev->mtu = QCASPI_MTU; + dev->type = ARPHRD_ETHER; + qca->clkspeed = qcaspi_clkspeed; + qca->burst_len = qcaspi_burst_len; + qca->spi_thread = NULL; + qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + + QCAFRM_FOOTER_LEN + 4) * 4; + + memset(&qca->stats, 0, sizeof(struct qcaspi_stats)); + + qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL); + if (!qca->rx_buffer) + return -ENOBUFS; + + qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + kfree(qca->rx_buffer); + netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); + return -ENOBUFS; + } + + return 0; +} + +static void +qcaspi_netdev_uninit(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + kfree(qca->rx_buffer); + qca->buffer_size = 0; + if (qca->rx_skb) + dev_kfree_skb(qca->rx_skb); +} + +static int +qcaspi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < QCAFRM_ETHMINMTU) || (new_mtu > QCAFRM_ETHMAXMTU)) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +static const struct net_device_ops qcaspi_netdev_ops = { + .ndo_init = qcaspi_netdev_init, + .ndo_uninit = qcaspi_netdev_uninit, + .ndo_open = qcaspi_netdev_open, + .ndo_stop = qcaspi_netdev_close, + .ndo_start_xmit = qcaspi_netdev_xmit, + .ndo_change_mtu = qcaspi_netdev_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = qcaspi_netdev_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +}; + +static void +qcaspi_netdev_setup(struct net_device *dev) +{ + struct qcaspi *qca = NULL; + + dev->netdev_ops = &qcaspi_netdev_ops; + qcaspi_set_ethtool_ops(dev); + dev->watchdog_timeo = QCASPI_TX_TIMEOUT; + dev->flags = IFF_MULTICAST; + dev->tx_queue_len = 100; + + qca = netdev_priv(dev); + memset(qca, 0, sizeof(struct qcaspi)); + + memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); + memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); + + spi_message_init(&qca->spi_msg1); + spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); + + spi_message_init(&qca->spi_msg2); + spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); + spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); + + memset(&qca->txr, 0, sizeof(qca->txr)); + qca->txr.count = TX_RING_MAX_LEN; +} + +static const struct of_device_id qca_spi_of_match[] = { + { .compatible = "qca,qca7000" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qca_spi_of_match); + +static int +qca_spi_probe(struct spi_device *spi_device) +{ + struct qcaspi *qca = NULL; + struct net_device *qcaspi_devs = NULL; + u8 legacy_mode = 0; + u16 signature; + const char *mac; + + if 
(!spi_device->dev.of_node) { + dev_err(&spi_device->dev, "Missing device tree\n"); + return -EINVAL; + } + + legacy_mode = of_property_read_bool(spi_device->dev.of_node, + "qca,legacy-mode"); + + if (qcaspi_clkspeed == 0) { + if (spi_device->max_speed_hz) + qcaspi_clkspeed = spi_device->max_speed_hz; + else + qcaspi_clkspeed = QCASPI_CLK_SPEED; + } + + if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || + (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { + dev_info(&spi_device->dev, "Invalid clkspeed: %d\n", + qcaspi_clkspeed); + return -EINVAL; + } + + if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || + (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { + dev_info(&spi_device->dev, "Invalid burst len: %d\n", + qcaspi_burst_len); + return -EINVAL; + } + + if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || + (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { + dev_info(&spi_device->dev, "Invalid pluggable: %d\n", + qcaspi_pluggable); + return -EINVAL; + } + + dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", + QCASPI_DRV_VERSION, + qcaspi_clkspeed, + qcaspi_burst_len, + qcaspi_pluggable); + + spi_device->mode = SPI_MODE_3; + spi_device->max_speed_hz = qcaspi_clkspeed; + if (spi_setup(spi_device) < 0) { + dev_err(&spi_device->dev, "Unable to setup SPI device\n"); + return -EFAULT; + } + + qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi)); + if (!qcaspi_devs) + return -ENOMEM; + + qcaspi_netdev_setup(qcaspi_devs); + + qca = netdev_priv(qcaspi_devs); + if (!qca) { + free_netdev(qcaspi_devs); + dev_err(&spi_device->dev, "Fail to retrieve private structure\n"); + return -ENOMEM; + } + qca->net_dev = qcaspi_devs; + qca->spi_dev = spi_device; + qca->legacy_mode = legacy_mode; + + mac = of_get_mac_address(spi_device->dev.of_node); + + if (mac) + ether_addr_copy(qca->net_dev->dev_addr, mac); + + if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + eth_hw_addr_random(qca->net_dev); + dev_info(&spi_device->dev, "Using random MAC address: %pM\n", + qca->net_dev->dev_addr); + } + + netif_carrier_off(qca->net_dev); + + if (!qcaspi_pluggable) { + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + + if (signature != QCASPI_GOOD_SIGNATURE) { + dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n", + signature); + free_netdev(qcaspi_devs); + return -EFAULT; + } + } + + if (register_netdev(qcaspi_devs)) { + dev_info(&spi_device->dev, "Unable to register net device %s\n", + qcaspi_devs->name); + free_netdev(qcaspi_devs); + return -EFAULT; + } + + spi_set_drvdata(spi_device, qcaspi_devs); + + qcaspi_init_device_debugfs(qca); + + return 0; +} + +static int +qca_spi_remove(struct spi_device *spi_device) +{ + struct net_device *qcaspi_devs = spi_get_drvdata(spi_device); + struct qcaspi *qca = netdev_priv(qcaspi_devs); + + qcaspi_remove_device_debugfs(qca); + + unregister_netdev(qcaspi_devs); + free_netdev(qcaspi_devs); + + return 0; +} + +static const struct spi_device_id qca_spi_id[] = { + { "qca7000", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, qca_spi_id); + +static struct spi_driver qca_spi_driver = { + .driver = { + .name = QCASPI_DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = qca_spi_of_match, + }, + .id_table = qca_spi_id, + .probe = qca_spi_probe, + .remove = qca_spi_remove, +}; +module_spi_driver(qca_spi_driver); + +MODULE_DESCRIPTION("Qualcomm Atheros SPI Driver"); +MODULE_AUTHOR("Qualcomm Atheros Communications"); +MODULE_AUTHOR("Stefan Wahren "); +MODULE_LICENSE("Dual BSD/GPL"); 
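+
+/* Illustrative note (module name assumed to be "qcaspi"): the module
+ * parameters defined above can be overridden at load time, e.g.
+ *
+ *   modprobe qcaspi qcaspi_clkspeed=8000000 qcaspi_burst_len=5000 qcaspi_pluggable=1
+ *
+ * Out-of-range values make qca_spi_probe() fail with -EINVAL.
+ */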
+MODULE_VERSION(QCASPI_DRV_VERSION); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h new file mode 100644 index 0000000000000000000000000000000000000000..6e31a0e744a45b48fda518489504a447a7cc6d94 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Qualcomm Atheros SPI register definition. + * + * This module is designed to define the Qualcomm Atheros SPI register + * placeholders; + */ + +#ifndef _QCA_SPI_H +#define _QCA_SPI_H + +#include +#include +#include +#include +#include + +#include "qca_framing.h" + +#define QCASPI_DRV_VERSION "0.2.7-i" +#define QCASPI_DRV_NAME "qcaspi" + +#define QCASPI_GOOD_SIGNATURE 0xAA55 + +#define TX_RING_MAX_LEN 10 +#define TX_RING_MIN_LEN 2 + +/* sync related constants */ +#define QCASPI_SYNC_UNKNOWN 0 +#define QCASPI_SYNC_RESET 1 +#define QCASPI_SYNC_READY 2 + +#define QCASPI_RESET_TIMEOUT 10 + +/* sync events */ +#define QCASPI_EVENT_UPDATE 0 +#define QCASPI_EVENT_CPUON 1 + +struct tx_ring { + struct sk_buff *skb[TX_RING_MAX_LEN]; + u16 head; + u16 tail; + u16 size; + u16 count; +}; + +struct qcaspi_stats { + u64 trig_reset; + u64 device_reset; + u64 reset_timeout; + u64 read_err; + u64 write_err; + u64 read_buf_err; + u64 write_buf_err; + u64 out_of_mem; + u64 write_buf_miss; + u64 ring_full; + u64 spi_err; +}; + +struct qcaspi { + struct net_device *net_dev; + struct spi_device *spi_dev; + struct task_struct *spi_thread; + + struct tx_ring txr; + struct qcaspi_stats stats; + + struct spi_message spi_msg1; + struct spi_message spi_msg2; + struct spi_transfer spi_xfer1; + struct spi_transfer spi_xfer2[2]; + + u8 *rx_buffer; + u32 buffer_size; + u8 sync; + + struct qcafrm_handle frm_handle; + struct sk_buff *rx_skb; + + unsigned int intr_req; + unsigned int intr_svc; + +#ifdef CONFIG_DEBUG_FS + struct dentry *device_root; +#endif + + /* user configurable options */ + u32 clkspeed; + u8 legacy_mode; + u16 burst_len; +}; + +int qcaspi_netdev_open(struct net_device *dev); +int qcaspi_netdev_close(struct net_device *dev); + +#endif /* _QCA_SPI_H */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0921302553c6f09f871e8932a0c19e804de61189..cf154f74cba1e18e38d6e9e6d3fb31b01377576b 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -52,6 +52,10 @@ #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" #define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define 
FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #ifdef RTL8169_DEBUG #define assert(expr) \ @@ -147,6 +151,13 @@ enum mac_version { RTL_GIGA_MAC_VER_42, RTL_GIGA_MAC_VER_43, RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, RTL_GIGA_MAC_NONE = 0xff, }; @@ -282,6 +293,27 @@ static const struct { [RTL_GIGA_MAC_VER_44] = _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_45] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_46] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_47] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, + JUMBO_1K, false), + [RTL_GIGA_MAC_VER_48] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, + JUMBO_1K, false), + [RTL_GIGA_MAC_VER_49] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_50] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_51] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), }; #undef _R @@ -380,6 +412,10 @@ enum rtl_registers { FuncEvent = 0xf0, FuncEventMask = 0xf4, FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, FuncForceEvent = 0xfc, }; @@ -410,6 +446,7 @@ enum rtl8168_8101_registers { #define EPHYAR_DATA_MASK 0xffff DLLPR = 0xd0, #define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) DBG_REG = 0xd1, #define FIX_NAK_1 (1 << 4) #define FIX_NAK_2 (1 << 3) @@ -429,6 +466,8 @@ enum rtl8168_8101_registers { #define EFUSEAR_REG_MASK 0x03ff #define EFUSEAR_REG_SHIFT 8 #define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) }; enum rtl8168_registers { @@ -444,9 +483,11 @@ enum rtl8168_registers { #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) #define ERIAR_MASK_SHIFT 12 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) EPHY_RXER_NUM = 0x7c, @@ -598,6 +639,9 @@ enum rtl_register_content { /* DumpCounterCommand */ CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ }; enum rtl_desc_bit { @@ -823,6 +867,10 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1); MODULE_FIRMWARE(FIRMWARE_8106E_2); MODULE_FIRMWARE(FIRMWARE_8168G_2); MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); static void rtl_lock_work(struct rtl8169_private *tp) { @@ -904,93 +952,6 @@ static const struct rtl_cond name = { \ \ static bool name ## _check(struct rtl8169_private *tp) -DECLARE_RTL_COND(rtl_ocpar_cond) -{ - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(OCPAR) & OCPAR_FLAG; -} - -static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) -{ - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - - return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
- RTL_R32(OCPDR) : ~0; -} - -static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) -{ - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W32(OCPDR, data); - RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); - - rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); -} - -DECLARE_RTL_COND(rtl_eriar_cond) -{ - void __iomem *ioaddr = tp->mmio_addr; - - return RTL_R32(ERIAR) & ERIAR_FLAG; -} - -static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) -{ - void __iomem *ioaddr = tp->mmio_addr; - - RTL_W8(ERIDR, cmd); - RTL_W32(ERIAR, 0x800010e8); - msleep(2); - - if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5)) - return; - - ocp_write(tp, 0x1, 0x30, 0x00000001); -} - -#define OOB_CMD_RESET 0x00 -#define OOB_CMD_DRIVER_START 0x05 -#define OOB_CMD_DRIVER_STOP 0x06 - -static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) -{ - return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; -} - -DECLARE_RTL_COND(rtl_ocp_read_cond) -{ - u16 reg; - - reg = rtl8168_get_ocp_reg(tp); - - return ocp_read(tp, 0x0f, reg) & 0x00000800; -} - -static void rtl8168_driver_start(struct rtl8169_private *tp) -{ - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); - - rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); -} - -static void rtl8168_driver_stop(struct rtl8169_private *tp) -{ - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); - - rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); -} - -static int r8168dp_check_dash(struct rtl8169_private *tp) -{ - u16 reg = rtl8168_get_ocp_reg(tp); - - return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; -} - static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) { if (reg & 0xffff0001) { @@ -1132,6 +1093,13 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg) return value; } +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(OCPAR) & OCPAR_FLAG; +} + static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) { void __iomem *ioaddr = tp->mmio_addr; @@ -1215,12 +1183,12 @@ static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); } -static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) { int val; val = rtl_readphy(tp, reg_addr); - rtl_writephy(tp, reg_addr, (val | p) & ~m); + rtl_writephy(tp, reg_addr, (val & ~m) | p); } static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, @@ -1267,6 +1235,13 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; } +DECLARE_RTL_COND(rtl_eriar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(ERIAR) & ERIAR_FLAG; +} + static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, u32 val, int type) { @@ -1289,7 +1264,7 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) RTL_R32(ERIDR) : ~0; } -static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, u32 m, int type) { u32 val; @@ -1298,6 +1273,208 @@ static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); } +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + 
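+	/* Start the OCP read: program the access mask and register offset
+	 * into OCPAR, then poll for completion before fetching the result
+	 * from OCPDR.
+	 */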
RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + return rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_ocp_read(tp, mask, reg); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_ocp_read(tp, mask, reg); + default: + BUG(); + return ~0; + } +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_ocp_write(tp, mask, reg, data); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + r8168ep_ocp_write(tp, mask, reg, data); + break; + default: + BUG(); + break; + } +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return ocp_read(tp, 0x0f, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return ocp_read(tp, 0x0f, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(IBISR0) & 0x02; +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static int r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; +} + +static int r8168ep_check_dash(struct rtl8169_private *tp) +{ + return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 
1 : 0; +} + +static int r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_check_dash(tp); + default: + return 0; + } +} + struct exgmac_reg { u16 addr; u16 mask; @@ -1442,9 +1619,9 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } /* Reset packet filter */ - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { @@ -1514,8 +1691,32 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) options = RTL_R8(Config3); if (options & LinkUp) wolopts |= WAKE_PHY; - if (options & MagicPacket) - wolopts |= WAKE_MAGIC; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + wolopts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + break; + } options = RTL_R8(Config5); if (options & UWF) @@ -1543,24 +1744,63 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { void __iomem *ioaddr = tp->mmio_addr; - unsigned int i; + unsigned int i, tmp; static const struct { u32 opt; u16 reg; u8 mask; } cfg[] = { { WAKE_PHY, Config3, LinkUp }, - { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, { WAKE_BCAST, Config5, BWF }, { WAKE_MCAST, Config5, MWF }, - { WAKE_ANY, Config5, LanWake } + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } }; u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); - for (i = 0; i < ARRAY_SIZE(cfg); i++) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + tmp = ARRAY_SIZE(cfg) - 1; + if (wolopts & WAKE_MAGIC) + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + MagicPacket_v2, + 0x0000, + ERIAR_EXGMAC); + else + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + 0x0000, + MagicPacket_v2, + ERIAR_EXGMAC); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + for (i = 0; i < tmp; i++) { options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; @@ -2045,6 +2285,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, u32 val; int mac_version; } mac_info[] = { + /* 8168EP 
family. */ + { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, + { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, + { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, + { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + /* 8168G family. */ { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, @@ -2140,6 +2389,14 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, tp->mac_version = tp->mii.supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_45 : + RTL_GIGA_MAC_VER_47; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_46 : + RTL_GIGA_MAC_VER_48; } } @@ -2801,8 +3058,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) * Fine Tune Switching regulator parameter */ rtl_writephy(tp, 0x1f, 0x0002); - rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); - rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); + rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00); if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { static const struct phy_reg phy_reg_init[] = { @@ -2851,8 +3108,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) /* Fine tune PLL performance */ rtl_writephy(tp, 0x1f, 0x0002); - rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); - rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); @@ -2949,8 +3206,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) /* Fine tune PLL performance */ rtl_writephy(tp, 0x1f, 0x0002); - rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); - rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); /* Switching regulator Slew rate */ rtl_writephy(tp, 0x1f, 0x0002); @@ -3078,32 +3335,32 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) /* DCO enable for 10M IDLE Power */ rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0023); - rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); /* For impedance matching */ rtl_writephy(tp, 0x1f, 0x0002); - rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); rtl_writephy(tp, 0x1f, 0x0000); /* PHY auto speed down */ rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000); + rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); rtl_writephy(tp, 0x1f, 0x0006); rtl_writephy(tp, 0x00, 0x5a00); rtl_writephy(tp, 0x1f, 0x0000); @@ -3167,39 +3424,39 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) /* For 4-corner performance improve */ 
rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); /* PHY auto speed down */ rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); rtl_writephy(tp, 0x1f, 0x0002); rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); /* improve 10M EEE waveform */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); /* EEE setting */ - rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); rtl_writephy(tp, 0x1f, 0x0002); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy(tp, 0x0d, 0x0007); @@ -3210,8 +3467,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) /* Green feature */ rtl_writephy(tp, 0x1f, 0x0003); - rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); - rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); rtl_writephy(tp, 0x1f, 0x0000); /* Broken BIOS workaround: feed GigaMAC registers with MAC address. 
*/ @@ -3223,20 +3480,20 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) /* For 4-corner performance improve */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b80); - rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); /* PHY auto speed down */ rtl_writephy(tp, 0x1f, 0x0007); rtl_writephy(tp, 0x1e, 0x002d); - rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); /* Improve 10M EEE waveform */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b86); - rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); } @@ -3286,7 +3543,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); } @@ -3342,7 +3599,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp) /* Improve 2-pair detection performance */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); @@ -3350,121 +3607,437 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp) /* Modify green table for giga */ rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x8b54); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); rtl_writephy(tp, 0x05, 0x8b5d); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); rtl_writephy(tp, 0x05, 0x8a7c); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); rtl_writephy(tp, 0x05, 0x8a7f); - rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000); + rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); rtl_writephy(tp, 0x05, 0x8a82); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); rtl_writephy(tp, 0x05, 0x8a85); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); rtl_writephy(tp, 0x05, 0x8a88); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 
0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) +{ + u16 dout_tapbin; + u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameters adjust - giga master */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x809b); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); + rtl_writephy(tp, 0x13, 0x80a2); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); + rtl_writephy(tp, 0x13, 0x80a4); + rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); + rtl_writephy(tp, 0x13, 0x809c); + rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - giga slave */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80ad); + rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); + rtl_writephy(tp, 0x13, 0x80b4); + rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); + rtl_writephy(tp, 0x13, 0x80ac); + rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - fnet */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808e); + rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); + rtl_writephy(tp, 0x13, 0x8090); + rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); + rtl_writephy(tp, 0x13, 0x8092); + rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + rtl_writephy(tp, 0x1f, 0x0a46); + data = rtl_readphy(tp, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = rtl_readphy(tp, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin^0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x827a); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 
0x13, 0x827b); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827c); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827d); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* SAR ADC performance */ + rtl_writephy(tp, 0x1f, 0x0bca); + rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x803f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8047); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x804f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8057); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x805f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8067); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x806f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) +{ + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 rlen; + u32 data; + + rtl_apply_firmware(tp); + + /* CHIN EST parameter update */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808a); + rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); rtl_writephy(tp, 0x1f, 0x0000); - /* uc same-seed solution */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000); + /* enable R-tune & PGA-retune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* eee setting */ - rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); - rtl_writephy(tp, 0x0d, 0x0000); - /* Green feature */ - rtl_writephy(tp, 0x1f, 0x0003); - rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); - rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data = r8168_mac_ocp_read(tp, 0xdd02); + ioffset_p3 = ((data & 0x80)>>7); + ioffset_p3 <<= 3; + + data = r8168_mac_ocp_read(tp, 0xdd00); + ioffset_p3 |= ((data & (0xe000))>>13); + ioffset_p2 = ((data & (0x1e00))>>9); + ioffset_p1 = ((data & (0x01e0))>>5); + ioffset_p0 = ((data & 0x0010)>>4); + 
ioffset_p0 <<= 3; + ioffset_p0 |= (data & (0x07)); + data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + + if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || + (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) { + rtl_writephy(tp, 0x1f, 0x0bcf); + rtl_writephy(tp, 0x16, data); + rtl_writephy(tp, 0x1f, 0x0000); + } + + /* Modify rlen (TX LPF corner frequency) level */ + rtl_writephy(tp, 0x1f, 0x0bcd); + data = rtl_readphy(tp, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + rtl_writephy(tp, 0x17, data); + rtl_writephy(tp, 0x1f, 0x0bcd); rtl_writephy(tp, 0x1f, 0x0000); -} -static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) -{ - rtl_apply_firmware(tp); + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a46); - if (rtl_readphy(tp, 0x10) & 0x0100) { - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000); - } else { - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000); - } + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - rtl_writephy(tp, 0x1f, 0x0a46); - if (rtl_readphy(tp, 0x13) & 0x0100) { - rtl_writephy(tp, 0x1f, 0x0c41); - rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000); - } else { - rtl_writephy(tp, 0x1f, 0x0c41); - rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002); - } + rtl_writephy(tp, 0x1f, 0x0000); +} +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) +{ /* Enable PHY auto speed down */ rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + /* patch 10M & ALDPS */ rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); rtl_writephy(tp, 0x1f, 0x0a43); rtl_writephy(tp, 0x13, 0x8084); - rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); - /* EEE auto-fallback function */ + /* Enable EEE auto-fallback function */ rtl_writephy(tp, 0x1f, 0x0a4b); - rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); /* Enable UC LPF tune function */ rtl_writephy(tp, 0x1f, 0x0a43); rtl_writephy(tp, 0x13, 0x8012); - rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + /* set rg_sel_sdm_rate */ rtl_writephy(tp, 0x1f, 0x0c42); - rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); - /* Improve SWR Efficiency */ + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) +{ + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 
0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Channel estimation parameters */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80f3); + rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); + rtl_writephy(tp, 0x13, 0x80f0); + rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); + rtl_writephy(tp, 0x13, 0x80ef); + rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); + rtl_writephy(tp, 0x13, 0x80f6); + rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); + rtl_writephy(tp, 0x13, 0x80ec); + rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); + rtl_writephy(tp, 0x13, 0x80ed); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x80f2); + rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); + rtl_writephy(tp, 0x13, 0x80f4); + rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8110); + rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); + rtl_writephy(tp, 0x13, 0x810f); + rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); + rtl_writephy(tp, 0x13, 0x8111); + rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); + rtl_writephy(tp, 0x13, 0x8113); + rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); + rtl_writephy(tp, 0x13, 0x8115); + rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); + rtl_writephy(tp, 0x13, 0x810e); + rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); + rtl_writephy(tp, 0x13, 0x810c); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x810b); + rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80d1); + rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); + rtl_writephy(tp, 0x13, 0x80cd); + rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); + rtl_writephy(tp, 0x13, 0x80d3); + rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); + rtl_writephy(tp, 0x13, 0x80d5); + rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); + rtl_writephy(tp, 0x13, 0x80d7); + rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); + + /* Force PWM-mode */ rtl_writephy(tp, 0x1f, 0x0bcd); rtl_writephy(tp, 0x14, 0x5065); rtl_writephy(tp, 0x14, 0xd065); rtl_writephy(tp, 0x1f, 0x0bc8); - rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x12, 0x00ed); rtl_writephy(tp, 0x1f, 0x0bcd); rtl_writephy(tp, 0x14, 0x1065); rtl_writephy(tp, 0x14, 0x9065); rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x1f, 0x0000); /* Check ALDPS bit, disable it if enabled */ rtl_writephy(tp, 0x1f, 0x0a43); if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); rtl_writephy(tp, 0x1f, 0x0000); } -static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) -{ - rtl_apply_firmware(tp); -} - static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3655,6 +4228,22 @@ static void rtl_hw_phy_config(struct net_device *dev) case RTL_GIGA_MAC_VER_44: rtl8168g_2_hw_phy_config(tp); break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_47: + rtl8168h_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + rtl8168h_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl8168ep_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_2_hw_phy_config(tp); + break; case RTL_GIGA_MAC_VER_41: default: @@ 
-3866,6 +4455,13 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -3920,6 +4516,13 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3988,6 +4591,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_13: case RTL_GIGA_MAC_VER_16: break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; default: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; @@ -4060,8 +4667,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) && - r8168dp_check_dash(tp)) { + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { return; } @@ -4088,12 +4698,19 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_31: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: - rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + case RTL_GIGA_MAC_VER_49: + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, 0xfc000000, ERIAR_EXGMAC); + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; } } @@ -4112,9 +4729,18 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_33: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: - rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, + case RTL_GIGA_MAC_VER_49: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, 0x00000000, ERIAR_EXGMAC); break; } @@ -4154,6 +4780,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: ops->down = r810x_pll_power_down; ops->up = r810x_pll_power_up; break; @@ -4183,6 +4811,11 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -4233,6 +4866,13 @@ static void 
rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -4394,6 +5034,13 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: default: ops->disable = NULL; ops->enable = NULL; @@ -4415,6 +5062,8 @@ static void rtl_hw_reset(struct rtl8169_private *tp) RTL_W8(ChipCmd, CmdReset); rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); + + netdev_reset_queue(tp->dev); } static void rtl_request_uncached_firmware(struct rtl8169_private *tp) @@ -4496,15 +5145,22 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_31) { rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_38) { + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); } else { @@ -4674,7 +5330,7 @@ static void rtl_hw_start_8169(struct net_device *dev) if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xE0. " + dprintk("Set MAC Reg C+CR Offset 0xe0. 
" "Bit-3 and bit-14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } @@ -4709,7 +5365,7 @@ static void rtl_hw_start_8169(struct net_device *dev) rtl_set_rx_mode(dev); /* no early-rx interrupts */ - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); } static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) @@ -5172,8 +5828,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); RTL_W8(MaxTxPacketSize, EarlySize); @@ -5203,10 +5859,10 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); @@ -5235,7 +5891,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); /* Adjust EEE LED frequency */ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); @@ -5255,7 +5911,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); } static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) @@ -5274,8 +5930,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); @@ -5288,8 +5944,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) /* Adjust EEE LED frequency */ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); - rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, 
ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); rtl_pcie_state_l2l3_enable(tp, false); } @@ -5331,6 +5987,219 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); } +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + u16 rg_saw_cnt; + u32 data; + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x154a }, + { 0x01, 0xffff, 0x068b } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_writephy(tp, 0x1f, 0x0c42); + rg_saw_cnt = rtl_readphy(tp, 0x13); + rtl_writephy(tp, 0x1f, 0x0000); + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + data = r8168_mac_ocp_read(tp, 0xd412); + data &= 0x0fff; + data |= sw_cnt_1ms_ini; + r8168_mac_ocp_write(tp, 0xd412, data); + } + + data = r8168_mac_ocp_read(tp, 0xe056); + data &= 0xf0; + data |= 0x07; + r8168_mac_ocp_write(tp, 0xe056, data); + + data = r8168_mac_ocp_read(tp, 0xe052); + data &= 0x8008; + data |= 0x6000; + r8168_mac_ocp_write(tp, 0xe052, data); + + data = r8168_mac_ocp_read(tp, 0xe0d6); + data &= 0x01ff; + data |= 0x017f; + r8168_mac_ocp_write(tp, 0xe0d6, data); + + data = r8168_mac_ocp_read(tp, 0xd420); + data &= 0x0fff; + data |= 0x047f; + r8168_mac_ocp_write(tp, 0xd420, data); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, 
ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); + + rtl_hw_start_8168ep(tp); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + data = r8168_mac_ocp_read(tp, 0xd3e2); + data &= 0xf000; + data |= 0x0271; + r8168_mac_ocp_write(tp, 0xd3e2, data); + + data = r8168_mac_ocp_read(tp, 0xd3e4); + data &= 0xff00; + r8168_mac_ocp_write(tp, 0xd3e4, data); + + data = r8168_mac_ocp_read(tp, 0xe860); + data |= 0x0080; + r8168_mac_ocp_write(tp, 0xe860, data); +} + static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5441,6 +6310,23 @@ static void rtl_hw_start_8168(struct net_device *dev) rtl_hw_start_8411_2(tp); break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + rtl_hw_start_8168h_1(tp); + break; + + case 
RTL_GIGA_MAC_VER_49: + rtl_hw_start_8168ep_1(tp); + break; + + case RTL_GIGA_MAC_VER_50: + rtl_hw_start_8168ep_2(tp); + break; + + case RTL_GIGA_MAC_VER_51: + rtl_hw_start_8168ep_3(tp); + break; + default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", dev->name, tp->mac_version); @@ -5453,7 +6339,7 @@ static void rtl_hw_start_8168(struct net_device *dev) rtl_set_rx_mode(dev); - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); } #define R810X_CPCMD_QUIRK_MASK (\ @@ -5576,11 +6462,11 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); rtl_pcie_state_l2l3_enable(tp, false); } @@ -5656,6 +6542,10 @@ static void rtl_hw_start_8101(struct net_device *dev) case RTL_GIGA_MAC_VER_43: rtl_hw_start_8168g_2(tp); break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_start_8168h_1(tp); + break; } RTL_W8(Cfg9346, Cfg9346_Lock); @@ -5896,7 +6786,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, { struct skb_shared_info *info = skb_shinfo(skb); unsigned int cur_frag, entry; - struct TxDesc * uninitialized_var(txd); + struct TxDesc *uninitialized_var(txd); struct device *d = &tp->pci_dev->dev; entry = tp->cur_tx; @@ -6183,6 +7073,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); wmb(); @@ -6282,6 +7174,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { unsigned int dirty_tx, tx_left; + unsigned int bytes_compl = 0, pkts_compl = 0; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -6300,10 +7193,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets++; - tp->tx_stats.bytes += tx_skb->skb->len; - u64_stats_update_end(&tp->tx_stats.syncp); + pkts_compl++; + bytes_compl += tx_skb->skb->len; dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } @@ -6312,6 +7203,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) } if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); + + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets += pkts_compl; + tp->tx_stats.bytes += bytes_compl; + u64_stats_update_end(&tp->tx_stats.syncp); + tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) @@ -6957,9 +7855,13 @@ static void rtl_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == 
RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { rtl8168_driver_stop(tp); } @@ -7111,6 +8013,13 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: rtl_hw_init_8168g(tp); break; @@ -7248,8 +8157,34 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) RTL_W8(Cfg9346, Cfg9346_Unlock); RTL_W8(Config1, RTL_R8(Config1) | PMEnable); RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); - if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) - tp->features |= RTL_FEATURE_WOL; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config3) & LinkUp) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + default: + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + } if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) tp->features |= RTL_FEATURE_WOL; tp->features |= rtl_try_msi(tp, cfg); @@ -7276,6 +8211,30 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u64_stats_init(&tp->tx_stats.syncp); /* Get MAC address */ + if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { + u16 mac_addr[3]; + + *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + + if (is_valid_ether_addr((u8 *)mac_addr)) + rtl_rar_set(tp, (u8 *)mac_addr); + } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); @@ -7344,9 +8303,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); } - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { rtl8168_driver_start(tp); } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 65c220f8661df1a39267e94e9e17de775efd5055..32060984221195793ee6eced35b68d64f559c614 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -78,7 +78,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, if (buffer->flags & EFX_TX_BUF_SKB) { (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; - dev_kfree_skb_any((struct sk_buff *) buffer->skb); + dev_consume_skb_any((struct sk_buff *)buffer->skb); netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 9778cba9fc74906b68a26625f06f1cf4a88648c8..e88df9c7f1c071120b09e6f8ee896c261365bc6f 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1927,9 +1927,6 @@ static int smc911x_probe(struct net_device *dev) } dev->irq = irq_canonicalize(dev->irq); - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(dev); - dev->netdev_ops = &smc911x_netdev_ops; dev->watchdog_timeo = msecs_to_jiffies(watchdog); dev->ethtool_ops = &smc911x_ethtool_ops; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index bcaa41af1e628e9f8d2e84fccc7a38ea716e428c..5e94d00b96b301d9dc9cd09a17943520b96e8b52 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1967,9 +1967,6 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr, } dev->irq = irq_canonicalize(dev->irq); - /* Fill in the fields of the device structure with ethernet values. */ - ether_setup(dev); - dev->watchdog_timeo = msecs_to_jiffies(watchdog); dev->netdev_ops = &smc_netdev_ops; dev->ethtool_ops = &smc_ethtool_ops; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 5e13fa5524ae911f5516f060a5acdb729fec7be0..affb29da353e29139fb758c5a5ce68b701479815 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2255,7 +2255,6 @@ static int smsc911x_init(struct net_device *dev) if (smsc911x_soft_reset(pdata)) return -ENODEV; - ether_setup(dev); dev->flags |= IFF_MULTICAST; netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); dev->netdev_ops = &smsc911x_netdev_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 2d09c116cbc8c57b9ded844082dc2693d27a2402..b02d4a3ffa379666477d69a7625db1fe1e020aa7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -26,6 +26,16 @@ config STMMAC_PLATFORM If unsure, say N. +config DWMAC_MESON + bool "Amlogic Meson dwmac support" + depends on STMMAC_PLATFORM && ARCH_MESON + help + Support for Ethernet controller on Amlogic Meson SoCs. + + This selects the Amlogic Meson SoC glue layer support for + the stmmac device driver. 
This driver is used for Meson6 and + Meson8 SoCs. + config DWMAC_SOCFPGA bool "SOCFPGA dwmac support" depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST) diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 18695ebef7e43cb636e60640fbd52e4835e36ede..0533d0ba783d1d238f23fd367dc36763ef2950e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o +stmmac-$(CONFIG_DWMAC_MESON) += dwmac-meson.o stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c new file mode 100644 index 0000000000000000000000000000000000000000..d225a603e604b7a21b8e5c8d021c971448aa7dcb --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -0,0 +1,67 @@ +/* + * Amlogic Meson DWMAC glue layer + * + * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#define ETHMAC_SPEED_100 BIT(1) + +struct meson_dwmac { + struct device *dev; + void __iomem *reg; +}; + +static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct meson_dwmac *dwmac = priv; + unsigned int val; + + val = readl(dwmac->reg); + + switch (speed) { + case SPEED_10: + val &= ~ETHMAC_SPEED_100; + break; + case SPEED_100: + val |= ETHMAC_SPEED_100; + break; + } + + writel(val, dwmac->reg); +} + +static void *meson6_dwmac_setup(struct platform_device *pdev) +{ + struct meson_dwmac *dwmac; + struct resource *res; + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + dwmac->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dwmac->reg)) + return dwmac->reg; + + return dwmac; +} + +const struct stmmac_of_data meson6_dwmac_data = { + .setup = meson6_dwmac_setup, + .fix_mac_speed = meson6_dwmac_fix_mac_speed, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index ec632e666c56ad5138384cae4f23e43b45ad4d34..3aad413e74b4a4ba9dd19012cf85460870840015 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -30,6 +31,12 @@ #define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 +#define EMAC_SPLITTER_CTRL_REG 0x0 +#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 +#define EMAC_SPLITTER_CTRL_SPEED_100 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0 + struct socfpga_dwmac { int interface; u32 reg_offset; @@ -37,14 +44,46 @@ struct socfpga_dwmac { struct device *dev; struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; + void __iomem *splitter_base; }; +static void 
socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; + void __iomem *splitter_base = dwmac->splitter_base; + u32 val; + + if (!splitter_base) + return; + + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); + val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; + + switch (speed) { + case 1000: + val |= EMAC_SPLITTER_CTRL_SPEED_1000; + break; + case 100: + val |= EMAC_SPLITTER_CTRL_SPEED_100; + break; + case 10: + val |= EMAC_SPLITTER_CTRL_SPEED_10; + break; + default: + return; + } + + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); +} + static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) { struct device_node *np = dev->of_node; struct regmap *sys_mgr_base_addr; u32 reg_offset, reg_shift; int ret; + struct device_node *np_splitter; + struct resource res_splitter; dwmac->stmmac_rst = devm_reset_control_get(dev, STMMAC_RESOURCE_NAME); @@ -73,6 +112,20 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return -EINVAL; } + np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); + if (np_splitter) { + if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + dev_info(dev, "Missing emac splitter address\n"); + return -EINVAL; + } + + dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter); + if (IS_ERR(dwmac->splitter_base)) { + dev_info(dev, "Failed to mapping emac splitter\n"); + return PTR_ERR(dwmac->splitter_base); + } + } + dwmac->reg_offset = reg_offset; dwmac->reg_shift = reg_shift; dwmac->sys_mgr_base_addr = sys_mgr_base_addr; @@ -91,6 +144,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) switch (phymode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; break; case PHY_INTERFACE_MODE_MII: @@ -102,6 +156,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) return -EINVAL; } + /* Overwrite val to GMII if splitter core is enabled. The phymode here + * is the actual phy mode on phy hardware, but phy interface from + * EMAC core is GMII. 
+ */ + if (dwmac->splitter_base) + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; @@ -196,4 +257,5 @@ const struct stmmac_of_data socfpga_gmac_data = { .setup = socfpga_dwmac_probe, .init = socfpga_dwmac_init, .exit = socfpga_dwmac_exit, + .fix_mac_speed = socfpga_dwmac_fix_mac_speed, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 58097c0e2ad502699ace45392bb3f12855101625..44528896355d8d4dbae235b6c7b7b76c5caa7ed8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -137,6 +137,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); #ifdef CONFIG_STMMAC_PLATFORM +#ifdef CONFIG_DWMAC_MESON +extern const struct stmmac_of_data meson6_dwmac_data; +#endif #ifdef CONFIG_DWMAC_SUNXI extern const struct stmmac_of_data sun7i_gmac_data; #endif diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index cf4f38db1c0a60338c1d7479b0cfe28f50b6b558..3a08a1f78c732b49992cc711907131d74b5e18e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -261,11 +261,11 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (priv->hw->mac->get_adv) - priv->hw->mac->get_adv(priv->hw, &adv); - else + if (!priv->hw->mac->get_adv) return -EOPNOTSUPP; /* should never happen indeed */ + priv->hw->mac->get_adv(priv->hw, &adv); + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ if (adv.pause & STMMAC_PCS_PAUSE) @@ -340,19 +340,17 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, if (cmd->autoneg != AUTONEG_ENABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - mask &= (ADVERTISED_1000baseT_Half | + mask &= (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); - spin_lock(&priv->lock); - if (priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 1); - spin_unlock(&priv->lock); - } + spin_lock(&priv->lock); + if (priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->hw, 1); + spin_unlock(&priv->lock); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b0c1521e08a3a30df88871fdfc36b22bbf6ecb4e..6f77a46c7e2c446da3b3b4ae139057a621ef00ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -834,7 +834,7 @@ static int stmmac_init_phy(struct net_device *dev) /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || - (max_speed < 1000 && max_speed > 0)) + (max_speed < 1000 && max_speed > 0)) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); @@ -2765,8 +2765,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, priv->device = device; priv->dev = ndev; - ether_setup(ndev); - stmmac_set_ethtool_ops(ndev); priv->pause = pause; priv->plat = plat_dat; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a5b1e1b776fe3313c5c9852062ed66ea1547c285..b735fa22ac95a95fa7bef8b4c4672637bb356148 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -253,7 +253,7 @@ int stmmac_mdio_register(struct net_device *ndev) } /* - * If we're going to bind the MAC to this PHY bus, + * If we're going to bind the MAC to this PHY bus, * and no PHY number was provided to the MAC, * use the one probed here. */ @@ -282,7 +282,7 @@ int stmmac_mdio_register(struct net_device *ndev) } if (!found) { - pr_warning("%s: No PHY found\n", ndev->name); + pr_warn("%s: No PHY found\n", ndev->name); mdiobus_unregister(new_bus); mdiobus_free(new_bus); return -ENODEV; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bb524a932be48b6a0fa2e096eab0a2bb7732314d..65217170625803c5085d4be0c42a660c0e2f3f22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -30,6 +30,9 @@ #include "stmmac.h" static const struct of_device_id stmmac_dt_ids[] = { +#ifdef CONFIG_DWMAC_MESON + { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, +#endif #ifdef CONFIG_DWMAC_SUNXI { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, #endif diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 37f87ff28f036c7f46048826a82d790d371ccc43..02d370e58110de31b74d1d1b89f4038b7b476a3f 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4962,7 +4962,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); if (pci_try_set_mwi(pdev)) - pr_warning("Could not enable MWI for %s\n", pci_name(pdev)); + pr_warn("Could not enable MWI for %s\n", pci_name(pdev)); cas_program_bridge(pdev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8216be46540ff24f41f020116c009a21005235a2..904fd1ab5f6e3762703d2931cf8fb6963c53c32c 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -8717,8 +8717,8 @@ static void niu_divide_channels(struct niu_parent *parent, parent->txchan_per_port[i] = 1; } if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { - pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", - parent->index, tot_rx, tot_tx); + pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); } } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index f7415b6bf141caf5359986ae5de290937e01c7e9..fef5dec2cffe9c3bb7f09bbe728ab2dc54b0cba9 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -115,7 +115,7 @@ static const struct pci_device_id gem_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, gem_pci_tbl); -static u16 __phy_read(struct gem *gp, int phy_addr, int reg) +static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg) { u32 cmd; int limit = 10000; @@ -141,18 +141,18 @@ static u16 __phy_read(struct gem *gp, int phy_addr, int reg) return cmd & MIF_FRAME_DATA; } -static inline int _phy_read(struct net_device *dev, int mii_id, int reg) +static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg) { struct gem *gp = netdev_priv(dev); - return __phy_read(gp, mii_id, reg); + return __sungem_phy_read(gp, 
mii_id, reg); } -static inline u16 phy_read(struct gem *gp, int reg) +static inline u16 sungem_phy_read(struct gem *gp, int reg) { - return __phy_read(gp, gp->mii_phy_addr, reg); + return __sungem_phy_read(gp, gp->mii_phy_addr, reg); } -static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) +static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val) { u32 cmd; int limit = 10000; @@ -174,15 +174,15 @@ static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) } } -static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) +static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val) { struct gem *gp = netdev_priv(dev); - __phy_write(gp, mii_id, reg, val & 0xffff); + __sungem_phy_write(gp, mii_id, reg, val & 0xffff); } -static inline void phy_write(struct gem *gp, int reg, u16 val) +static inline void sungem_phy_write(struct gem *gp, int reg, u16 val) { - __phy_write(gp, gp->mii_phy_addr, reg, val); + __sungem_phy_write(gp, gp->mii_phy_addr, reg, val); } static inline void gem_enable_ints(struct gem *gp) @@ -1687,9 +1687,9 @@ static void gem_init_phy(struct gem *gp) /* Some PHYs used by apple have problem getting back to us, * we do an additional reset here */ - phy_write(gp, MII_BMCR, BMCR_RESET); + sungem_phy_write(gp, MII_BMCR, BMCR_RESET); msleep(20); - if (phy_read(gp, MII_BMCR) != 0xffff) + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; if (i == 2) netdev_warn(gp->dev, "GMAC PHY not responding !\n"); @@ -2012,7 +2012,7 @@ static int gem_check_invariants(struct gem *gp) for (i = 0; i < 32; i++) { gp->mii_phy_addr = i; - if (phy_read(gp, MII_BMCR) != 0xffff) + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; } if (i == 32) { @@ -2696,13 +2696,13 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Fallthrough... */ case SIOCGMIIREG: /* Read MII PHY register. */ - data->val_out = __phy_read(gp, data->phy_id & 0x1f, + data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. 
*/ - __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, + __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); rc = 0; break; @@ -2933,8 +2933,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Fill up the mii_phy structure (even if we won't use it) */ gp->phy_mii.dev = dev; - gp->phy_mii.mdio_read = _phy_read; - gp->phy_mii.mdio_write = _phy_write; + gp->phy_mii.mdio_read = _sungem_phy_read; + gp->phy_mii.mdio_write = _sungem_phy_write; #ifdef CONFIG_PPC_PMAC gp->phy_mii.platform_data = gp->of_node; #endif diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index f67539650c380010eced17ea5ab880ec3dc0e328..15396720f489a802338eda997b26dbff2de66103 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -15,6 +15,14 @@ #include #include #include +#include + +#if IS_ENABLED(CONFIG_IPV6) +#include +#endif + +#include +#include #include #include @@ -37,8 +45,11 @@ MODULE_VERSION(DRV_MODULE_VERSION); */ #define VNET_MAX_RETRIES 10 +static int __vnet_tx_trigger(struct vnet_port *port, u32 start); + /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { + { .major = 1, .minor = 6 }, { .major = 1, .minor = 0 }, }; @@ -65,6 +76,7 @@ static int vnet_send_attr(struct vio_driver_state *vio) struct vnet_port *port = to_vnet_port(vio); struct net_device *dev = port->vp->dev; struct vio_net_attr_info pkt; + int framelen = ETH_FRAME_LEN; int i; memset(&pkt, 0, sizeof(pkt)); @@ -72,19 +84,41 @@ static int vnet_send_attr(struct vio_driver_state *vio) pkt.tag.stype = VIO_SUBTYPE_INFO; pkt.tag.stype_env = VIO_ATTR_INFO; pkt.tag.sid = vio_send_sid(vio); - pkt.xfer_mode = VIO_DRING_MODE; + if (vio_version_before(vio, 1, 2)) + pkt.xfer_mode = VIO_DRING_MODE; + else + pkt.xfer_mode = VIO_NEW_DRING_MODE; pkt.addr_type = VNET_ADDR_ETHERMAC; pkt.ack_freq = 0; for (i = 0; i < 6; i++) pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); - pkt.mtu = ETH_FRAME_LEN; + if (vio_version_after(vio, 1, 3)) { + if (port->rmtu) { + port->rmtu = min(VNET_MAXPACKET, port->rmtu); + pkt.mtu = port->rmtu; + } else { + port->rmtu = VNET_MAXPACKET; + pkt.mtu = port->rmtu; + } + if (vio_version_after_eq(vio, 1, 6)) + pkt.options = VIO_TX_DRING; + } else if (vio_version_before(vio, 1, 3)) { + pkt.mtu = framelen; + } else { /* v1.3 */ + pkt.mtu = framelen + VLAN_HLEN; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; + pkt.cflags = 0; viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " - "ackfreq[%u] mtu[%llu]\n", + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + "cflags[0x%04x] lso_max[%u]\n", pkt.xfer_mode, pkt.addr_type, - (unsigned long long) pkt.addr, - pkt.ack_freq, - (unsigned long long) pkt.mtu); + (unsigned long long)pkt.addr, + pkt.ack_freq, pkt.plnk_updt, pkt.options, + (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); + return vio_ldc_send(vio, &pkt, sizeof(pkt)); } @@ -92,18 +126,52 @@ static int vnet_send_attr(struct vio_driver_state *vio) static int handle_attr_info(struct vio_driver_state *vio, struct vio_net_attr_info *pkt) { - viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] " - "ackfreq[%u] mtu[%llu]\n", + struct vnet_port *port = to_vnet_port(vio); + u64 localmtu; + u8 xfer_mode; + + viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", pkt->xfer_mode, pkt->addr_type, - (unsigned long long) 
pkt->addr, - pkt->ack_freq, - (unsigned long long) pkt->mtu); + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); pkt->tag.sid = vio_send_sid(vio); - if (pkt->xfer_mode != VIO_DRING_MODE || + xfer_mode = pkt->xfer_mode; + /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ + if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) + xfer_mode = VIO_NEW_DRING_MODE; + + /* MTU negotiation: + * < v1.3 - ETH_FRAME_LEN exactly + * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change + * pkt->mtu for ACK + * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly + */ + if (vio_version_before(vio, 1, 3)) { + localmtu = ETH_FRAME_LEN; + } else if (vio_version_after(vio, 1, 3)) { + localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; + localmtu = min(pkt->mtu, localmtu); + pkt->mtu = localmtu; + } else { /* v1.3 */ + localmtu = ETH_FRAME_LEN + VLAN_HLEN; + } + port->rmtu = localmtu; + + /* for version >= 1.6, ACK packet mode we support */ + if (vio_version_after_eq(vio, 1, 6)) { + pkt->xfer_mode = VIO_NEW_DRING_MODE; + pkt->options = VIO_TX_DRING; + } + + if (!(xfer_mode | VIO_NEW_DRING_MODE) || pkt->addr_type != VNET_ADDR_ETHERMAC || - pkt->mtu != ETH_FRAME_LEN) { + pkt->mtu != localmtu) { viodbg(HS, "SEND NET ATTR NACK\n"); pkt->tag.stype = VIO_SUBTYPE_NACK; @@ -112,7 +180,14 @@ static int handle_attr_info(struct vio_driver_state *vio, return -ECONNRESET; } else { - viodbg(HS, "SEND NET ATTR ACK\n"); + viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " + "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " + "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); pkt->tag.stype = VIO_SUBTYPE_ACK; @@ -208,7 +283,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len, int err; err = -EMSGSIZE; - if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) { + if (unlikely(len < ETH_ZLEN || len > port->rmtu)) { dev->stats.rx_length_errors++; goto out_dropped; } @@ -283,10 +358,18 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, port->raddr[0], port->raddr[1], port->raddr[2], port->raddr[3], port->raddr[4], port->raddr[5]); - err = -ECONNRESET; + break; } } while (err == -EAGAIN); + if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { + port->stop_rx_idx = end; + port->stop_rx = true; + } else { + port->stop_rx_idx = 0; + port->stop_rx = false; + } + return err; } @@ -451,7 +534,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) struct net_device *dev; struct vnet *vp; u32 end; - + struct vio_net_desc *desc; if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; @@ -459,7 +542,24 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) if (unlikely(!idx_is_pending(dr, end))) return 0; + /* sync for race conditions with vnet_start_xmit() and tell xmit it + * is time to send a trigger. + */ dr->cons = next_idx(end, dr); + desc = vio_dring_entry(dr, dr->cons); + if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { + /* vnet_start_xmit() just populated this dring but missed + * sending the "start" LDC message to the consumer. + * Send a "start" trigger on its behalf. 
+ */ + if (__vnet_tx_trigger(port, dr->cons) > 0) + port->start_cons = false; + else + port->start_cons = true; + } else { + port->start_cons = true; + } + vp = port->vp; dev = vp->dev; @@ -531,13 +631,15 @@ static void vnet_event(void *arg, int event) vio_link_state_change(vio, event); spin_unlock_irqrestore(&vio->lock, flags); - if (event == LDC_EVENT_RESET) + if (event == LDC_EVENT_RESET) { + port->rmtu = 0; vio_port_up(vio); + } return; } if (unlikely(event != LDC_EVENT_DATA_READY)) { - pr_warning("Unexpected LDC event %d\n", event); + pr_warn("Unexpected LDC event %d\n", event); spin_unlock_irqrestore(&vio->lock, flags); return; } @@ -600,7 +702,7 @@ static void vnet_event(void *arg, int event) local_irq_restore(flags); } -static int __vnet_tx_trigger(struct vnet_port *port) +static int __vnet_tx_trigger(struct vnet_port *port, u32 start) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data hdr = { @@ -611,12 +713,21 @@ static int __vnet_tx_trigger(struct vnet_port *port) .sid = vio_send_sid(&port->vio), }, .dring_ident = dr->ident, - .start_idx = dr->prod, + .start_idx = start, .end_idx = (u32) -1, }; int err, delay; int retries = 0; + if (port->stop_rx) { + err = vnet_send_ack(port, + &port->vio.drings[VIO_DRIVER_RX_RING], + port->stop_rx_idx, -1, + VIO_DRING_STOPPED); + if (err <= 0) + return err; + } + hdr.seq = dr->snd_nxt; delay = 1; do { @@ -676,6 +787,117 @@ struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) return ret; } +static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, + unsigned *pending) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *skb = NULL; + int i, txi; + + *pending = 0; + + txi = dr->prod-1; + if (txi < 0) + txi = VNET_TX_RING_SIZE-1; + + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + d = vio_dring_entry(dr, txi); + + if (d->hdr.state == VIO_DESC_DONE) { + if (port->tx_bufs[txi].skb) { + BUG_ON(port->tx_bufs[txi].skb->next); + + port->tx_bufs[txi].skb->next = skb; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + + ldc_unmap(port->vio.lp, + port->tx_bufs[txi].cookies, + port->tx_bufs[txi].ncookies); + } + d->hdr.state = VIO_DESC_FREE; + } else if (d->hdr.state == VIO_DESC_READY) { + (*pending)++; + } else if (d->hdr.state == VIO_DESC_FREE) { + break; + } + --txi; + if (txi < 0) + txi = VNET_TX_RING_SIZE-1; + } + return skb; +} + +static inline void vnet_free_skbs(struct sk_buff *skb) +{ + struct sk_buff *next; + + while (skb) { + next = skb->next; + skb->next = NULL; + dev_kfree_skb(skb); + skb = next; + } +} + +static void vnet_clean_timer_expire(unsigned long port0) +{ + struct vnet_port *port = (struct vnet_port *)port0; + struct sk_buff *freeskbs; + unsigned pending; + unsigned long flags; + + spin_lock_irqsave(&port->vio.lock, flags); + freeskbs = vnet_clean_tx_ring(port, &pending); + spin_unlock_irqrestore(&port->vio.lock, flags); + + vnet_free_skbs(freeskbs); + + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else + del_timer(&port->clean_timer); +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart, + int *plen) +{ + struct sk_buff *nskb; + int len, pad; + + len = skb->len; + pad = 0; + if (len < ETH_ZLEN) { + pad += ETH_ZLEN - skb->len; + len += pad; + } + len += VNET_PACKET_SKIP; + pad += 8 - (len & 7); + len += 8 - (len & 7); + + if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || + skb_tailroom(skb) < pad || + 
skb_headroom(skb) < VNET_PACKET_SKIP) { + nskb = alloc_and_align_skb(skb->dev, skb->len); + skb_reserve(nskb, VNET_PACKET_SKIP); + if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + (void)skb_put(nskb, skb->len); + dev_kfree_skb(skb); + skb = nskb; + } + + *pstart = skb->data - VNET_PACKET_SKIP; + *plen = len; + return skb; +} + static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vnet *vp = netdev_priv(dev); @@ -684,12 +906,51 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) struct vio_net_desc *d; unsigned long flags; unsigned int len; - void *tx_buf; - int i, err; + struct sk_buff *freeskbs = NULL; + int i, err, txi; + void *start = NULL; + int nlen = 0; + unsigned pending = 0; if (unlikely(!port)) goto out_dropped; + skb = vnet_skb_shape(skb, &start, &nlen); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->len > port->rmtu) { + unsigned long localmtu = port->rmtu - ETH_HLEN; + + if (vio_version_after_eq(&port->vio, 1, 3)) + localmtu -= VLAN_HLEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct flowi4 fl4; + struct rtable *rt = NULL; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = dev->ifindex; + fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); + fl4.daddr = ip_hdr(skb)->daddr; + fl4.saddr = ip_hdr(skb)->saddr; + + rt = ip_route_output_key(dev_net(dev), &fl4); + if (!IS_ERR(rt)) { + skb_dst_set(skb, &rt->dst); + icmp_send(skb, ICMP_DEST_UNREACH, + ICMP_FRAG_NEEDED, + htonl(localmtu)); + } + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); +#endif + goto out_dropped; + } + spin_lock_irqsave(&port->vio.lock, flags); dr = &port->vio.drings[VIO_DRIVER_TX_RING]; @@ -707,14 +968,27 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) d = vio_dring_cur(dr); - tx_buf = port->tx_bufs[dr->prod].buf; - skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len); + txi = dr->prod; + + freeskbs = vnet_clean_tx_ring(port, &pending); + + BUG_ON(port->tx_bufs[txi].skb); len = skb->len; - if (len < ETH_ZLEN) { + if (len < ETH_ZLEN) len = ETH_ZLEN; - memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len); + + port->tx_bufs[txi].skb = skb; + skb = NULL; + + err = ldc_map_single(port->vio.lp, start, nlen, + port->tx_bufs[txi].cookies, VNET_MAXCOOKIES, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + if (err < 0) { + netdev_info(dev, "tx buffer map error %d\n", err); + goto out_dropped_unlock; } + port->tx_bufs[txi].ncookies = err; /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), * thus it is safe to not set VIO_ACK_ENABLE for each transmission: @@ -726,9 +1000,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) */ d->hdr.ack = VIO_ACK_DISABLE; d->size = len; - d->ncookies = port->tx_bufs[dr->prod].ncookies; + d->ncookies = port->tx_bufs[txi].ncookies; for (i = 0; i < d->ncookies; i++) - d->cookies[i] = port->tx_bufs[dr->prod].cookies[i]; + d->cookies[i] = port->tx_bufs[txi].cookies[i]; /* This has to be a non-SMP write barrier because we are writing * to memory which is shared with the peer LDOM. @@ -737,7 +1011,30 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) d->hdr.state = VIO_DESC_READY; - err = __vnet_tx_trigger(port); + /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent + * to notify the consumer that some descriptors are READY. 
+ * After that "start" trigger, no additional triggers are needed until + * a DRING_STOPPED is received from the consumer. The dr->cons field + * (set up by vnet_ack()) has the value of the next dring index + * that has not yet been ack-ed. We send a "start" trigger here + * if, and only if, start_cons is true (reset it afterward). Conversely, + * vnet_ack() should check if the dring corresponding to cons + * is marked READY, but start_cons was false. + * If so, vnet_ack() should send out the missed "start" trigger. + * + * Note that the wmb() above makes sure the cookies et al. are + * not globally visible before the VIO_DESC_READY, and that the + * stores are ordered correctly by the compiler. The consumer will + * not proceed until the VIO_DESC_READY is visible assuring that + * the consumer does not observe anything related to descriptors + * out of order. The HV trap from the LDC start trigger is the + * producer to consumer announcement that work is available to the + * consumer + */ + if (!port->start_cons) + goto ldc_start_done; /* previous trigger suffices */ + + err = __vnet_tx_trigger(port, dr->cons); if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; @@ -745,8 +1042,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_dropped_unlock; } +ldc_start_done: + port->start_cons = false; + dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); if (unlikely(vnet_tx_dring_avail(dr) < 2)) { @@ -757,7 +1057,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&port->vio.lock, flags); - dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); + + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); return NETDEV_TX_OK; @@ -765,7 +1067,14 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&port->vio.lock, flags); out_dropped: - dev_kfree_skb(skb); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else if (port) + del_timer(&port->clean_timer); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -911,7 +1220,7 @@ static void vnet_set_rx_mode(struct net_device *dev) static int vnet_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu != ETH_DATA_LEN) + if (new_mtu < 68 || new_mtu > 65535) return -EINVAL; dev->mtu = new_mtu; @@ -967,17 +1276,22 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port) } for (i = 0; i < VNET_TX_RING_SIZE; i++) { - void *buf = port->tx_bufs[i].buf; + struct vio_net_desc *d; + void *skb = port->tx_bufs[i].skb; - if (!buf) + if (!skb) continue; + d = vio_dring_entry(dr, i); + if (d->hdr.state == VIO_DESC_READY) + pr_warn("active transmit buffers freed\n"); + ldc_unmap(port->vio.lp, port->tx_bufs[i].cookies, port->tx_bufs[i].ncookies); - - kfree(buf); - port->tx_bufs[i].buf = NULL; + dev_kfree_skb(skb); + port->tx_bufs[i].skb = NULL; + d->hdr.state = VIO_DESC_FREE; } } @@ -988,34 +1302,6 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port) int i, err, ncookies; void *dring; - for (i = 0; i < VNET_TX_RING_SIZE; i++) { - void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL); - int map_len = (ETH_FRAME_LEN + 7) & ~7; - - err = -ENOMEM; - if (!buf) - goto err_out; - - err = -EFAULT; - if ((unsigned long)buf & (8UL - 1)) { - pr_err("TX buffer misaligned\n"); - 
kfree(buf); - goto err_out; - } - - err = ldc_map_single(port->vio.lp, buf, map_len, - port->tx_bufs[i].cookies, 2, - (LDC_MAP_SHADOW | - LDC_MAP_DIRECT | - LDC_MAP_RW)); - if (err < 0) { - kfree(buf); - goto err_out; - } - port->tx_bufs[i].buf = buf; - port->tx_bufs[i].ncookies = err; - } - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; len = (VNET_TX_RING_SIZE * @@ -1038,9 +1324,16 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port) (sizeof(struct ldc_trans_cookie) * 2)); dr->num_entries = VNET_TX_RING_SIZE; dr->prod = dr->cons = 0; + port->start_cons = true; /* need an initial trigger */ dr->pending = VNET_TX_RING_SIZE; dr->ncookies = ncookies; + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + d = vio_dring_entry(dr, i); + d->hdr.state = VIO_DESC_FREE; + } return 0; err_out: @@ -1072,6 +1365,8 @@ static struct vnet *vnet_new(const u64 *local_mac) dev = alloc_etherdev(sizeof(*vp)); if (!dev) return ERR_PTR(-ENOMEM); + dev->needed_headroom = VNET_PACKET_SKIP + 8; + dev->needed_tailroom = 8; for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; @@ -1266,6 +1561,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) pr_info("%s: PORT ( remote-mac %pM%s )\n", vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); + setup_timer(&port->clean_timer, vnet_clean_timer_expire, + (unsigned long)port); + vio_port_up(&port->vio); mdesc_release(hp); @@ -1292,6 +1590,7 @@ static int vnet_port_remove(struct vio_dev *vdev) unsigned long flags; del_timer_sync(&port->vio.timer); + del_timer_sync(&port->clean_timer); spin_lock_irqsave(&vp->lock, flags); list_del(&port->list); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index de5c2c64996f34c5eb76e5376a71d93b72ebf490..c9110454261905bc60861eb34a7bfe7c268f0b9f 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -11,6 +11,12 @@ */ #define VNET_TX_TIMEOUT (5 * HZ) +/* length of time (or less) we expect pending descriptors to be marked + * as VIO_DESC_DONE and skbs ready to be freed + */ +#define VNET_CLEAN_TIMEOUT ((HZ/100)+1) + +#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) #define VNET_TX_RING_SIZE 512 #define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) @@ -20,10 +26,12 @@ */ #define VNET_PACKET_SKIP 6 +#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1) + struct vnet_tx_entry { - void *buf; + struct sk_buff *skb; unsigned int ncookies; - struct ldc_trans_cookie cookies[2]; + struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; }; struct vnet; @@ -40,6 +48,14 @@ struct vnet_port { struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; struct list_head list; + + u32 stop_rx_idx; + bool stop_rx; + bool start_cons; + + struct timer_list clean_timer; + + u64 rmtu; }; static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1769700a6070bb238c9895d38c2d379d67375504..5d8cb79561134bc2769fc900d5654bc6cce40933 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -62,6 +62,8 @@ config TI_CPSW select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL + select MFD_SYSCON + select REGMAP ---help--- This driver supports TI's CPSW Ethernet Switch. 
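For reference, here is a minimal standalone sketch of the lazy TX-ring reclamation pattern that the sunvnet hunks above introduce with vnet_clean_tx_ring() and the clean_timer: walk backwards from the producer index, release descriptors already marked DONE, and report how many are still READY so the caller can decide whether to re-arm the cleanup timer. The ring, descriptor states and free_buf() callback below are simplified stand-ins for illustration only, not the kernel's own types.

/*
 * Sketch only: simplified ring reclamation, assuming a FREE/READY/DONE
 * descriptor state machine like the one used by the driver above.
 */
#include <stddef.h>

#define RING_SIZE 512

enum desc_state { DESC_FREE, DESC_READY, DESC_DONE };

struct ring {
	enum desc_state state[RING_SIZE];
	void *buf[RING_SIZE];	/* completed buffers waiting to be handed back */
	unsigned int prod;	/* next slot the producer will fill */
};

/* Walk backwards from prod-1; return how many descriptors are still READY. */
static unsigned int ring_clean(struct ring *r, void (*free_buf)(void *))
{
	unsigned int pending = 0;
	int i, idx = (int)r->prod - 1;

	if (idx < 0)
		idx = RING_SIZE - 1;

	for (i = 0; i < RING_SIZE; i++) {
		if (r->state[idx] == DESC_DONE) {
			if (r->buf[idx]) {
				free_buf(r->buf[idx]);	/* peer is done with it */
				r->buf[idx] = NULL;
			}
			r->state[idx] = DESC_FREE;
		} else if (r->state[idx] == DESC_READY) {
			pending++;	/* queued but not yet consumed by the peer */
		} else {
			break;		/* DESC_FREE: nothing older left to clean */
		}
		if (--idx < 0)
			idx = RING_SIZE - 1;
	}
	return pending;
}

In the driver itself the completed skbs are chained onto a list and only freed with vnet_free_skbs() after vio.lock is dropped, which is why vnet_clean_tx_ring() returns that list rather than freeing in place; a non-zero pending count is what keeps the VNET_CLEAN_TIMEOUT timer armed.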
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index f9bcf7aa88ca3a2d6aa4b607bd134e0c36fc6d91..dd9430043536541b577a4b90418cebb33a67baf3 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1207,7 +1207,6 @@ static int cpmac_remove(struct platform_device *pdev) static struct platform_driver cpmac_driver = { .driver = { .name = "cpmac", - .owner = THIS_MODULE, }, .probe = cpmac_probe, .remove = cpmac_remove, diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index aa8bf45e53dc9b0ddab4265b2e527067339a00fc..0ea78326cc2165d63b82eff89c5b90a49ecb9ac5 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -211,7 +211,6 @@ static struct platform_driver cpsw_phy_sel_driver = { .probe = cpsw_phy_sel_probe, .driver = { .name = "cpsw-phy-sel", - .owner = THIS_MODULE, .of_match_table = cpsw_phy_sel_id_table, }, }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e2a00287f8eb8a1e86a471851e05c6e46d5aa487..ab167dc49ce4ca79579f7c0d953fa271f1e9acc9 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include @@ -397,6 +399,8 @@ struct cpsw_priv { struct cpdma_ctlr *dma; struct cpdma_chan *txch, *rxch; struct cpsw_ale *ale; + bool rx_pause; + bool tx_pause; /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; @@ -855,6 +859,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, else if (phy->speed == 10) mac_control |= BIT(18); /* In Band mode */ + if (priv->rx_pause) + mac_control |= BIT(3); + + if (priv->tx_pause) + mac_control |= BIT(4); + *link = true; } else { mac_control = 0; @@ -1246,6 +1256,9 @@ static int cpsw_ndo_open(struct net_device *ndev) /* enable statistics collection only on all ports */ __raw_writel(0x7, &priv->regs->stat_port_en); + /* Enable internal fifo flow control */ + writel(0x7, &priv->regs->flow_control); + if (WARN_ON(!priv->data.rx_descs)) priv->data.rx_descs = 128; @@ -1807,6 +1820,30 @@ static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } +static void cpsw_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + pause->autoneg = AUTONEG_DISABLE; + pause->rx_pause = priv->rx_pause ? true : false; + pause->tx_pause = priv->tx_pause ? true : false; +} + +static int cpsw_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + bool link; + + priv->rx_pause = pause->rx_pause ? true : false; + priv->tx_pause = pause->tx_pause ? 
true : false; + + for_each_slave(priv, _cpsw_adjust_link, priv, &link); + + return 0; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1820,6 +1857,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_sset_count = cpsw_get_sset_count, .get_strings = cpsw_get_strings, .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_pauseparam = cpsw_get_pauseparam, + .set_pauseparam = cpsw_set_pauseparam, .get_wol = cpsw_get_wol, .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, @@ -1839,6 +1878,36 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->port_vlan = data->dual_emac_res_vlan; } +#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id) +#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4) + +static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave, + u8 *mac_addr) +{ + u32 macid_lo; + u32 macid_hi; + struct regmap *syscon; + + syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); + if (IS_ERR(syscon)) { + if (PTR_ERR(syscon) == -ENODEV) + return 0; + return PTR_ERR(syscon); + } + + regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo); + regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi); + + mac_addr[5] = (macid_lo >> 8) & 0xff; + mac_addr[4] = macid_lo & 0xff; + mac_addr[3] = (macid_hi >> 24) & 0xff; + mac_addr[2] = (macid_hi >> 16) & 0xff; + mac_addr[1] = (macid_hi >> 8) & 0xff; + mac_addr[0] = macid_hi & 0xff; + + return 0; +} + static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *pdev) { @@ -1944,15 +2013,23 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, mdio = of_find_device_by_node(mdio_node); of_node_put(mdio_node); if (!mdio) { - pr_err("Missing mdio platform device\n"); + dev_err(&pdev->dev, "Missing mdio platform device\n"); return -EINVAL; } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); mac_addr = of_get_mac_address(slave_node); - if (mac_addr) + if (mac_addr) { memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + } else { + if (of_machine_is_compatible("ti,am33xx")) { + ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i, + slave_data->mac_addr); + if (ret) + return ret; + } + } slave_data->phy_if = of_get_phy_mode(slave_node); if (slave_data->phy_if < 0) { @@ -2086,6 +2163,7 @@ static int cpsw_probe(struct platform_device *pdev) priv->irq_enabled = true; if (!priv->cpts) { dev_err(&pdev->dev, "error allocating cpts\n"); + ret = -ENOMEM; goto clean_ndev_ret; } @@ -2255,18 +2333,24 @@ static int cpsw_probe(struct platform_device *pdev) } while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { - for (i = res->start; i <= res->end; i++) { - if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, - dev_name(&pdev->dev), priv)) { - dev_err(priv->dev, "error attaching irq\n"); - goto clean_ale_ret; - } - priv->irqs_table[k] = i; - priv->num_irqs = k + 1; + if (k >= ARRAY_SIZE(priv->irqs_table)) { + ret = -EINVAL; + goto clean_ale_ret; } + + ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt, + 0, dev_name(&pdev->dev), priv); + if (ret < 0) { + dev_err(priv->dev, "error attaching irq (%d)\n", ret); + goto clean_ale_ret; + } + + priv->irqs_table[k] = res->start; k++; } + priv->num_irqs = k; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; @@ -2395,7 +2479,6 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", 
- .owner = THIS_MODULE, .pm = &cpsw_pm_ops, .of_match_table = cpsw_of_mtable, }, diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index 574f49da693f194fe9a32ac731c90487498df9ba..1b710674630c71d5732ccee80d095d7587769351 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -15,6 +15,7 @@ #define __CPSW_H__ #include +#include struct cpsw_slave_data { char phy_id[MII_BUS_ID_SIZE]; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 35a139e9a833520a195d16a7c620d423638eb90c..ea712512c7d1f5e155129bf073cac1d7381d38bb 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2083,7 +2083,6 @@ MODULE_DEVICE_TABLE(of, davinci_emac_of_match); static struct platform_driver davinci_emac_driver = { .driver = { .name = "davinci_emac", - .owner = THIS_MODULE, .pm = &davinci_emac_pm_ops, .of_match_table = of_match_ptr(davinci_emac_of_match), }, diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 2791f6f2db1178e8f5ce0e0f2a9ecad9f87cffba..98655b44b97e2d7690ef2fa28156730697098d2b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -481,7 +481,6 @@ MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", - .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 88c71212669281efe24ba02a783709b2043f8529..88818d5054ab849ee7d6c88fb67faa8aca81f857 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -956,7 +956,7 @@ static int tile_net_open_aux(struct net_device *dev) */ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { - pr_warning("Failed to start LIPP/LEPP.\n"); + pr_warn("Failed to start LIPP/LEPP\n"); return -EIO; } @@ -2399,8 +2399,7 @@ static int __init network_cpus_setup(char *str) { int rc = cpulist_parse_crop(str, &network_cpus_map); if (rc != 0) { - pr_warning("network_cpus=%s: malformed cpu list\n", - str); + pr_warn("network_cpus=%s: malformed cpu list\n", str); } else { /* Remove dedicated cpus. 
*/ @@ -2409,8 +2408,7 @@ static int __init network_cpus_setup(char *str) if (cpumask_empty(&network_cpus_map)) { - pr_warning("Ignoring network_cpus='%s'.\n", - str); + pr_warn("Ignoring network_cpus='%s'\n", str); } else { char buf[1024]; cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3e38f67c60111ac834644b6ac2882c76c3bf4f4c..8e9371a3388a00ed78043c1d098b8c31db776ea7 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -266,34 +266,6 @@ spider_net_set_promisc(struct spider_net_card *card) } } -/** - * spider_net_get_mac_address - read mac address from spider card - * @card: device structure - * - * reads MAC address from GMACUNIMACU and GMACUNIMACL registers - */ -static int -spider_net_get_mac_address(struct net_device *netdev) -{ - struct spider_net_card *card = netdev_priv(netdev); - u32 macl, macu; - - macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL); - macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU); - - netdev->dev_addr[0] = (macu >> 24) & 0xff; - netdev->dev_addr[1] = (macu >> 16) & 0xff; - netdev->dev_addr[2] = (macu >> 8) & 0xff; - netdev->dev_addr[3] = macu & 0xff; - netdev->dev_addr[4] = (macl >> 8) & 0xff; - netdev->dev_addr[5] = macl & 0xff; - - if (!is_valid_ether_addr(&netdev->dev_addr[0])) - return -EINVAL; - - return 0; -} - /** * spider_net_get_descr_status -- returns the status of a descriptor * @descr: descriptor to look at @@ -1345,15 +1317,17 @@ spider_net_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); + /* switch off GMACTPE and GMACRPE */ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); regvalue &= ~((1 << 5) | (1 << 6)); spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); /* write mac */ - macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) + - (addr->sa_data[2]<<8) + (addr->sa_data[3]); - macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]); + macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) + + (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]); + macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]); spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu); spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl); @@ -1364,12 +1338,6 @@ spider_net_set_mac(struct net_device *netdev, void *p) spider_net_set_promisc(card); - /* look up, whether we have been successful */ - if (spider_net_get_mac_address(netdev)) - return -EADDRNOTAVAIL; - if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len)) - return -EADDRNOTAVAIL; - return 0; } diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 104d46f37969f990c920d4ee42c61e7258579f8a..0f56b1c0e08203ea4fdc6dd29b54480bdd87a41b 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -708,7 +708,6 @@ static int w5100_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->ndev = ndev; - ether_setup(ndev); ndev->netdev_ops = &w5100_netdev_ops; ndev->ethtool_ops = &w5100_ethtool_ops; ndev->watchdog_timeo = HZ; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 1f33c4c86c207f6a4fecf52bca8645c5e114be9e..f961f14a047343e7b06a3ea6601656e7ce2c139b 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -620,7 +620,6 @@ static int 
w5300_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->ndev = ndev; - ether_setup(ndev); ndev->netdev_ops = &w5300_netdev_ops; ndev->ethtool_ops = &w5300_ethtool_ops; ndev->watchdog_timeo = HZ; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index fda5891835d4c8ea3b2fad1725d542dc0dacff10..629077050fce02de2437ed73d6bf7f7b7de5a177 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1012,7 +1012,6 @@ static int temac_of_probe(struct platform_device *op) if (!ndev) return -ENOMEM; - ether_setup(ndev); platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c8fd94133ecd22380cf047bf44a3e21064f5154f..4ea2d4e6f1d1894f22362e9a2b422392e959331d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1485,7 +1485,6 @@ static int axienet_of_probe(struct platform_device *op) if (!ndev) return -ENOMEM; - ether_setup(ndev); platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index c44eaf019dea0d19302ccd2f597636b923f45b2d..caed6eee289c3a65a38bb9bb1a711617dd8d5134 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -466,7 +466,8 @@ static void dfx_get_bars(struct device *bdev, *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; } else { *bar_start = base_addr; - *bar_len = PI_ESIC_K_CSR_IO_LEN; + *bar_len = PI_ESIC_K_CSR_IO_LEN + + PI_ESIC_K_BURST_HOLDOFF_LEN; } } if (dfx_bus_tc) { @@ -683,6 +684,9 @@ static void dfx_bus_init(struct net_device *dev) if (dfx_bus_eisa) { unsigned long base_addr = to_eisa_device(bdev)->base_addr; + /* Disable the board before fiddling with the decoders. */ + outb(0, base_addr + PI_ESIC_K_SLOT_CNTRL); + /* Get the interrupt level from the ESIC chip. */ val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); val &= PI_CONFIG_STAT_0_M_IRQ; @@ -709,38 +713,46 @@ static void dfx_bus_init(struct net_device *dev) /* * Enable memory decoding (MEMCS0) and/or port decoding * (IOCS1/IOCS0) as appropriate in Function Control - * Register. One of the port chip selects seems to be - * used for the Burst Holdoff register, but this bit of - * documentation is missing and as yet it has not been - * determined which of the two. This is also the reason - * the size of the decoded port range is twice as large - * as one required by the PDQ. + * Register. IOCS0 is used for PDQ registers, taking 16 + * 32-bit words, while IOCS1 is used for the Burst Holdoff + * register, taking a single 32-bit word only. We use the + * slot-specific I/O range as per the ESIC spec, that is + * set bits 15:12 in the mask registers to mask them out. */ /* Set the decode range of the board. 
*/ - val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT); - outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val); - outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0); - outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val); - outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0); - val = PI_ESIC_K_CSR_IO_LEN - 1; - outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff); - outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff); - outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff); - outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff); + val = 0; + outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1); + val = PI_DEFEA_K_CSR_IO; + outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0); + + val = PI_IO_CMP_M_SLOT; + outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1); + val = (PI_ESIC_K_CSR_IO_LEN - 1) & ~3; + outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0); + + val = 0; + outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1); + val = PI_DEFEA_K_BURST_HOLDOFF; + outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0); + + val = PI_IO_CMP_M_SLOT; + outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1); + val = (PI_ESIC_K_BURST_HOLDOFF_LEN - 1) & ~3; + outb(val, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0); /* Enable the decoders. */ val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0; if (dfx_use_mmio) val |= PI_FUNCTION_CNTRL_M_MEMCS0; - outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val); + outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL); /* * Enable access to the rest of the module * (including PDQ and packet memory). */ val = PI_SLOT_CNTRL_M_ENB; - outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val); + outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL); /* * Map PDQ registers into memory or port space. This is @@ -748,15 +760,15 @@ static void dfx_bus_init(struct net_device *dev) */ val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF); if (dfx_use_mmio) - val |= PI_BURST_HOLDOFF_V_MEM_MAP; + val |= PI_BURST_HOLDOFF_M_MEM_MAP; else - val &= ~PI_BURST_HOLDOFF_V_MEM_MAP; - outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val); + val &= ~PI_BURST_HOLDOFF_M_MEM_MAP; + outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF); /* Enable interrupts at EISA bus interface chip (ESIC) */ val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); val |= PI_CONFIG_STAT_0_M_INT_ENB; - outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val); + outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); } if (dfx_bus_pci) { struct pci_dev *pdev = to_pci_dev(bdev); @@ -825,7 +837,7 @@ static void dfx_bus_uninit(struct net_device *dev) /* Disable interrupts at EISA bus interface chip (ESIC) */ val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); val &= ~PI_CONFIG_STAT_0_M_INT_ENB; - outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val); + outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); } if (dfx_bus_pci) { /* Disable interrupts at PCI bus interface chip (PFI) */ @@ -1917,7 +1929,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id) /* Disable interrupts at the ESIC */ status &= ~PI_CONFIG_STAT_0_M_INT_ENB; - outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); + outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); /* Call interrupt service routine for this adapter */ dfx_int_common(dev); @@ -1925,7 +1937,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id) /* Reenable interrupts at the ESIC */ status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); status |= PI_CONFIG_STAT_0_M_INT_ENB; - outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); + outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); spin_unlock(&bp->lock); } diff --git a/drivers/net/fddi/defxx.h b/drivers/net/fddi/defxx.h 
index adb63f3f7b4a32b093c82cf29cf222db68c8acd7..9527f0182fd41c70df8796b3888e7e1f3234a5a5 100644 --- a/drivers/net/fddi/defxx.h +++ b/drivers/net/fddi/defxx.h @@ -1479,8 +1479,10 @@ typedef union /* Define EISA controller register offsets */ -#define PI_ESIC_K_CSR_IO_LEN 0x80 /* 128 bytes */ +#define PI_ESIC_K_CSR_IO_LEN 0x40 /* 64 bytes */ +#define PI_ESIC_K_BURST_HOLDOFF_LEN 0x04 /* 4 bytes */ +#define PI_DEFEA_K_CSR_IO 0x000 #define PI_DEFEA_K_BURST_HOLDOFF 0x040 #define PI_ESIC_K_SLOT_ID 0xC80 @@ -1558,11 +1560,9 @@ typedef union #define PI_MEM_ADD_MASK_M 0x3ff -/* - * Define the fields in the IO Compare registers. - * The driver must initialize the slot field with the slot ID shifted by the - * amount shown below. - */ +/* Define the fields in the I/O Address Compare and Mask registers. */ + +#define PI_IO_CMP_M_SLOT 0xf0 #define PI_IO_CMP_V_SLOT 4 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d5e07def6a598ca3ccb7abda3c80e1732dc64413..2f48f790c9b43e983f44107a97f42f2268262099 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -591,7 +591,7 @@ struct nvsp_message { #define NETVSC_RECEIVE_BUFFER_ID 0xcafe -#define NETVSC_PACKET_SIZE 2048 +#define NETVSC_PACKET_SIZE 4096 #define VRSS_SEND_TAB_SIZE 16 @@ -642,7 +642,7 @@ struct netvsc_device { int ring_size; /* The primary channel callback buffer */ - unsigned char cb_buffer[NETVSC_PACKET_SIZE]; + unsigned char *cb_buffer; /* The sub channel callback buffer */ unsigned char *sub_cb_buf; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 66979cf7fca6a3d68d88a7c908c45aad2a7b58aa..7d76c9523395e5b5137784189d902d9cb1ecaed5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -42,6 +42,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) if (!net_device) return NULL; + net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); + if (!net_device->cb_buffer) { + kfree(net_device); + return NULL; + } + init_waitqueue_head(&net_device->wait_drain); net_device->start_remove = false; net_device->destroy = false; @@ -52,6 +58,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) return net_device; } +static void free_netvsc_device(struct netvsc_device *nvdev) +{ + kfree(nvdev->cb_buffer); + kfree(nvdev); +} + static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device; @@ -551,7 +563,7 @@ int netvsc_device_remove(struct hv_device *device) if (net_device->sub_cb_buf) vfree(net_device->sub_cb_buf); - kfree(net_device); + free_netvsc_device(net_device); return 0; } @@ -705,6 +717,7 @@ int netvsc_send(struct hv_device *device, unsigned int section_index = NETVSC_INVALID_INDEX; u32 msg_size = 0; struct sk_buff *skb; + u16 q_idx = packet->q_idx; net_device = get_outbound_net_device(device); @@ -769,24 +782,24 @@ int netvsc_send(struct hv_device *device, if (ret == 0) { atomic_inc(&net_device->num_outstanding_sends); - atomic_inc(&net_device->queue_sends[packet->q_idx]); + atomic_inc(&net_device->queue_sends[q_idx]); if (hv_ringbuf_avail_percent(&out_channel->outbound) < RING_AVAIL_PERCENT_LOWATER) { netif_tx_stop_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); if (atomic_read(&net_device-> - queue_sends[packet->q_idx]) < 1) + queue_sends[q_idx]) < 1) netif_tx_wake_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); } } else if (ret == -EAGAIN) { netif_tx_stop_queue(netdev_get_tx_queue( - 
ndev, packet->q_idx)); - if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) { + ndev, q_idx)); + if (atomic_read(&net_device->queue_sends[q_idx]) < 1) { netif_tx_wake_queue(netdev_get_tx_queue( - ndev, packet->q_idx)); + ndev, q_idx)); ret = -ENOSPC; } } else { @@ -1042,10 +1055,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) struct net_device *ndev; net_device = alloc_net_device(device); - if (!net_device) { - ret = -ENOMEM; - goto cleanup; - } + if (!net_device) + return -ENOMEM; net_device->ring_size = ring_size; @@ -1093,7 +1104,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) vmbus_close(device->channel); cleanup: - kfree(net_device); + free_netvsc_device(net_device); return ret; } diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 9e6a124b13f2c760d9c998d9bfbfde13c627c4dd..07e0b887c350f07bb125b88479626187f814e828 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -323,8 +323,8 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec, #ifdef DEBUG print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0); - printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", - lqi_rssi[0], lqi_rssi[1]); + pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", + lqi_rssi[0], lqi_rssi[1]); #endif out: @@ -385,7 +385,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level) { /* TODO: */ - printk(KERN_WARNING "mrf24j40: ed not implemented\n"); + pr_warn("mrf24j40: ed not implemented\n"); *level = 0; return 0; } @@ -412,6 +412,7 @@ static void mrf24j40_stop(struct ieee802154_dev *dev) struct mrf24j40 *devrec = dev->priv; u8 val; int ret; + dev_dbg(printdev(devrec), "stop\n"); ret = read_short_reg(devrec, REG_INTCON, &val); @@ -419,8 +420,6 @@ static void mrf24j40_stop(struct ieee802154_dev *dev) return; val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */ write_short_reg(devrec, REG_INTCON, val); - - return; } static int mrf24j40_set_channel(struct ieee802154_dev *dev, @@ -465,6 +464,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev, if (changed & IEEE802515_AFILT_SADDR_CHANGED) { /* Short Addr */ u8 addrh, addrl; + addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff; addrl = le16_to_cpu(filt->short_addr) & 0xff; @@ -483,16 +483,17 @@ static int mrf24j40_filter(struct ieee802154_dev *dev, write_short_reg(devrec, REG_EADR0 + i, addr[i]); #ifdef DEBUG - printk(KERN_DEBUG "Set long addr to: "); + pr_debug("Set long addr to: "); for (i = 0; i < 8; i++) - printk("%02hhx ", addr[7 - i]); - printk(KERN_DEBUG "\n"); + pr_debug("%02hhx ", addr[7 - i]); + pr_debug("\n"); #endif } if (changed & IEEE802515_AFILT_PANID_CHANGED) { /* PAN ID */ u8 panidl, panidh; + panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff; panidl = le16_to_cpu(filt->pan_id) & 0xff; write_short_reg(devrec, REG_PANIDH, panidh); @@ -701,7 +702,7 @@ static int mrf24j40_probe(struct spi_device *spi) int ret = -ENOMEM; struct mrf24j40 *devrec; - printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); + dev_info(&spi->dev, "probe(). 
IRQ: %d\n", spi->irq); devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL); if (!devrec) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index d2d4a3d2237f7c735a326a949ec55561fe0c79f7..34f846b4bd0574a8168d669c408181e1086d3386 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -185,7 +185,8 @@ static void ifb_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + netif_keep_dst(dev); eth_hw_addr_random(dev); } diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 8d101d63abca9a48466edfed2db11ab54df32d63..a2c227bfb687e3e5bae61955d203b842aa7dca5a 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -397,7 +397,7 @@ config MCS_FIR config SH_IRDA tristate "SuperH IrDA driver" depends on IRDA - depends on ARCH_SHMOBILE || COMPILE_TEST + depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM help Say Y here if your want to enable SuperH IrDA devices. diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a04af9d0f8f93006a1f6425eeb8423c65846a591..a2e556168286649f3c8d809ea8de145fdb7fbad5 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -324,12 +324,8 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", h, (unsigned)rd_get_status(rd), j); if (j > 0) { - seq_printf(seq, " data:"); - if (j > 20) - j = 20; - for (i = 0; i < j; i++) - seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]); - seq_printf(seq, "\n"); + seq_printf(seq, " data: %*ph\n", + min_t(unsigned, j, 20), rd->buf); } } for (i = 0; i < r->size; i++) { diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 8f2262540561caf546ea53740a20795b10f51205..c76283c2f84a4e7aa21b684770542e9b6202bffd 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev) dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 726edabff26b710d06b622782a71092430b184b4..38b4fae61f04ab32a2a0020ed50f0e12aef24a2c 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -35,7 +35,8 @@ #include #include -#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) +#define MACVLAN_HASH_BITS 8 +#define MACVLAN_HASH_SIZE (1<>= 16; +#else + value <<= 16; +#endif + return hash_64(value, MACVLAN_HASH_BITS); +} + static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); @@ -73,20 +96,68 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { struct macvlan_dev *vlan; + u32 idx = macvlan_eth_hash(addr); - hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) { + hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist) { if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; } +static struct macvlan_source_entry *macvlan_hash_lookup_source( + const struct macvlan_dev *vlan, + const unsigned char *addr) +{ + struct macvlan_source_entry *entry; + u32 idx = macvlan_eth_hash(addr); + struct 
hlist_head *h = &vlan->port->vlan_source_hash[idx]; + + hlist_for_each_entry_rcu(entry, h, hlist) { + if (ether_addr_equal_64bits(entry->addr, addr) && + entry->vlan == vlan) + return entry; + } + return NULL; +} + +static int macvlan_hash_add_source(struct macvlan_dev *vlan, + const unsigned char *addr) +{ + struct macvlan_port *port = vlan->port; + struct macvlan_source_entry *entry; + struct hlist_head *h; + + entry = macvlan_hash_lookup_source(vlan, addr); + if (entry) + return 0; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + ether_addr_copy(entry->addr, addr); + entry->vlan = vlan; + h = &port->vlan_source_hash[macvlan_eth_hash(addr)]; + hlist_add_head_rcu(&entry->hlist, h); + vlan->macaddr_count++; + + return 0; +} + static void macvlan_hash_add(struct macvlan_dev *vlan) { struct macvlan_port *port = vlan->port; const unsigned char *addr = vlan->dev->dev_addr; + u32 idx = macvlan_eth_hash(addr); - hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]); + hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]); +} + +static void macvlan_hash_del_source(struct macvlan_source_entry *entry) +{ + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); } static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) @@ -267,6 +338,65 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, atomic_long_inc(&skb->dev->rx_dropped); } +static void macvlan_flush_sources(struct macvlan_port *port, + struct macvlan_dev *vlan) +{ + int i; + + for (i = 0; i < MACVLAN_HASH_SIZE; i++) { + struct hlist_node *h, *n; + + hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) { + struct macvlan_source_entry *entry; + + entry = hlist_entry(h, struct macvlan_source_entry, + hlist); + if (entry->vlan == vlan) + macvlan_hash_del_source(entry); + } + } + vlan->macaddr_count = 0; +} + +static void macvlan_forward_source_one(struct sk_buff *skb, + struct macvlan_dev *vlan) +{ + struct sk_buff *nskb; + struct net_device *dev; + int len; + int ret; + + dev = vlan->dev; + if (unlikely(!(dev->flags & IFF_UP))) + return; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return; + + len = nskb->len + ETH_HLEN; + nskb->dev = dev; + nskb->pkt_type = PACKET_HOST; + + ret = netif_rx(nskb); + macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); +} + +static void macvlan_forward_source(struct sk_buff *skb, + struct macvlan_port *port, + const unsigned char *addr) +{ + struct macvlan_source_entry *entry; + u32 idx = macvlan_eth_hash(addr); + struct hlist_head *h = &port->vlan_source_hash[idx]; + + hlist_for_each_entry_rcu(entry, h, hlist) { + if (ether_addr_equal_64bits(entry->addr, addr)) + if (entry->vlan->dev->flags & IFF_UP) + macvlan_forward_source_one(skb, entry->vlan); + } +} + /* called under rcu_read_lock() from netif_receive_skb */ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { @@ -285,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) if (!skb) return RX_HANDLER_CONSUMED; eth = eth_hdr(skb); + macvlan_forward_source(skb, port, eth->h_source); src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { @@ -301,6 +432,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; } + macvlan_forward_source(skb, port, eth->h_source); if (port->passthru) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); @@ -667,6 +799,7 @@ static void macvlan_uninit(struct net_device *dev) 
free_percpu(vlan->pcpu_stats); + macvlan_flush_sources(port, vlan); port->count -= 1; if (!port->count) macvlan_port_destroy(port->dev); @@ -892,7 +1025,8 @@ void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); - dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; @@ -925,6 +1059,8 @@ static int macvlan_port_create(struct net_device *dev) INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); + for (i = 0; i < MACVLAN_HASH_SIZE; i++) + INIT_HLIST_HEAD(&port->vlan_source_hash[i]); skb_queue_head_init(&port->bc_queue); INIT_WORK(&port->bc_work, macvlan_process_broadcast); @@ -966,11 +1102,102 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) case MACVLAN_MODE_VEPA: case MACVLAN_MODE_BRIDGE: case MACVLAN_MODE_PASSTHRU: + case MACVLAN_MODE_SOURCE: + break; + default: + return -EINVAL; + } + } + + if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { + switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) { + case MACVLAN_MACADDR_ADD: + case MACVLAN_MACADDR_DEL: + case MACVLAN_MACADDR_FLUSH: + case MACVLAN_MACADDR_SET: break; default: return -EINVAL; } } + + if (data && data[IFLA_MACVLAN_MACADDR]) { + if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR]))) + return -EADDRNOTAVAIL; + } + + if (data && data[IFLA_MACVLAN_MACADDR_COUNT]) + return -EINVAL; + + return 0; +} + +/** + * reconfigure list of remote source mac address + * (only for macvlan devices in source mode) + * Note regarding alignment: all netlink data is aligned to 4 Byte, which + * suffices for both ether_addr_copy and ether_addr_equal_64bits usage. 
+ */ +static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, + struct nlattr *data[]) +{ + char *addr = NULL; + int ret, rem, len; + struct nlattr *nla, *head; + struct macvlan_source_entry *entry; + + if (data[IFLA_MACVLAN_MACADDR]) + addr = nla_data(data[IFLA_MACVLAN_MACADDR]); + + if (mode == MACVLAN_MACADDR_ADD) { + if (!addr) + return -EINVAL; + + return macvlan_hash_add_source(vlan, addr); + + } else if (mode == MACVLAN_MACADDR_DEL) { + if (!addr) + return -EINVAL; + + entry = macvlan_hash_lookup_source(vlan, addr); + if (entry) { + macvlan_hash_del_source(entry); + vlan->macaddr_count--; + } + } else if (mode == MACVLAN_MACADDR_FLUSH) { + macvlan_flush_sources(vlan->port, vlan); + } else if (mode == MACVLAN_MACADDR_SET) { + macvlan_flush_sources(vlan->port, vlan); + + if (addr) { + ret = macvlan_hash_add_source(vlan, addr); + if (ret) + return ret; + } + + if (!data || !data[IFLA_MACVLAN_MACADDR_DATA]) + return 0; + + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + continue; + + addr = nla_data(nla); + ret = macvlan_hash_add_source(vlan, addr); + if (ret) + return ret; + } + } else { + return -EINVAL; + } + return 0; } @@ -981,6 +1208,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct macvlan_port *port; struct net_device *lowerdev; int err; + int macmode; if (!tb[IFLA_LINK]) return -EINVAL; @@ -1034,6 +1262,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, eth_hw_addr_inherit(dev, lowerdev); } + if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { + if (vlan->mode != MACVLAN_MODE_SOURCE) + return -EINVAL; + macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); + err = macvlan_changelink_sources(vlan, macmode, data); + if (err) + return err; + } + port->count += 1; err = register_netdevice(dev); if (err < 0) @@ -1070,6 +1307,8 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); + if (vlan->mode == MACVLAN_MODE_SOURCE) + macvlan_flush_sources(vlan->port, vlan); list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); @@ -1082,6 +1321,8 @@ static int macvlan_changelink(struct net_device *dev, struct macvlan_dev *vlan = netdev_priv(dev); enum macvlan_mode mode; bool set_mode = false; + enum macvlan_macaddr_mode macmode; + int ret; /* Validate mode, but don't set yet: setting flags may fail. 
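Illustration: macvlan_changelink_sources() above treats MACVLAN_MACADDR_SET as a flush followed by re-adding the single IFLA_MACVLAN_MACADDR (if given) plus every address nested in IFLA_MACVLAN_MACADDR_DATA. A compact userspace model of that dispatch follows, with the per-address bookkeeping reduced to a counter; all names are illustrative and duplicate handling is omitted.

#include <stdio.h>

enum macaddr_mode { MAC_ADD, MAC_DEL, MAC_FLUSH, MAC_SET };

struct dev_model {
        int macaddr_count;      /* number of allowed source MACs */
};

/* 'have_addr' models a present IFLA_MACVLAN_MACADDR attribute,
 * 'nested' models the address count in IFLA_MACVLAN_MACADDR_DATA.
 */
static int change_sources(struct dev_model *d, enum macaddr_mode mode,
                          int have_addr, int nested)
{
        switch (mode) {
        case MAC_ADD:
                if (!have_addr)
                        return -1;
                d->macaddr_count++;
                return 0;
        case MAC_DEL:
                if (!have_addr)
                        return -1;
                if (d->macaddr_count > 0)
                        d->macaddr_count--;
                return 0;
        case MAC_FLUSH:
                d->macaddr_count = 0;
                return 0;
        case MAC_SET:
                d->macaddr_count = 0;                   /* flush ... */
                d->macaddr_count += have_addr;          /* ... then re-add */
                d->macaddr_count += nested;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct dev_model d = { 2 };

        change_sources(&d, MAC_SET, 1, 3);
        printf("%d source addresses after SET\n", d.macaddr_count);
        return 0;
}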
*/ if (data && data[IFLA_MACVLAN_MODE]) { @@ -1091,6 +1332,9 @@ static int macvlan_changelink(struct net_device *dev, if ((mode == MACVLAN_MODE_PASSTHRU) != (vlan->mode == MACVLAN_MODE_PASSTHRU)) return -EINVAL; + if (vlan->mode == MACVLAN_MODE_SOURCE && + vlan->mode != mode) + macvlan_flush_sources(vlan->port, vlan); } if (data && data[IFLA_MACVLAN_FLAGS]) { @@ -1110,26 +1354,77 @@ static int macvlan_changelink(struct net_device *dev, } if (set_mode) vlan->mode = mode; + if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { + if (vlan->mode != MACVLAN_MODE_SOURCE) + return -EINVAL; + macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); + ret = macvlan_changelink_sources(vlan, macmode, data); + if (ret) + return ret; + } return 0; } +static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan) +{ + if (vlan->macaddr_count == 0) + return 0; + return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */ + + vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN); +} + static size_t macvlan_get_size(const struct net_device *dev) { + struct macvlan_dev *vlan = netdev_priv(dev); + return (0 + nla_total_size(4) /* IFLA_MACVLAN_MODE */ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ + + nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */ + + macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */ ); } +static int macvlan_fill_info_macaddr(struct sk_buff *skb, + const struct macvlan_dev *vlan, + const int i) +{ + struct hlist_head *h = &vlan->port->vlan_source_hash[i]; + struct macvlan_source_entry *entry; + + hlist_for_each_entry_rcu(entry, h, hlist) { + if (entry->vlan != vlan) + continue; + if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) + return 1; + } + return 0; +} + static int macvlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); + int i; + struct nlattr *nest; if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags)) goto nla_put_failure; + if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count)) + goto nla_put_failure; + if (vlan->macaddr_count > 0) { + nest = nla_nest_start(skb, IFLA_MACVLAN_MACADDR_DATA); + if (nest == NULL) + goto nla_put_failure; + + for (i = 0; i < MACVLAN_HASH_SIZE; i++) { + if (macvlan_fill_info_macaddr(skb, vlan, i)) + goto nla_put_failure; + } + nla_nest_end(skb, nest); + } return 0; nla_put_failure: @@ -1139,6 +1434,10 @@ static int macvlan_fill_info(struct sk_buff *skb, static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 }, + [IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 }, + [IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, + [IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED }, + [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 65de0cab8d07795c5c2255d7397a95e3720477fc..75472cf734de14f53e57daa1e541701e71643c40 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -159,8 +159,6 @@ config MDIO_OCTEON config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI - select REGULATOR - select REGULATOR_FIXED_VOLTAGE help This driver supports the MDIO interface found in the network interface units of the Allwinner SoC that have an EMAC (A10, @@ -205,6 +203,15 @@ config MDIO_BUS_MUX_MMIOREG Currently, only 8-bit 
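Illustration: macvlan_get_size_mac() above reserves one netlink attribute header for the IFLA_MACVLAN_MACADDR_DATA nest plus one ETH_ALEN-sized attribute per stored address. The self-contained sketch below reproduces that size arithmetic, assuming the standard 4-byte netlink attribute header and 4-byte alignment that nla_total_size() works with.

#include <stdio.h>

#define ETH_ALEN        6
#define NLA_ALIGNTO     4
#define NLA_ALIGN(n)    (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN      4       /* aligned size of a struct nlattr header */

/* Mirrors the nla_total_size() arithmetic: header plus payload, rounded
 * up to the netlink alignment.
 */
static int nla_total(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
        int count = 3;          /* stands in for vlan->macaddr_count */

        /* one empty nest header + one MAC attribute per stored address */
        printf("%d bytes reserved\n",
               nla_total(0) + count * nla_total(ETH_ALEN));
        return 0;
}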
registers are supported. +config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" + depends on HAS_IOMEM + help + This module provides a driver for the Broadcom UniMAC MDIO busses. + This hardware can be found in the Broadcom GENET Ethernet MAC + controllers as well as some Broadcom Ethernet switches such as the + Starfighter 2 switches. + endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 7dc3d5b304cfc250a2618e096abecf428ed05bad..eb3b18b5978b33ec7441a2ca2e0f4aa33906cdd5 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -34,3 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o +obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index f3230eef41fda7a09442571bb26553ca86b14768..c456559f6e7f2ea3ae713e98c93e58148aaa7776 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -75,7 +75,6 @@ #include #include - MODULE_AUTHOR("Tom Lendacky "); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION("1.0.0-a"); @@ -100,9 +99,11 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #ifndef MDIO_PMA_10GBR_PMD_CTRL #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 #endif + #ifndef MDIO_PMA_10GBR_FEC_CTRL #define MDIO_PMA_10GBR_FEC_CTRL 0x00ab #endif + #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif @@ -110,14 +111,23 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #ifndef MDIO_AN_INTMASK #define MDIO_AN_INTMASK 0x8001 #endif + #ifndef MDIO_AN_INT #define MDIO_AN_INT 0x8002 #endif +#ifndef MDIO_AN_KR_CTRL +#define MDIO_AN_KR_CTRL 0x8003 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif +#ifndef MDIO_KR_CTRL_PDETECT +#define MDIO_KR_CTRL_PDETECT 0x01 +#endif + /* SerDes integration register offsets */ #define SIR0_KR_RT_1 0x002c #define SIR0_STATUS 0x0040 @@ -161,7 +171,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_1000_TXAMP 0xf #define SPEED_1000_WORD 0x1 - /* SerDes RxTx register offsets */ #define RXTX_REG20 0x0050 #define RXTX_REG114 0x01c8 @@ -255,7 +264,6 @@ do { \ XSIR1_IOWRITE((_priv), _reg, reg_val); \ } while (0) - /* Macros for reading or writing SerDes RxTx registers * The ioread macros will get bit fields or full values using the * register definitions formed using the input names @@ -283,7 +291,6 @@ do { \ XRXTX_IOWRITE((_priv), _reg, reg_val); \ } while (0) - enum amd_xgbe_phy_an { AMD_XGBE_AN_READY = 0, AMD_XGBE_AN_START, @@ -331,7 +338,6 @@ struct amd_xgbe_phy_priv { /* Maintain link status for re-starting auto-negotiation */ unsigned int link; - enum amd_xgbe_phy_mode mode; unsigned int speed_set; /* Auto-negotiation state machine support */ @@ -342,6 +348,7 @@ struct amd_xgbe_phy_priv { enum amd_xgbe_phy_rx kx_state; struct work_struct an_work; struct workqueue_struct *an_workqueue; + unsigned int parallel_detect; }; static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev) @@ -468,8 +475,6 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KR; - return 0; } @@ -518,8 +523,6 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KX; - return 0; } @@ -568,18 +571,43 @@ 
static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KX; + return 0; +} + +static int amd_xgbe_phy_cur_mode(struct phy_device *phydev, + enum amd_xgbe_phy_mode *mode) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); + if (ret < 0) + return ret; + + if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) + *mode = AMD_XGBE_MODE_KR; + else + *mode = AMD_XGBE_MODE_KX; return 0; } +static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev) +{ + enum amd_xgbe_phy_mode mode; + + if (amd_xgbe_phy_cur_mode(phydev, &mode)) + return false; + + return (mode == AMD_XGBE_MODE_KR); +} + static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; int ret; /* If we are in KR switch to KX, and vice-versa */ - if (priv->mode == AMD_XGBE_MODE_KR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000) ret = amd_xgbe_phy_gmii_mode(phydev); else @@ -591,15 +619,20 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) return ret; } -static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev) +static int amd_xgbe_phy_set_mode(struct phy_device *phydev, + enum amd_xgbe_phy_mode mode) { + enum amd_xgbe_phy_mode cur_mode; int ret; - ret = amd_xgbe_phy_switch_mode(phydev); - if (ret < 0) - return AMD_XGBE_AN_ERROR; + ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode); + if (ret) + return ret; - return AMD_XGBE_AN_START; + if (mode != cur_mode) + ret = amd_xgbe_phy_switch_mode(phydev); + + return ret; } static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, @@ -610,8 +643,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, *state = AMD_XGBE_RX_COMPLETE; - /* If we're in KX mode then we're done */ - if (priv->mode == AMD_XGBE_MODE_KX) + /* If we're not in KR mode then we're done */ + if (!amd_xgbe_phy_in_kr_mode(phydev)) return AMD_XGBE_AN_EVENT; /* Enable/Disable FEC */ @@ -669,7 +702,6 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev, static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, enum amd_xgbe_phy_rx *state) { - struct amd_xgbe_phy_priv *priv = phydev->priv; unsigned int link_support; int ret, ad_reg, lp_reg; @@ -679,9 +711,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, return AMD_XGBE_AN_ERROR; /* Check for a supported mode, otherwise restart in a different one */ - link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20; + link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 
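Illustration: amd_xgbe_phy_cur_mode() above derives KR versus KX from the PCS type bits of MDIO_CTRL2, and amd_xgbe_phy_set_mode() only performs a SerDes switch when the requested mode differs from what the hardware already reports. A minimal sketch of that query-then-switch-on-mismatch pattern; the register accessor is a stand-in, not the driver's phy_read_mmd() call.

#include <stdio.h>

enum xgbe_mode { MODE_KX, MODE_KR };

/* Stand-in for reading the PCS type; a negative value means MDIO error. */
static int read_cur_mode(void)
{
        return MODE_KR;         /* pretend the SerDes is in 10GBASE-R */
}

static int set_mode(enum xgbe_mode want)
{
        int now = read_cur_mode();

        if (now < 0)
                return now;
        if ((enum xgbe_mode)now == want)
                return 0;                       /* already there, no switch */

        printf("switching %s -> %s\n",
               now == MODE_KR ? "KR" : "KX",
               want == MODE_KR ? "KR" : "KX");
        return 0;                               /* would switch the SerDes here */
}

int main(void)
{
        return set_mode(MODE_KX);
}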
0x80 : 0x20; if (!(ret & link_support)) - return amd_xgbe_an_switch_mode(phydev); + return AMD_XGBE_AN_INCOMPAT_LINK; /* Check Extended Next Page support */ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); @@ -722,7 +754,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) int ret; /* Be sure we aren't looping trying to negotiate */ - if (priv->mode == AMD_XGBE_MODE_KR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { if (priv->kr_state != AMD_XGBE_RX_READY) return AMD_XGBE_AN_NO_LINK; priv->kr_state = AMD_XGBE_RX_BPA; @@ -785,6 +817,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) /* Enable and start auto-negotiation */ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL); + if (ret < 0) + return AMD_XGBE_AN_ERROR; + + ret |= MDIO_KR_CTRL_PDETECT; + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); if (ret < 0) return AMD_XGBE_AN_ERROR; @@ -825,8 +864,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) enum amd_xgbe_phy_rx *state; int ret; - state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state - : &priv->kx_state; + state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state + : &priv->kx_state; switch (*state) { case AMD_XGBE_RX_BPA: @@ -846,7 +885,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) { - return amd_xgbe_an_switch_mode(phydev); + int ret; + + ret = amd_xgbe_phy_switch_mode(phydev); + if (ret) + return AMD_XGBE_AN_ERROR; + + return AMD_XGBE_AN_START; } static void amd_xgbe_an_state_machine(struct work_struct *work) @@ -859,6 +904,10 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) int sleep; unsigned int an_supported = 0; + /* Start in KX mode */ + if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX)) + priv->an_state = AMD_XGBE_AN_ERROR; + while (1) { mutex_lock(&priv->an_mutex); @@ -866,8 +915,9 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) switch (priv->an_state) { case AMD_XGBE_AN_START: - priv->an_state = amd_xgbe_an_start(phydev); an_supported = 0; + priv->parallel_detect = 0; + priv->an_state = amd_xgbe_an_start(phydev); break; case AMD_XGBE_AN_EVENT: @@ -884,6 +934,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) break; case AMD_XGBE_AN_COMPLETE: + priv->parallel_detect = an_supported ? 0 : 1; netdev_info(phydev->attached_dev, "%s successful\n", an_supported ? 
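Illustration: the auto-negotiation start path above enables parallel detection with a read-modify-write of MDIO_AN_KR_CTRL, propagating a failed read as an error. A tiny self-contained sketch of that read/OR/write-back pattern; the register accessors are stand-ins, and KR_CTRL_PDETECT mirrors the fallback define added earlier in this patch.

#include <stdio.h>

#define KR_CTRL_PDETECT 0x01    /* cf. MDIO_KR_CTRL_PDETECT above */

static int an_kr_ctrl;          /* stands in for the MDIO_AN_KR_CTRL register */

static int reg_read(void)       { return an_kr_ctrl; }
static void reg_write(int val)  { an_kr_ctrl = val; }

static int enable_parallel_detect(void)
{
        int ret = reg_read();

        if (ret < 0)
                return ret;     /* propagate the MDIO error */

        reg_write(ret | KR_CTRL_PDETECT);
        return 0;
}

int main(void)
{
        enable_parallel_detect();
        printf("AN_KR_CTRL = 0x%02x\n", an_kr_ctrl);
        return 0;
}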
"Auto negotiation" : "Parallel detection"); @@ -1018,7 +1069,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret; if (phydev->autoneg != AUTONEG_ENABLE) return amd_xgbe_phy_setup_forced(phydev); @@ -1027,11 +1077,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev) if (!(mmd_mask & MDIO_DEVS_AN)) return -EINVAL; - /* Get the current speed mode */ - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (ret < 0) - return ret; - /* Start/Restart the auto-negotiation state machine */ mutex_lock(&priv->an_mutex); priv->an_result = AMD_XGBE_AN_READY; @@ -1121,18 +1166,14 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret, mode, ad_ret, lp_ret; + int ret, ad_ret, lp_ret; ret = amd_xgbe_phy_update_link(phydev); if (ret) return ret; - mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (mode < 0) - return mode; - mode &= MDIO_PCS_CTRL2_TYPE; - - if (phydev->autoneg == AUTONEG_ENABLE) { + if ((phydev->autoneg == AUTONEG_ENABLE) && + !priv->parallel_detect) { if (!(mmd_mask & MDIO_DEVS_AN)) return -EINVAL; @@ -1163,40 +1204,39 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) ad_ret &= lp_ret; if (ad_ret & 0x80) { phydev->speed = SPEED_10000; - if (mode != MDIO_PCS_CTRL2_10GBR) { - ret = amd_xgbe_phy_xgmii_mode(phydev); - if (ret < 0) - return ret; - } + ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR); + if (ret) + return ret; } else { - int (*mode_fcn)(struct phy_device *); - - if (priv->speed_set == - AMD_XGBE_PHY_SPEEDSET_1000_10000) { + switch (priv->speed_set) { + case AMD_XGBE_PHY_SPEEDSET_1000_10000: phydev->speed = SPEED_1000; - mode_fcn = amd_xgbe_phy_gmii_mode; - } else { + break; + + case AMD_XGBE_PHY_SPEEDSET_2500_10000: phydev->speed = SPEED_2500; - mode_fcn = amd_xgbe_phy_gmii_2500_mode; + break; } - if (mode == MDIO_PCS_CTRL2_10GBR) { - ret = mode_fcn(phydev); - if (ret < 0) - return ret; - } + ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX); + if (ret) + return ret; } phydev->duplex = DUPLEX_FULL; } else { - if (mode == MDIO_PCS_CTRL2_10GBR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { phydev->speed = SPEED_10000; } else { - if (priv->speed_set == - AMD_XGBE_PHY_SPEEDSET_1000_10000) + switch (priv->speed_set) { + case AMD_XGBE_PHY_SPEEDSET_1000_10000: phydev->speed = SPEED_1000; - else + break; + + case AMD_XGBE_PHY_SPEEDSET_2500_10000: phydev->speed = SPEED_2500; + break; + } } phydev->duplex = DUPLEX_FULL; phydev->pause = 0; @@ -1329,14 +1369,6 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) priv->link = 1; - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (ret < 0) - goto err_sir1; - if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) - priv->mode = AMD_XGBE_MODE_KR; - else - priv->mode = AMD_XGBE_MODE_KX; - mutex_init(&priv->an_mutex); INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine); priv->an_workqueue = create_singlethread_workqueue(wq_name); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index fdce1ea28790c4e9ae0d56e9995d1a299782f4ec..1d211d36903965a7f2fc461190763f2b2aadd705 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -14,6 +14,7 @@ #include #include #include +#include /* Broadcom BCM7xxx internal PHY registers */ #define MII_BCM7XXX_CHANNEL_WIDTH 0x2000 @@ -146,15 +147,79 @@ static int 
bcm7xxx_28nm_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_apd_enable(struct phy_device *phydev) +{ + int val; + + /* Enable powering down of the DLL during auto-power down */ + val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3); + if (val < 0) + return val; + + val |= BCM54XX_SHD_SCR3_DLLAPD_DIS; + bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val); + + /* Enable auto-power down */ + val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD); + if (val < 0) + return val; + + val |= BCM54XX_SHD_APD_EN; + return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val); +} + +static int bcm7xxx_eee_enable(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, + MDIO_MMD_AN, phydev->addr); + if (val < 0) + return val; + + /* Enable general EEE feature at the PHY level */ + val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X; + + phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, + MDIO_MMD_AN, phydev->addr, val); + + /* Advertise supported modes */ + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, + MDIO_MMD_AN, phydev->addr); + + val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, + MDIO_MMD_AN, phydev->addr, val); + + return 0; +} + static int bcm7xxx_28nm_config_init(struct phy_device *phydev) { - int ret; + u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags); + u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags); + int ret = 0; + + dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch); + + switch (rev) { + case 0xa0: + case 0xb0: + ret = bcm7445_config_init(phydev); + break; + default: + ret = bcm7xxx_28nm_afe_config_init(phydev); + break; + } - ret = bcm7445_config_init(phydev); if (ret) return ret; - return bcm7xxx_28nm_afe_config_init(phydev); + ret = bcm7xxx_eee_enable(phydev); + if (ret) + return ret; + + return bcm7xxx_apd_enable(phydev); } static int bcm7xxx_28nm_resume(struct phy_device *phydev) @@ -201,8 +266,8 @@ static int bcm7xxx_config_init(struct phy_device *phydev) phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); phy_read(phydev, MII_BCM7XXX_AUX_MODE); - /* Workaround only required for 100Mbits/sec */ - if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR)) + /* Workaround only required for 100Mbits/sec capable PHYs */ + if (phydev->supported & PHY_GBIT_FEATURES) return 0; /* set shadow mode 2 */ @@ -263,43 +328,53 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) return 0; } +#define BCM7XXX_28NM_GPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_GBIT_FEATURES | \ + SUPPORTED_Pause | SUPPORTED_Asym_Pause, \ + .flags = PHY_IS_INTERNAL, \ + .config_init = bcm7xxx_28nm_afe_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .resume = bcm7xxx_28nm_resume, \ + .driver = { .owner = THIS_MODULE }, \ +} + static struct phy_driver bcm7xxx_driver[] = { + BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), { - .phy_id = PHY_ID_BCM7366, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7366", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_afe_config_init, - .config_aneg = genphy_config_aneg, - 
.read_status = genphy_read_status, - .resume = bcm7xxx_28nm_resume, - .driver = { .owner = THIS_MODULE }, -}, { - .phy_id = PHY_ID_BCM7439, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7439", - .features = PHY_GBIT_FEATURES | + .phy_id = PHY_ID_BCM7425, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM7425", + .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_afe_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .resume = bcm7xxx_28nm_resume, - .driver = { .owner = THIS_MODULE }, + .flags = 0, + .config_init = bcm7xxx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .suspend = bcm7xxx_suspend, + .resume = bcm7xxx_config_init, + .driver = { .owner = THIS_MODULE }, }, { - .phy_id = PHY_ID_BCM7445, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7445", - .features = PHY_GBIT_FEATURES | + .phy_id = PHY_ID_BCM7429, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM7429", + .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .resume = bcm7xxx_28nm_afe_config_init, - .driver = { .owner = THIS_MODULE }, + .flags = PHY_IS_INTERNAL, + .config_init = bcm7xxx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .suspend = bcm7xxx_suspend, + .resume = bcm7xxx_config_init, + .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_BCM_OUI_4, .phy_id_mask = 0xffff0000, @@ -329,7 +404,11 @@ static struct phy_driver bcm7xxx_driver[] = { } }; static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { + { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, + { PHY_ID_BCM7425, 0xfffffff0, }, + { PHY_ID_BCM7429, 0xfffffff0, }, { PHY_ID_BCM7439, 0xfffffff0, }, { PHY_ID_BCM7445, 0xfffffff0, }, { PHY_BCM_OUI_4, 0xffff0000 }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 34088d60da74613cd007916ddd4844ee5420f90a..854f2c9a7b2b5224f1dab808d16d8f514cdd486a 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -25,132 +25,10 @@ #define BRCM_PHY_REV(phydev) \ ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask)) -/* - * Broadcom LED source encodings. These are used in BCM5461, BCM5481, - * BCM5482, and possibly some others. - */ -#define BCM_LED_SRC_LINKSPD1 0x0 -#define BCM_LED_SRC_LINKSPD2 0x1 -#define BCM_LED_SRC_XMITLED 0x2 -#define BCM_LED_SRC_ACTIVITYLED 0x3 -#define BCM_LED_SRC_FDXLED 0x4 -#define BCM_LED_SRC_SLAVE 0x5 -#define BCM_LED_SRC_INTR 0x6 -#define BCM_LED_SRC_QUALITY 0x7 -#define BCM_LED_SRC_RCVLED 0x8 -#define BCM_LED_SRC_MULTICOLOR1 0xa -#define BCM_LED_SRC_OPENSHORT 0xb -#define BCM_LED_SRC_OFF 0xe /* Tied high */ -#define BCM_LED_SRC_ON 0xf /* Tied low */ - - -/* - * BCM5482: Shadow registers - * Shadow values go into bits [14:10] of register 0x1c to select a shadow - * register to access. 
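Illustration: the BCM54xx shadow-register definitions being relocated here follow the scheme in the comment above, where bits [14:10] of register 0x1c select one of the shadow registers. The self-contained userspace model below shows both sides of that protocol, assuming the usual layout of data in bits [9:0] and a write flag in bit 15; the kernel's real MII_BCM54XX_SHD_* encodings live in its Broadcom PHY header.

#include <stdio.h>

#define SHD_WRITE       0x8000
#define SHD_SEL(s)      (((s) & 0x1f) << 10)
#define SHD_DATA(v)     ((v) & 0x3ff)

static unsigned int shadow_bank[32];    /* the PHY's hidden registers */
static unsigned int mii_1c;             /* last value written to reg 0x1c */

/* PHY side: a write either selects a shadow register or, with the write
 * flag set, updates it; a read returns the selected register's data bits.
 */
static void write_1c(unsigned int val)
{
        mii_1c = val;
        if (val & SHD_WRITE)
                shadow_bank[(val >> 10) & 0x1f] = SHD_DATA(val);
}

static unsigned int read_1c(void)
{
        return SHD_SEL((mii_1c >> 10) & 0x1f) |
               SHD_DATA(shadow_bank[(mii_1c >> 10) & 0x1f]);
}

/* Driver side: the same two-step access the removed helpers perform. */
static unsigned int shadow_read(unsigned int shadow)
{
        write_1c(SHD_SEL(shadow));      /* latch the selector */
        return SHD_DATA(read_1c());     /* then pull the data bits */
}

static void shadow_write(unsigned int shadow, unsigned int val)
{
        write_1c(SHD_WRITE | SHD_SEL(shadow) | SHD_DATA(val));
}

int main(void)
{
        shadow_write(0x0a, 0x020);      /* e.g. the auto power-down enable bit */
        printf("shadow 0x0a reads back 0x%03x\n", shadow_read(0x0a));
        return 0;
}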
- */ -/* 00101: Spare Control Register 3 */ -#define BCM54XX_SHD_SCR3 0x05 -#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 -#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 -#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 - -/* 01010: Auto Power-Down */ -#define BCM54XX_SHD_APD 0x0a -#define BCM54XX_SHD_APD_EN 0x0020 - -#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ - /* LED3 / ~LINKSPD[2] selector */ -#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) - /* LED1 / ~LINKSPD[1] selector */ -#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) -#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ -#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ -#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ -#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ -#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ -#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ - - -/* - * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) - */ -#define MII_BCM54XX_EXP_AADJ1CH0 0x001f -#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 -#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 -#define MII_BCM54XX_EXP_AADJ1CH3 0x601f -#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 -#define MII_BCM54XX_EXP_EXP08 0x0F08 -#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 -#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 -#define MII_BCM54XX_EXP_EXP75 0x0f75 -#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c -#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 -#define MII_BCM54XX_EXP_EXP96 0x0f96 -#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 -#define MII_BCM54XX_EXP_EXP97 0x0f97 -#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c - -/* - * BCM5482: Secondary SerDes registers - */ -#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ -#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ -#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ -#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ -#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ - - -/*****************************************************************************/ -/* Fast Ethernet Transceiver definitions. */ -/*****************************************************************************/ - -#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ -#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ -#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ -#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ -#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ -#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ - -#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ -#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ - - -/*** Shadow register definitions ***/ - -#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ -#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ - -#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ -#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 -#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 - -#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ -#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ - - MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); -/* - * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T - * 0x1c shadow registers. 
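Illustration: earlier in this patch, bcm7xxx_28nm_config_init() picks its init sequence from a PHY revision and patch level that the Ethernet MAC driver packs into phydev->dev_flags and reads back through the PHY_BRCM_7XXX_REV()/PATCH() accessors. The sketch below models that pack/extract convention; the bit positions chosen here are illustrative, not the real brcmphy.h layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: revision in bits [7:0], patch level in bits [15:8]. */
#define REV(flags)       ((flags) & 0xff)
#define PATCH(flags)     (((flags) >> 8) & 0xff)
#define PACK(rev, patch) (((uint32_t)(patch) << 8) | ((rev) & 0xff))

static const char *workaround_for(uint32_t dev_flags)
{
        switch (REV(dev_flags)) {
        case 0xa0:
        case 0xb0:
                return "full bcm7445 init sequence";
        default:
                return "28nm AFE init only";
        }
}

int main(void)
{
        uint32_t flags = PACK(0xb0, 2);

        printf("rev 0x%02x patch %u -> %s\n",
               REV(flags), PATCH(flags), workaround_for(flags));
        return 0;
}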
- */ -static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) -{ - phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); - return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); -} - -static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val) -{ - return phy_write(phydev, MII_BCM54XX_SHD, - MII_BCM54XX_SHD_WRITE | - MII_BCM54XX_SHD_VAL(shadow) | - MII_BCM54XX_SHD_DATA(val)); -} - /* Indirect register access functions for the Expansion Registers */ static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum) { diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index c301e4cb37cacc3b77645afb265871242abac37a..2954052706e8bcbf0979e89772326656ba6c57fa 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -721,7 +721,7 @@ static inline u16 exts_chan_to_edata(int ch) } static int decode_evnt(struct dp83640_private *dp83640, - void *data, u16 ests) + void *data, int len, u16 ests) { struct phy_txts *phy_txts; struct ptp_clock_event event; @@ -729,6 +729,16 @@ static int decode_evnt(struct dp83640_private *dp83640, int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; u16 ext_status = 0; + /* calculate length of the event timestamp status message */ + if (ests & MULT_EVNT) + parsed = (words + 2) * sizeof(u16); + else + parsed = (words + 1) * sizeof(u16); + + /* check if enough data is available */ + if (len < parsed) + return len; + if (ests & MULT_EVNT) { ext_status = *(u16 *) data; data += sizeof(ext_status); @@ -747,10 +757,7 @@ static int decode_evnt(struct dp83640_private *dp83640, dp83640->edata.ns_lo = phy_txts->ns_lo; } - if (ext_status) { - parsed = words + 2; - } else { - parsed = words + 1; + if (!ext_status) { i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT; ext_status = exts_chan_to_edata(i); } @@ -768,7 +775,7 @@ static int decode_evnt(struct dp83640_private *dp83640, } } - return parsed * sizeof(u16); + return parsed; } static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) @@ -905,9 +912,9 @@ static void decode_status_frame(struct dp83640_private *dp83640, decode_txts(dp83640, phy_txts); size = sizeof(*phy_txts); - } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { + } else if (PSF_EVNT == type) { - size = decode_evnt(dp83640, ptr, ests); + size = decode_evnt(dp83640, ptr, len, ests); } else { size = 0; @@ -1129,7 +1136,6 @@ static void dp83640_remove(struct phy_device *phydev) struct dp83640_clock *clock; struct list_head *this, *next; struct dp83640_private *tmp, *dp83640 = phydev->priv; - struct sk_buff *skb; if (phydev->addr == BROADCAST_ADDR) return; @@ -1137,11 +1143,8 @@ static void dp83640_remove(struct phy_device *phydev) enable_status_frames(phydev, false); cancel_work_sync(&dp83640->ts_work); - while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) - kfree_skb(skb); - - while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL) - skb_complete_tx_timestamp(skb, NULL); + skb_queue_purge(&dp83640->rx_queue); + skb_queue_purge(&dp83640->tx_queue); clock = dp83640_clock_get(dp83640->clock); @@ -1398,7 +1401,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, case HWTSTAMP_TX_ONESTEP_SYNC: if (is_sync(skb, type)) { - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); return; } /* fall through */ @@ -1409,7 +1412,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, case HWTSTAMP_TX_OFF: default: - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); break; } } diff --git a/drivers/net/phy/fixed.c 
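Illustration: the dp83640 decode_evnt() change above works out how many bytes the event-timestamp message will occupy before touching the buffer and bails out when the status frame is shorter than that. A self-contained model of that bounds check follows; the field positions extracted from the status word are illustrative, not the driver's EVNT_* masks.

#include <stdio.h>
#include <stdint.h>

static int decode_event(const uint16_t *data, int len, uint16_t ests,
                        int multi_event)
{
        int words  = (ests >> 2) & 0x3;     /* illustrative field extraction */
        int parsed = (words + (multi_event ? 2 : 1)) * (int)sizeof(uint16_t);

        if (len < parsed)
                return len;                 /* truncated: consume what's left */

        /* ... safe to parse 'parsed' bytes from 'data' here ... */
        (void)data;
        return parsed;                      /* bytes consumed */
}

int main(void)
{
        uint16_t buf[4] = { 0 };

        printf("consumed %d of %zu bytes\n",
               decode_event(buf, (int)sizeof(buf), 0x000c, 0), sizeof(buf));
        return 0;
}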
b/drivers/net/phy/fixed.c index d60d875cb4450ab6cf72114c35b4c816e2e031fd..47872caa0081dbc1974c4aa199f035d83986757d 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -124,6 +124,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) if (reg_num >= MII_REGS_NUM) return -1; + /* We do not support emulating Clause 45 over Clause 22 register reads + * return an error instead of bogus data. + */ + switch (reg_num) { + case MII_MMD_CTRL: + case MII_MMD_DATA: + return -1; + default: + break; + } + list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { /* Issue callback if user registered it. */ @@ -222,9 +233,9 @@ EXPORT_SYMBOL_GPL(fixed_phy_del); static int phy_fixed_addr; static DEFINE_SPINLOCK(phy_fixed_addr_lock); -int fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np) +struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np) { struct fixed_mdio_bus *fmb = &platform_fmb; struct phy_device *phy; @@ -235,19 +246,19 @@ int fixed_phy_register(unsigned int irq, spin_lock(&phy_fixed_addr_lock); if (phy_fixed_addr == PHY_MAX_ADDR) { spin_unlock(&phy_fixed_addr_lock); - return -ENOSPC; + return ERR_PTR(-ENOSPC); } phy_addr = phy_fixed_addr++; spin_unlock(&phy_fixed_addr_lock); ret = fixed_phy_add(PHY_POLL, phy_addr, status); if (ret < 0) - return ret; + return ERR_PTR(ret); phy = get_phy_device(fmb->mii_bus, phy_addr, false); if (!phy || IS_ERR(phy)) { fixed_phy_del(phy_addr); - return -EINVAL; + return ERR_PTR(-EINVAL); } of_node_get(np); @@ -258,10 +269,10 @@ int fixed_phy_register(unsigned int irq, phy_device_free(phy); of_node_put(np); fixed_phy_del(phy_addr); - return ret; + return ERR_PTR(ret); } - return 0; + return phy; } static int __init fixed_mdio_bus_init(void) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c new file mode 100644 index 0000000000000000000000000000000000000000..5b643e588e8fdd688ad3b65c4ef3b3a7bb978b79 --- /dev/null +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -0,0 +1,213 @@ +/* + * Broadcom UniMAC MDIO bus controller driver + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
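Illustration: fixed_phy_register() above now hands back the phy_device itself, using error pointers instead of a plain int, so callers get the handle directly rather than looking it up afterwards. The userspace model below shows that calling convention with simplified stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(); names and the fake device type are illustrative.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Small negative errno values folded into the top of the address space. */
static void *err_ptr(long err)      { return (void *)(uintptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

struct fake_phy { int addr; };

/* Returns the device on success or an encoded error, mirroring the new
 * "return the object or an error pointer" convention.
 */
static struct fake_phy *register_fixed(int want_fail)
{
        static struct fake_phy phy = { .addr = 0 };

        if (want_fail)
                return err_ptr(-ENOSPC);
        return &phy;
}

int main(void)
{
        struct fake_phy *phy = register_fixed(0);

        if (is_err(phy))
                return (int)-ptr_err(phy);
        printf("got PHY at address %d\n", phy->addr);
        return 0;
}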
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define MDIO_CMD 0x00 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define MDIO_CFG 0x04 +#define MDIO_C22 (1 << 0) +#define MDIO_C45 0 +#define MDIO_CLK_DIV_SHIFT 4 +#define MDIO_CLK_DIV_MASK 0x3F +#define MDIO_SUPP_PREAMBLE (1 << 12) + +struct unimac_mdio_priv { + struct mii_bus *mii_bus; + void __iomem *base; +}; + +static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) +{ + u32 reg; + + reg = __raw_readl(priv->base + MDIO_CMD); + reg |= MDIO_START_BUSY; + __raw_writel(reg, priv->base + MDIO_CMD); +} + +static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) +{ + return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; +} + +static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct unimac_mdio_priv *priv = bus->priv; + unsigned int timeout = 1000; + u32 cmd; + + /* Prepare the read operation */ + cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT); + __raw_writel(cmd, priv->base + MDIO_CMD); + + /* Start MDIO transaction */ + unimac_mdio_start(priv); + + do { + if (!unimac_mdio_busy(priv)) + break; + + usleep_range(1000, 2000); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + cmd = __raw_readl(priv->base + MDIO_CMD); + if (cmd & MDIO_READ_FAIL) + return -EIO; + + return cmd & 0xffff; +} + +static int unimac_mdio_write(struct mii_bus *bus, int phy_id, + int reg, u16 val) +{ + struct unimac_mdio_priv *priv = bus->priv; + unsigned int timeout = 1000; + u32 cmd; + + /* Prepare the write operation */ + cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | + (reg << MDIO_REG_SHIFT) | (0xffff & val); + __raw_writel(cmd, priv->base + MDIO_CMD); + + unimac_mdio_start(priv); + + do { + if (!unimac_mdio_busy(priv)) + break; + + usleep_range(1000, 2000); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static int unimac_mdio_probe(struct platform_device *pdev) +{ + struct unimac_mdio_priv *priv; + struct device_node *np; + struct mii_bus *bus; + struct resource *r; + int ret; + + np = pdev->dev.of_node; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* Just ioremap, as this MDIO block is usually integrated into an + * Ethernet MAC controller register range + */ + priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!priv->base) { + dev_err(&pdev->dev, "failed to remap register\n"); + return -ENOMEM; + } + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) + return -ENOMEM; + + bus = priv->mii_bus; + bus->priv = priv; + bus->name = "unimac MII bus"; + bus->parent = &pdev->dev; + bus->read = unimac_mdio_read; + bus->write = unimac_mdio_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + + bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!bus->irq) { + ret = -ENOMEM; + goto out_mdio_free; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "MDIO bus registration failed\n"); + goto out_mdio_irq; + } + + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); + + return 0; + +out_mdio_irq: + kfree(bus->irq); +out_mdio_free: + mdiobus_free(bus); + 
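Illustration: unimac_mdio_read()/write() above kick off the transaction and then poll the START_BUSY bit a bounded number of times, sleeping between polls and giving up with -ETIMEDOUT. The self-contained model below shows that wait loop; the fake "hardware" clears busy after a few polls, and the sleep the real driver does via usleep_range() is only noted in a comment.

#include <stdio.h>
#include <errno.h>

static int busy_countdown = 3;          /* pretend hardware: busy for 3 polls */

static int hw_busy(void)
{
        return busy_countdown-- > 0;
}

static int wait_not_busy(void)
{
        unsigned int timeout = 1000;

        do {
                if (!hw_busy())
                        break;
                /* the driver sleeps ~1-2 ms here between polls */
        } while (--timeout);

        if (!timeout)
                return -ETIMEDOUT;      /* retry budget exhausted */
        return 0;                       /* transaction completed */
}

int main(void)
{
        printf("wait_not_busy() = %d\n", wait_not_busy());
        return 0;
}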
return ret; +} + +static int unimac_mdio_remove(struct platform_device *pdev) +{ + struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); + + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); + + return 0; +} + +static struct of_device_id unimac_mdio_ids[] = { + { .compatible = "brcm,genet-mdio-v4", }, + { .compatible = "brcm,genet-mdio-v3", }, + { .compatible = "brcm,genet-mdio-v2", }, + { .compatible = "brcm,genet-mdio-v1", }, + { .compatible = "brcm,unimac-mdio", }, + { /* sentinel */ }, +}; + +static struct platform_driver unimac_mdio_driver = { + .driver = { + .name = "unimac-mdio", + .owner = THIS_MODULE, + .of_match_table = unimac_mdio_ids, + }, + .probe = unimac_mdio_probe, + .remove = unimac_mdio_remove, +}; +module_platform_driver(unimac_mdio_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:unimac-mdio"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 4eaadcfcb0fe5ed2d5bd82a4632989916ade90e2..50051f271b10e8c02f34fae073f8d48ee15b8a97 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -553,8 +553,14 @@ static ssize_t phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; - return sprintf(buf, "%s\n", phy_modes(phydev->interface)); + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); } static DEVICE_ATTR_RO(phy_interface); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a854d38c231dfebc983a3bcd4840fdd5a2257bd2..1dfffdc9dfc3577bd6893688da191c38539e243f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -955,7 +955,7 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 3) Read reg 14 // Read MMD data */ -static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad, int addr) { struct phy_driver *phydrv = phydev->drv; @@ -971,6 +971,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, } return value; } +EXPORT_SYMBOL(phy_read_mmd_indirect); /** * phy_write_mmd_indirect - writes data to the MMD registers @@ -988,7 +989,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 3) Write reg 14 // Write MMD data */ -static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, int devad, int addr, u32 data) { struct phy_driver *phydrv = phydev->drv; @@ -1002,6 +1003,7 @@ static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); } } +EXPORT_SYMBOL(phy_write_mmd_indirect); /** * phy_init_eee - init and check the EEE feature @@ -1017,12 +1019,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) { /* According to 802.3az,the EEE is supported only in full duplex-mode. * Also EEE feature is active when core is operating with MII, GMII - * or RGMII. + * or RGMII. Internal PHYs are also allowed to proceed and should + * return an error if they do not support EEE. 
*/ if ((phydev->duplex == DUPLEX_FULL) && ((phydev->interface == PHY_INTERFACE_MODE_MII) || (phydev->interface == PHY_INTERFACE_MODE_GMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { + (phydev->interface == PHY_INTERFACE_MODE_RGMII) || + phy_is_internal(phydev))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; int status; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ca5ec3e18d3662daeebd999291cc6a78c73e79de..3fc91e89f5a564bb36ab251c81ec083fc7cab68d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -230,13 +230,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, for (i = 1; i < num_ids && c45_ids->devices_in_package == 0; i++) { - reg_addr = MII_ADDR_C45 | i << 16 | 6; + reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; - reg_addr = MII_ADDR_C45 | i << 16 | 5; + reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index fa0d71727894a2ef8dc74f8e27c68bbce87f44fa..80e6f3430f65e96dfe3e3b8b13d694f727f330cc 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1103,7 +1103,7 @@ static void ppp_setup(struct net_device *dev) dev->type = ARPHRD_PPP; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->features |= NETIF_F_NETNS_LOCAL; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } /* diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index ae7cd7f3656d04174b74ae534814029795217f18..92578d72e4ee51ce26d3e23d4b86099a0097801d 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -47,22 +47,22 @@ static const int phy_BCM5400_link_table[8][3] = { { 1, 0, 1 }, /* 1000BT */ }; -static inline int __phy_read(struct mii_phy* phy, int id, int reg) +static inline int __sungem_phy_read(struct mii_phy* phy, int id, int reg) { return phy->mdio_read(phy->dev, id, reg); } -static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val) +static inline void __sungem_phy_write(struct mii_phy* phy, int id, int reg, int val) { phy->mdio_write(phy->dev, id, reg, val); } -static inline int phy_read(struct mii_phy* phy, int reg) +static inline int sungem_phy_read(struct mii_phy* phy, int reg) { return phy->mdio_read(phy->dev, phy->mii_id, reg); } -static inline void phy_write(struct mii_phy* phy, int reg, int val) +static inline void sungem_phy_write(struct mii_phy* phy, int reg, int val) { phy->mdio_write(phy->dev, phy->mii_id, reg, val); } @@ -72,21 +72,21 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id) u16 val; int limit = 10000; - val = __phy_read(phy, phy_id, MII_BMCR); + val = __sungem_phy_read(phy, phy_id, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_PDOWN); val |= BMCR_RESET; - __phy_write(phy, phy_id, MII_BMCR, val); + __sungem_phy_write(phy, phy_id, MII_BMCR, val); udelay(100); while (--limit) { - val = __phy_read(phy, phy_id, MII_BMCR); + val = __sungem_phy_read(phy, phy_id, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) - __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); + __sungem_phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } @@ -95,19 +95,19 @@ static int bcm5201_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, 
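Illustration: the get_phy_c45_ids() change above spells out MDIO_DEVS1/MDIO_DEVS2 instead of bare register numbers when it assembles the Clause 45 address word passed to mdiobus_read(). A small sketch of how such an address word is packed; the C45 flag bit shown here is an assumption standing in for the kernel's MII_ADDR_C45, while the register numbers 5 and 6 match the defines the patch switches to.

#include <stdio.h>
#include <stdint.h>

#define ADDR_C45             (1u << 30)     /* illustrative "this is C45" flag */
#define C45_REG(devad, reg)  (ADDR_C45 | ((uint32_t)(devad) << 16) | ((reg) & 0xffff))

#define DEVS1   5       /* devices-in-package, low word  (MDIO_DEVS1) */
#define DEVS2   6       /* devices-in-package, high word (MDIO_DEVS2) */

int main(void)
{
        unsigned int devad = 1;             /* e.g. the PMA/PMD MMD */

        printf("DEVS2 address word: 0x%08x\n", C45_REG(devad, DEVS2));
        printf("DEVS1 address word: 0x%08x\n", C45_REG(devad, DEVS1));
        return 0;
}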
MII_BCM5201_MULTIPHY); + data = sungem_phy_read(phy, MII_BCM5201_MULTIPHY); data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; - phy_write(phy, MII_BCM5201_MULTIPHY, data); + sungem_phy_write(phy, MII_BCM5201_MULTIPHY, data); - phy_write(phy, MII_BCM5201_INTERRUPT, 0); + sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); return 0; } static int bcm5201_suspend(struct mii_phy* phy) { - phy_write(phy, MII_BCM5201_INTERRUPT, 0); - phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); + sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); + sungem_phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); return 0; } @@ -116,20 +116,20 @@ static int bcm5221_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); - phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; @@ -139,12 +139,12 @@ static int bcm5221_suspend(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); return 0; @@ -154,20 +154,20 @@ static int bcm5241_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); - phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; @@ -177,12 +177,12 @@ static int bcm5241_suspend(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, 
MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); return 0; @@ -193,26 +193,26 @@ static int bcm5400_init(struct mii_phy* phy) u16 data; /* Configure for gigabit full duplex */ - data = phy_read(phy, MII_BCM5400_AUXCONTROL); + data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data |= MII_BCM5400_AUXCONTROL_PWR10BASET; - phy_write(phy, MII_BCM5400_AUXCONTROL, data); + sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(100); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); - data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); + data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; - __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); + __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); - data = phy_read(phy, MII_BCM5400_AUXCONTROL); + data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; - phy_write(phy, MII_BCM5400_AUXCONTROL, data); + sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); return 0; } @@ -220,7 +220,7 @@ static int bcm5400_init(struct mii_phy* phy) static int bcm5400_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } @@ -230,7 +230,7 @@ static int bcm5401_init(struct mii_phy* phy) u16 data; int rev; - rev = phy_read(phy, MII_PHYSID2) & 0x000f; + rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0 || rev == 3) { /* Some revisions of 5401 appear to need this * initialisation sequence to disable, according @@ -243,32 +243,32 @@ static int bcm5401_init(struct mii_phy* phy) * Note: This should (and does) match tg3_init_5401phy_dsp * in the tg3.c driver. 
-DaveM */ - phy_write(phy, 0x18, 0x0c20); - phy_write(phy, 0x17, 0x0012); - phy_write(phy, 0x15, 0x1804); - phy_write(phy, 0x17, 0x0013); - phy_write(phy, 0x15, 0x1204); - phy_write(phy, 0x17, 0x8006); - phy_write(phy, 0x15, 0x0132); - phy_write(phy, 0x17, 0x8006); - phy_write(phy, 0x15, 0x0232); - phy_write(phy, 0x17, 0x201f); - phy_write(phy, 0x15, 0x0a20); + sungem_phy_write(phy, 0x18, 0x0c20); + sungem_phy_write(phy, 0x17, 0x0012); + sungem_phy_write(phy, 0x15, 0x1804); + sungem_phy_write(phy, 0x17, 0x0013); + sungem_phy_write(phy, 0x15, 0x1204); + sungem_phy_write(phy, 0x17, 0x8006); + sungem_phy_write(phy, 0x15, 0x0132); + sungem_phy_write(phy, 0x17, 0x8006); + sungem_phy_write(phy, 0x15, 0x0232); + sungem_phy_write(phy, 0x17, 0x201f); + sungem_phy_write(phy, 0x15, 0x0a20); } /* Configure for gigabit full duplex */ - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); - data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); + data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; - __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); + __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); return 0; } @@ -276,7 +276,7 @@ static int bcm5401_init(struct mii_phy* phy) static int bcm5401_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } @@ -288,19 +288,19 @@ static int bcm5411_init(struct mii_phy* phy) /* Here's some more Apple black magic to setup * some voltage stuffs. 
*/ - phy_write(phy, 0x1c, 0x8c23); - phy_write(phy, 0x1c, 0x8ca3); - phy_write(phy, 0x1c, 0x8c23); + sungem_phy_write(phy, 0x1c, 0x8c23); + sungem_phy_write(phy, 0x1c, 0x8ca3); + sungem_phy_write(phy, 0x1c, 0x8c23); /* Here, Apple seems to want to reset it, do * it as well */ - phy_write(phy, MII_BMCR, BMCR_RESET); - phy_write(phy, MII_BMCR, 0x1340); + sungem_phy_write(phy, MII_BMCR, BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, 0x1340); - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); @@ -321,7 +321,7 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -331,12 +331,12 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -350,11 +350,11 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); /* First reset the PHY */ - phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { @@ -369,7 +369,7 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -378,8 +378,8 @@ static int genmii_poll_link(struct mii_phy *phy) { u16 status; - (void)phy_read(phy, MII_BMSR); - status = phy_read(phy, MII_BMSR); + (void)sungem_phy_read(phy, MII_BMSR); + status = sungem_phy_read(phy, MII_BMSR); if ((status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) @@ -392,7 +392,7 @@ static int genmii_read_link(struct mii_phy *phy) u16 lpa; if (phy->autoneg) { - lpa = phy_read(phy, MII_LPA); + lpa = sungem_phy_read(phy, MII_LPA); if (lpa & (LPA_10FULL | LPA_100FULL)) phy->duplex = DUPLEX_FULL; @@ -413,7 +413,7 @@ static int genmii_read_link(struct mii_phy *phy) static int generic_suspend(struct mii_phy* phy) { - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); return 0; } @@ -423,27 +423,27 @@ static int bcm5421_init(struct mii_phy* phy) u16 data; unsigned int id; - id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); + id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); /* Revision 0 of 5421 needs some fixups */ if (id == 0x002060e0) { /* This is borrowed from MacOS */ - phy_write(phy, 0x18, 0x1007); - data = phy_read(phy, 0x18); - phy_write(phy, 0x18, data | 0x0400); - phy_write(phy, 0x18, 0x0007); - data = phy_read(phy, 0x18); - phy_write(phy, 0x18, data | 0x0800); - phy_write(phy, 0x17, 0x000a); - 
data = phy_read(phy, 0x15); - phy_write(phy, 0x15, data | 0x0200); + sungem_phy_write(phy, 0x18, 0x1007); + data = sungem_phy_read(phy, 0x18); + sungem_phy_write(phy, 0x18, data | 0x0400); + sungem_phy_write(phy, 0x18, 0x0007); + data = sungem_phy_read(phy, 0x18); + sungem_phy_write(phy, 0x18, data | 0x0800); + sungem_phy_write(phy, 0x17, 0x000a); + data = sungem_phy_read(phy, 0x15); + sungem_phy_write(phy, 0x15, data | 0x0200); } /* Pick up some init code from OF for K2 version */ if ((id & 0xfffffff0) == 0x002062e0) { - phy_write(phy, 4, 0x01e1); - phy_write(phy, 9, 0x0300); + sungem_phy_write(phy, 4, 0x01e1); + sungem_phy_write(phy, 9, 0x0300); } /* Check if we can enable automatic low power */ @@ -455,9 +455,9 @@ static int bcm5421_init(struct mii_phy* phy) can_low_power = 0; if (can_low_power) { /* Enable automatic low-power */ - phy_write(phy, 0x1c, 0x9002); - phy_write(phy, 0x1c, 0xa821); - phy_write(phy, 0x1c, 0x941d); + sungem_phy_write(phy, 0x1c, 0x9002); + sungem_phy_write(phy, 0x1c, 0xa821); + sungem_phy_write(phy, 0x1c, 0x941d); } } #endif /* CONFIG_PPC_PMAC */ @@ -476,7 +476,7 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -490,21 +490,21 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise */ - adv = phy_read(phy, MII_1000BASETCONTROL); + adv = sungem_phy_read(phy, MII_1000BASETCONTROL); adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, adv); + sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -518,11 +518,11 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); /* First reset the PHY */ - phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { @@ -539,7 +539,7 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) // XXX Should we set the sungem to GII now on 1000BT ? - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -550,7 +550,7 @@ static int bcm54xx_read_link(struct mii_phy *phy) u16 val; if (phy->autoneg) { - val = phy_read(phy, MII_BCM5400_AUXSTATUS); + val = sungem_phy_read(phy, MII_BCM5400_AUXSTATUS); link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); phy->duplex = phy_BCM5400_link_table[link_mode][0] ? @@ -559,7 +559,7 @@ static int bcm54xx_read_link(struct mii_phy *phy) SPEED_1000 : (phy_BCM5400_link_table[link_mode][1] ? 
SPEED_100 : SPEED_10); - val = phy_read(phy, MII_LPA); + val = sungem_phy_read(phy, MII_LPA); phy->pause = (phy->duplex == DUPLEX_FULL) && ((val & LPA_PAUSE) != 0); } @@ -575,19 +575,19 @@ static int marvell88e1111_init(struct mii_phy* phy) u16 rev; /* magic init sequence for rev 0 */ - rev = phy_read(phy, MII_PHYSID2) & 0x000f; + rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0) { - phy_write(phy, 0x1d, 0x000a); - phy_write(phy, 0x1e, 0x0821); + sungem_phy_write(phy, 0x1d, 0x000a); + sungem_phy_write(phy, 0x1e, 0x0821); - phy_write(phy, 0x1d, 0x0006); - phy_write(phy, 0x1e, 0x8600); + sungem_phy_write(phy, 0x1d, 0x0006); + sungem_phy_write(phy, 0x1e, 0x8600); - phy_write(phy, 0x1d, 0x000b); - phy_write(phy, 0x1e, 0x0100); + sungem_phy_write(phy, 0x1d, 0x000b); + sungem_phy_write(phy, 0x1e, 0x0100); - phy_write(phy, 0x1d, 0x0004); - phy_write(phy, 0x1e, 0x4850); + sungem_phy_write(phy, 0x1d, 0x0004); + sungem_phy_write(phy, 0x1e, 0x4850); } return 0; } @@ -600,8 +600,8 @@ static int bcm5421_poll_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x1000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x1000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK) >> 5; @@ -609,8 +609,8 @@ static int bcm5421_poll_link(struct mii_phy* phy) return genmii_poll_link(phy); /* try to find out whether we have a link */ - phy_write(phy, MII_NCONFIG, 0x2000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x2000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & 0x0020) return 0; @@ -624,8 +624,8 @@ static int bcm5421_read_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x1000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x1000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK ) >> 5; @@ -635,8 +635,8 @@ static int bcm5421_read_link(struct mii_phy* phy) phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ - phy_write(phy, MII_NCONFIG, 0x2000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x2000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if ( (phy_reg & 0x0080) >> 7) phy->duplex |= DUPLEX_HALF; @@ -649,14 +649,14 @@ static int bcm5421_read_link(struct mii_phy* phy) static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) { /* enable fiber mode */ - phy_write(phy, MII_NCONFIG, 0x9020); + sungem_phy_write(phy, MII_NCONFIG, 0x9020); /* LEDs active in both modes, autosense prio = fiber */ - phy_write(phy, MII_NCONFIG, 0x945f); + sungem_phy_write(phy, MII_NCONFIG, 0x945f); if (!autoneg) { /* switch off fibre autoneg */ - phy_write(phy, MII_NCONFIG, 0xfc01); - phy_write(phy, 0x0b, 0x0004); + sungem_phy_write(phy, MII_NCONFIG, 0xfc01); + sungem_phy_write(phy, 0x0b, 0x0004); } phy->autoneg = autoneg; @@ -673,8 +673,8 @@ static int bcm5461_poll_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x7c00); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7c00); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; @@ -682,8 +682,8 @@ static int bcm5461_poll_link(struct mii_phy* phy) return genmii_poll_link(phy); /* find out whether we have a link */ - phy_write(phy, MII_NCONFIG, 0x7000); - phy_reg = phy_read(phy, MII_NCONFIG); + 
sungem_phy_write(phy, MII_NCONFIG, 0x7000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_LINK) return 1; @@ -699,8 +699,8 @@ static int bcm5461_read_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x7c00); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7c00); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; @@ -711,8 +711,8 @@ static int bcm5461_read_link(struct mii_phy* phy) phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ - phy_write(phy, MII_NCONFIG, 0x7000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_DUPLEX) phy->duplex |= DUPLEX_FULL; @@ -725,15 +725,15 @@ static int bcm5461_read_link(struct mii_phy* phy) static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) { /* select fiber mode, enable 1000 base-X registers */ - phy_write(phy, MII_NCONFIG, 0xfc0b); + sungem_phy_write(phy, MII_NCONFIG, 0xfc0b); if (autoneg) { /* enable fiber with no autonegotiation */ - phy_write(phy, MII_ADVERTISE, 0x01e0); - phy_write(phy, MII_BMCR, 0x1140); + sungem_phy_write(phy, MII_ADVERTISE, 0x01e0); + sungem_phy_write(phy, MII_BMCR, 0x1140); } else { /* enable fiber with autonegotiation */ - phy_write(phy, MII_BMCR, 0x0140); + sungem_phy_write(phy, MII_BMCR, 0x0140); } phy->autoneg = autoneg; @@ -752,7 +752,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -766,7 +766,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise & enable crossover detect * XXX How do we advertise 1000BT ? Darwin source is @@ -774,7 +774,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) * write to control... Someone has specs for those * beasts ? */ - adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); + adv = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); @@ -782,12 +782,12 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, adv); + sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -801,7 +801,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); ctl |= BMCR_RESET; @@ -824,7 +824,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) /* Disable crossover. 
Again, the way Apple does it is strange, * though I don't assume they are wrong ;) */ - ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); + ctl2 = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | MII_1000BASETCONTROL_FULLDUPLEXCAP | @@ -833,11 +833,11 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) ctl2 |= (fd == DUPLEX_FULL) ? MII_1000BASETCONTROL_FULLDUPLEXCAP : MII_1000BASETCONTROL_HALFDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, ctl2); + sungem_phy_write(phy, MII_1000BASETCONTROL, ctl2); // XXX Should we set the sungem to GII now on 1000BT ? - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -847,7 +847,7 @@ static int marvell_read_link(struct mii_phy *phy) u16 status, pmask; if (phy->autoneg) { - status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); + status = sungem_phy_read(phy, MII_M1011_PHY_SPEC_STATUS); if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) return -EAGAIN; if (status & MII_M1011_PHY_SPEC_STATUS_1000) @@ -1174,7 +1174,7 @@ int sungem_phy_probe(struct mii_phy *phy, int mii_id) goto fail; /* Read ID and find matching entry */ - id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); + id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", id, mii_id); for (i=0; (def = mii_phy_table[i]) != NULL; i++) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ef10302ec936ee5969001bb9da484b80d573c1ee..2368395d8ae5acb957e8a819f71dc08480c31ed1 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team) { if (!team->notify_peers.count || !netif_running(team->dev)) return; - atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); + atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); schedule_delayed_work(&team->notify_peers.dw, 0); } @@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team) { if (!team->mcast_rejoin.count || !netif_running(team->dev)) return; - atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); + atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); schedule_delayed_work(&team->mcast_rejoin.dw, 0); } @@ -970,7 +970,8 @@ static void __team_compute_features(struct team *team) struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; unsigned short max_hard_header_len = ETH_HLEN; - unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; list_for_each_entry(port, &team->port_list, list) { vlan_features = netdev_increment_features(vlan_features, @@ -985,8 +986,9 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hard_header_len = max_hard_header_len; - flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE; - team->dev->priv_flags = flags | dst_release_flag; + team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) + team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; netdev_change_features(team->dev); } @@ -1003,7 +1005,6 @@ static int team_port_enter(struct team *team, struct team_port *port) int err = 0; dev_hold(team->dev); - port->dev->priv_flags |= IFF_TEAM_PORT; if 
(team->ops.port_enter) { err = team->ops.port_enter(team, port); if (err) { @@ -1016,7 +1017,6 @@ static int team_port_enter(struct team *team, struct team_port *port) return 0; err_port_enter: - port->dev->priv_flags &= ~IFF_TEAM_PORT; dev_put(team->dev); return err; @@ -1026,7 +1026,6 @@ static void team_port_leave(struct team *team, struct team_port *port) { if (team->ops.port_leave) team->ops.port_leave(team, port); - port->dev->priv_flags &= ~IFF_TEAM_PORT; dev_put(team->dev); } @@ -1075,6 +1074,25 @@ static void team_port_disable_netpoll(struct team_port *port) } #endif +static int team_upper_dev_link(struct net_device *dev, + struct net_device *port_dev) +{ + int err; + + err = netdev_master_upper_dev_link(port_dev, dev); + if (err) + return err; + port_dev->priv_flags |= IFF_TEAM_PORT; + return 0; +} + +static void team_upper_dev_unlink(struct net_device *dev, + struct net_device *port_dev) +{ + netdev_upper_dev_unlink(port_dev, dev); + port_dev->priv_flags &= ~IFF_TEAM_PORT; +} + static void __team_port_change_port_added(struct team_port *port, bool linkup); static int team_dev_type_check_change(struct net_device *dev, struct net_device *port_dev); @@ -1161,13 +1179,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_enable_netpoll; } - err = netdev_master_upper_dev_link(port_dev, dev); - if (err) { - netdev_err(dev, "Device %s failed to set upper link\n", - portname); - goto err_set_upper_link; - } - err = netdev_rx_handler_register(port_dev, team_handle_frame, port); if (err) { @@ -1176,6 +1187,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } + err = team_upper_dev_link(dev, port_dev); + if (err) { + netdev_err(dev, "Device %s failed to set upper link\n", + portname); + goto err_set_upper_link; + } + err = __team_option_inst_add_port(team, port); if (err) { netdev_err(dev, "Device %s failed to add per-port options\n", @@ -1195,12 +1213,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return 0; err_option_port_add: + team_upper_dev_unlink(dev, port_dev); + +err_set_upper_link: netdev_rx_handler_unregister(port_dev); err_handler_register: - netdev_upper_dev_unlink(port_dev, dev); - -err_set_upper_link: team_port_disable_netpoll(port); err_enable_netpoll: @@ -1239,8 +1257,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); + team_upper_dev_unlink(dev, port_dev); netdev_rx_handler_unregister(port_dev); - netdev_upper_dev_unlink(port_dev, dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); dev_uc_unsync(port_dev, dev); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 5d194093f3e1f3b3c1893faf0cf8ebb266facdad..2c05f6cdb12f3a1e2ae9bed29c6241cee7dc3a46 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -890,7 +890,7 @@ static const struct driver_info ax88772_info = { .unbind = ax88772_unbind, .status = asix_status, .link_reset = ax88772_link_reset, - .reset = ax88772_reset, + .reset = ax88772_link_reset, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 604ef210a4dea5f8e8bd9b543b3dc4156b357286..5cfd414b9a3ea5cc59224cffe0dd675bef490930 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -22,6 +22,8 @@ #include #include 
#include +#include +#include /* Version Information */ #define DRIVER_VERSION "v1.06.1 (2014/10/01)" @@ -129,7 +131,9 @@ #define OCP_SRAM_ADDR 0xa436 #define OCP_SRAM_DATA 0xa438 #define OCP_DOWN_SPEED 0xa442 -#define OCP_EEE_CFG2 0xa5d0 +#define OCP_EEE_ABLE 0xa5c4 +#define OCP_EEE_ADV 0xa5d0 +#define OCP_EEE_LPABLE 0xa5d2 #define OCP_ADC_CFG 0xbc06 /* SRAM Register */ @@ -361,7 +365,8 @@ #define EEE_NWAY_EN 0x1000 #define TX_QUIET_EN 0x0200 #define RX_QUIET_EN 0x0100 -#define SDRISETIME 0x0010 /* bit 4 ~ 6 */ +#define sd_rise_time_mask 0x0070 +#define sd_rise_time(x) (min(x, 7) << 4) /* bit 4 ~ 6 */ #define RG_RXLPI_MSK_HFDUP 0x0008 #define SDFALLTIME 0x0007 /* bit 0 ~ 2 */ @@ -373,7 +378,8 @@ #define RG_EEEPRG_EN 0x0010 /* OCP_EEE_CONFIG3 */ -#define FST_SNR_EYE_R 0x1500 /* bit 7 ~ 15 */ +#define fast_snr_mask 0xff80 +#define fast_snr(x) (min(x, 0x1ff) << 7) /* bit 7 ~ 15 */ #define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */ #define MSK_PH 0x0006 /* bit 0 ~ 3 */ @@ -382,11 +388,6 @@ #define FUN_ADDR 0x0000 #define FUN_DATA 0x4000 /* bit[4:0] device addr */ -#define DEVICE_ADDR 0x0007 - -/* OCP_EEE_DATA */ -#define EEE_ADDR 0x003C -#define EEE_DATA 0x0002 /* OCP_EEE_CFG */ #define CTAP_SHORT_EN 0x0040 @@ -395,10 +396,6 @@ /* OCP_DOWN_SPEED */ #define EN_10M_BGOFF 0x0080 -/* OCP_EEE_CFG2 */ -#define MY1000_EEE 0x0004 -#define MY100_EEE 0x0002 - /* OCP_ADC_CFG */ #define CKADSEL_L 0x0100 #define ADC_EN 0x0080 @@ -424,7 +421,7 @@ enum rtl_register_content { FULL_DUP = 0x01, }; -#define RTL8152_MAX_TX 10 +#define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 #define CRC_SIZE 4 @@ -506,6 +503,7 @@ struct rx_desc { #define IPF (1 << 23) /* IP checksum fail */ #define UDPF (1 << 22) /* UDP checksum fail */ #define TCPF (1 << 21) /* TCP checksum fail */ +#define RX_VLAN_TAG (1 << 16) __le32 opts4; __le32 opts5; @@ -531,6 +529,7 @@ struct tx_desc { #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU +#define TX_VLAN_TAG (1 << 16) }; struct r8152; @@ -575,6 +574,8 @@ struct r8152 { void (*up)(struct r8152 *); void (*down)(struct r8152 *); void (*unload)(struct r8152 *); + int (*eee_get)(struct r8152 *, struct ethtool_eee *); + int (*eee_set)(struct r8152 *, struct ethtool_eee *); } rtl_ops; int intr_interval; @@ -607,9 +608,9 @@ enum tx_csum_stat { * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ static const int multicast_filter_limit = 32; -static unsigned int rx_buf_sz = 16384; +static unsigned int agg_buf_sz = 16384; -#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \ +#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \ VLAN_ETH_HLEN - VLAN_HLEN) static @@ -623,8 +624,8 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) return -ENOMEM; ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), - RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, - value, index, tmp, size, 500); + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + value, index, tmp, size, 500); memcpy(data, tmp, size); kfree(tmp); @@ -643,8 +644,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) return -ENOMEM; ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), - RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, - value, index, tmp, size, 500); + RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, + value, index, tmp, size, 500); kfree(tmp); @@ -652,7 +653,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) } static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, - void *data, u16 type) + void *data, u16 type) { u16 limit = 64; int ret = 0; @@ -692,7 +693,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, } static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, - u16 size, void *data, u16 type) + u16 size, void *data, u16 type) { int ret; u16 byteen_start, byteen_end, byen; @@ -726,8 +727,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, while (size) { if (size > limit) { ret = set_registers(tp, index, - type | BYTE_EN_DWORD, - limit, data); + type | BYTE_EN_DWORD, + limit, data); if (ret < 0) goto error1; @@ -736,8 +737,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, size -= limit; } else { ret = set_registers(tp, index, - type | BYTE_EN_DWORD, - size, data); + type | BYTE_EN_DWORD, + size, data); if (ret < 0) goto error1; @@ -972,44 +973,21 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) usb_autopm_put_interface(tp->intf); } -static -int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); - -static inline void set_ethernet_addr(struct r8152 *tp) -{ - struct net_device *dev = tp->netdev; - int ret; - u8 node_id[8] = {0}; - - if (tp->version == RTL_VER_01) - ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id); - else - ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id); - - if (ret < 0) { - netif_notice(tp, probe, dev, "inet addr fail\n"); - } else { - if (tp->version != RTL_VER_01) { - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, - CRWECR_CONFIG); - pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, - sizeof(node_id), node_id); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, - CRWECR_NORAML); - } - - memcpy(dev->dev_addr, node_id, dev->addr_len); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - } -} +static int +r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; + int ret = -EADDRNOTAVAIL; if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; + goto out1; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out1; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); @@ -1017,7 +995,40 @@ static int rtl8152_set_mac_address(struct net_device 
*netdev, void *p) pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); - return 0; + usb_autopm_put_interface(tp->intf); +out1: + return ret; +} + +static int set_ethernet_addr(struct r8152 *tp) +{ + struct net_device *dev = tp->netdev; + struct sockaddr sa; + int ret; + + if (tp->version == RTL_VER_01) + ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); + else + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + + if (ret < 0) { + netif_err(tp, probe, dev, "Get ether addr fail\n"); + } else if (!is_valid_ether_addr(sa.sa_data)) { + netif_err(tp, probe, dev, "Invalid ether addr %pM\n", + sa.sa_data); + eth_hw_addr_random(dev); + ether_addr_copy(sa.sa_data, dev->dev_addr); + ret = rtl8152_set_mac_address(dev, &sa); + netif_info(tp, probe, dev, "Random ether addr %pM\n", + sa.sa_data); + } else { + if (tp->version == RTL_VER_01) + ether_addr_copy(dev->dev_addr, sa.sa_data); + else + ret = rtl8152_set_mac_address(dev, &sa); + } + + return ret; } static void read_bulk_callback(struct urb *urb) @@ -1248,13 +1259,13 @@ static int alloc_all_mem(struct r8152 *tp) skb_queue_head_init(&tp->tx_queue); for (i = 0; i < RTL8152_MAX_RX; i++) { - buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != rx_agg_align(buf)) { kfree(buf); - buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL, + buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; @@ -1274,13 +1285,13 @@ static int alloc_all_mem(struct r8152 *tp) } for (i = 0; i < RTL8152_MAX_TX; i++) { - buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != tx_agg_align(buf)) { kfree(buf); - buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL, + buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; @@ -1311,8 +1322,8 @@ static int alloc_all_mem(struct r8152 *tp) tp->intr_interval = (int)ep_intr->desc.bInterval; usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3), - tp->intr_buff, INTBUFSIZE, intr_callback, - tp, tp->intr_interval); + tp->intr_buff, INTBUFSIZE, intr_callback, + tp, tp->intr_interval); return 0; @@ -1354,8 +1365,7 @@ static inline __be16 get_protocol(struct sk_buff *skb) return protocol; } -/* - * r8152_csum_workaround() +/* r8152_csum_workaround() * The hw limites the value the transport offset. When the offset is out of the * range, calculate the checksum by sw. */ @@ -1398,8 +1408,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, } } -/* - * msdn_giant_send_check() +/* msdn_giant_send_check() * According to the document of microsoft, the TCP Pseudo Header excludes the * packet length for IPv6 TCP large packets. 
*/ @@ -1422,6 +1431,25 @@ static int msdn_giant_send_check(struct sk_buff *skb) return ret; } +static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) { + u32 opts2; + + opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb)); + desc->opts2 |= cpu_to_le32(opts2); + } +} + +static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RX_VLAN_TAG) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + swab16(opts2 & 0xffff)); +} + static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len, u32 transport_offset) { @@ -1518,8 +1546,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) spin_unlock(&tx_queue->lock); tx_data = agg->head; - agg->skb_num = agg->skb_len = 0; - remain = rx_buf_sz; + agg->skb_num = 0; + agg->skb_len = 0; + remain = agg_buf_sz; while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; @@ -1548,6 +1577,8 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) continue; } + rtl_tx_vlan_tag(tx_desc, skb); + tx_data += sizeof(*tx_desc); len = skb->len; @@ -1566,7 +1597,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) dev_kfree_skb_any(skb); - remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); + remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); } if (!skb_queue_empty(&skb_head)) { @@ -1689,6 +1720,7 @@ static void rx_bottom(struct r8152 *tp) memcpy(skb->data, rx_data, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, netdev); + rtl_rx_vlan_tag(rx_desc, skb); netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += pkt_len; @@ -1772,8 +1804,8 @@ static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - agg->head, rx_buf_sz, - (usb_complete_t)read_bulk_callback, agg); + agg->head, agg_buf_sz, + (usb_complete_t)read_bulk_callback, agg); return usb_submit_urb(agg->urb, mem_flags); } @@ -1835,18 +1867,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) /* Unconditionally log net taps. */ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); ocp_data |= RCR_AM | RCR_AAP; - mc_filter[1] = mc_filter[0] = 0xffffffff; + mc_filter[1] = 0xffffffff; + mc_filter[0] = 0xffffffff; } else if ((netdev_mc_count(netdev) > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. 
*/ ocp_data |= RCR_AM; - mc_filter[1] = mc_filter[0] = 0xffffffff; + mc_filter[1] = 0xffffffff; + mc_filter[0] = 0xffffffff; } else { struct netdev_hw_addr *ha; - mc_filter[1] = mc_filter[0] = 0; + mc_filter[1] = 0; + mc_filter[0] = 0; netdev_for_each_mc_addr(ha, netdev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } @@ -1861,7 +1897,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) } static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, - struct net_device *netdev) + struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); @@ -1877,8 +1913,9 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, usb_mark_last_busy(tp->udev); tasklet_schedule(&tp->tl); } - } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) + } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); + } return NETDEV_TX_OK; } @@ -1903,7 +1940,7 @@ static void rtl8152_nic_reset(struct r8152 *tp) for (i = 0; i < 1000; i++) { if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; - udelay(100); + usleep_range(100, 400); } } @@ -1911,8 +1948,8 @@ static void set_tx_qlen(struct r8152 *tp) { struct net_device *netdev = tp->netdev; - tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + - sizeof(struct tx_desc)); + tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + + sizeof(struct tx_desc)); } static inline u8 rtl8152_get_speed(struct r8152 *tp) @@ -2061,13 +2098,13 @@ static void rtl_disable(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; - mdelay(1); + usleep_range(1000, 2000); } for (i = 0; i < 1000; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; - mdelay(1); + usleep_range(1000, 2000); } rtl_stop_rx(tp); @@ -2091,6 +2128,34 @@ static void r8152_power_cut_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); } +static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + if (enable) + ocp_data |= CPCR_RX_VLAN; + else + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); +} + +static int rtl8152_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + struct r8152 *tp = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rtl_rx_vlan_en(tp, true); + else + rtl_rx_vlan_en(tp, false); + } + + return 0; +} + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) @@ -2274,7 +2339,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2285,7 +2350,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } rtl8152_nic_reset(tp); @@ -2316,9 +2381,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, TEST_MODE_DISABLE | TX_SIZE_ADJUST1); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, 
MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); @@ -2346,7 +2409,7 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2357,14 +2420,12 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data |= CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; @@ -2499,7 +2560,7 @@ static void r8153_first_init(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2510,12 +2571,10 @@ static void r8153_first_init(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); @@ -2554,7 +2613,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2565,7 +2624,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); @@ -2574,9 +2633,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data &= ~TEREDO_WAKE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data |= CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; @@ -2876,8 +2933,7 @@ static int rtl8152_close(struct net_device *netdev) if (res < 0) { rtl_drop_queued_tx(tp); } else { - /* - * The autosuspend may have been enabled and wouldn't + /* The autosuspend may have been enabled and wouldn't * be disable when autoresume occurs, because the * netif_running() would be false. 
*/ @@ -2897,43 +2953,92 @@ static int rtl8152_close(struct net_device *netdev) return res; } -static void r8152b_enable_eee(struct r8152 *tp) +static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) +{ + ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); + ocp_reg_write(tp, OCP_EEE_DATA, reg); + ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); +} + +static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) +{ + u16 data; + + r8152_mmd_indirect(tp, dev, reg); + data = ocp_reg_read(tp, OCP_EEE_DATA); + ocp_reg_write(tp, OCP_EEE_AR, 0x0000); + + return data; +} + +static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) { + r8152_mmd_indirect(tp, dev, reg); + ocp_reg_write(tp, OCP_EEE_DATA, data); + ocp_reg_write(tp, OCP_EEE_AR, 0x0000); +} + +static void r8152_eee_en(struct r8152 *tp, bool enable) +{ + u16 config1, config2, config3; u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data |= EEE_RX_EN | EEE_TX_EN; + config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; + config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); + config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; + config1 |= sd_rise_time(1); + config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; + config3 |= fast_snr(42); + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | + RX_QUIET_EN); + config1 |= sd_rise_time(7); + config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); + config3 |= fast_snr(511); + } + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - ocp_reg_write(tp, OCP_EEE_CONFIG1, RG_TXLPI_MSK_HFDUP | RG_MATCLR_EN | - EEE_10_CAP | EEE_NWAY_EN | - TX_QUIET_EN | RX_QUIET_EN | - SDRISETIME | RG_RXLPI_MSK_HFDUP | - SDFALLTIME); - ocp_reg_write(tp, OCP_EEE_CONFIG2, RG_LPIHYS_NUM | RG_DACQUIET_EN | - RG_LDVQUIET_EN | RG_CKRSEL | - RG_EEEPRG_EN); - ocp_reg_write(tp, OCP_EEE_CONFIG3, FST_SNR_EYE_R | RG_LFS_SEL | MSK_PH); - ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | DEVICE_ADDR); - ocp_reg_write(tp, OCP_EEE_DATA, EEE_ADDR); - ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | DEVICE_ADDR); - ocp_reg_write(tp, OCP_EEE_DATA, EEE_DATA); - ocp_reg_write(tp, OCP_EEE_AR, 0x0000); + ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); + ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); + ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); } -static void r8153_enable_eee(struct r8152 *tp) +static void r8152b_enable_eee(struct r8152 *tp) +{ + r8152_eee_en(tp, true); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); +} + +static void r8153_eee_en(struct r8152 *tp, bool enable) { u32 ocp_data; - u16 data; + u16 config; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - ocp_data |= EEE_RX_EN | EEE_TX_EN; + config = ocp_reg_read(tp, OCP_EEE_CFG); + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config |= EEE10_EN; + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config &= ~EEE10_EN; + } + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - data = ocp_reg_read(tp, OCP_EEE_CFG); - data |= EEE10_EN; - ocp_reg_write(tp, OCP_EEE_CFG, data); - data = ocp_reg_read(tp, OCP_EEE_CFG2); - data |= MY1000_EEE | MY100_EEE; - ocp_reg_write(tp, OCP_EEE_CFG2, data); + ocp_reg_write(tp, OCP_EEE_CFG, config); +} + +static void r8153_enable_eee(struct r8152 *tp) +{ + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); } static void r8152b_enable_fc(struct r8152 *tp) @@ 
-3105,8 +3210,9 @@ static int rtl8152_resume(struct usb_interface *intf) } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); + tp->mii.supports_gmii ? + SPEED_1000 : SPEED_100, + DUPLEX_FULL); tp->speed = 0; netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); @@ -3167,8 +3273,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev, { struct r8152 *tp = netdev_priv(netdev); - strncpy(info->driver, MODULENAME, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info)); } @@ -3263,6 +3369,124 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } +static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee) +{ + u32 ocp_data, lp, adv, supported = 0; + u16 val; + + val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + supported = mmd_eee_cap_to_ethtool_sup_t(val); + + val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV); + adv = mmd_eee_adv_to_ethtool_adv_t(val); + + val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); + lp = mmd_eee_adv_to_ethtool_adv_t(val); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + ocp_data &= EEE_RX_EN | EEE_TX_EN; + + eee->eee_enabled = !!ocp_data; + eee->eee_active = !!(supported & adv & lp); + eee->supported = supported; + eee->advertised = adv; + eee->lp_advertised = lp; + + return 0; +} + +static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee) +{ + u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); + + r8152_eee_en(tp, eee->eee_enabled); + + if (!eee->eee_enabled) + val = 0; + + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + + return 0; +} + +static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee) +{ + u32 ocp_data, lp, adv, supported = 0; + u16 val; + + val = ocp_reg_read(tp, OCP_EEE_ABLE); + supported = mmd_eee_cap_to_ethtool_sup_t(val); + + val = ocp_reg_read(tp, OCP_EEE_ADV); + adv = mmd_eee_adv_to_ethtool_adv_t(val); + + val = ocp_reg_read(tp, OCP_EEE_LPABLE); + lp = mmd_eee_adv_to_ethtool_adv_t(val); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + ocp_data &= EEE_RX_EN | EEE_TX_EN; + + eee->eee_enabled = !!ocp_data; + eee->eee_active = !!(supported & adv & lp); + eee->supported = supported; + eee->advertised = adv; + eee->lp_advertised = lp; + + return 0; +} + +static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee) +{ + u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); + + r8153_eee_en(tp, eee->eee_enabled); + + if (!eee->eee_enabled) + val = 0; + + ocp_reg_write(tp, OCP_EEE_ADV, val); + + return 0; +} + +static int +rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata) +{ + struct r8152 *tp = netdev_priv(net); + int ret; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; + + ret = tp->rtl_ops.eee_get(tp, edata); + + usb_autopm_put_interface(tp->intf); + +out: + return ret; +} + +static int +rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata) +{ + struct r8152 *tp = netdev_priv(net); + int ret; + + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out; + + ret = tp->rtl_ops.eee_set(tp, edata); + if (!ret) + ret = mii_nway_restart(&tp->mii); + + usb_autopm_put_interface(tp->intf); + +out: + return ret; +} + static struct ethtool_ops 
ops = { .get_drvinfo = rtl8152_get_drvinfo, .get_settings = rtl8152_get_settings, @@ -3275,6 +3499,8 @@ static struct ethtool_ops ops = { .get_strings = rtl8152_get_strings, .get_sset_count = rtl8152_get_sset_count, .get_ethtool_stats = rtl8152_get_ethtool_stats, + .get_eee = rtl_ethtool_get_eee, + .set_eee = rtl_ethtool_set_eee, }; static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -3343,6 +3569,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { .ndo_do_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, + .ndo_set_features = rtl8152_set_features, .ndo_set_rx_mode = rtl8152_set_rx_mode, .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, @@ -3415,6 +3642,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; + ops->eee_get = r8152_get_eee; + ops->eee_set = r8152_set_eee; ret = 0; break; case PRODUCT_ID_RTL8153: @@ -3424,6 +3653,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8153_set_eee; ret = 0; break; default: @@ -3440,6 +3671,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; + ops->eee_get = r8153_get_eee; + ops->eee_set = r8153_set_eee; ret = 0; break; default: @@ -3497,10 +3730,16 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | - NETIF_F_TSO6; + NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | - NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | + NETIF_F_IPV6_CSUM | NETIF_F_TSO6; netdev->ethtool_ops = &ops; netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 59caa06f34a6b3b17fa3cbce41ea3fb1b08b14a3..3d0ce4468ce6b6a5cde558a9df6e8614150fc2ea 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -546,8 +546,8 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); + sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); @@ -563,6 +563,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) char *p; int i, err, offset; + sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); + /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); @@ -899,6 +901,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; + sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); @@ -934,7 +937,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); 
return NETDEV_TX_OK; } - virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); @@ -954,6 +956,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } + if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more) + virtqueue_kick(sq->vq); + return NETDEV_TX_OK; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index beb377b2d4b78e67e15fa2c7fe6404bef240eba0..2a51e6e48e1ef27c3176f9501fcb9e8ae3594558 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -42,6 +42,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include #include @@ -1062,7 +1063,6 @@ void vxlan_sock_release(struct vxlan_sock *vs) spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); - rcu_assign_sk_user_data(vs->sock->sk, NULL); vxlan_notify_del_rx_port(vs); spin_unlock(&vn->sock_lock); @@ -1158,8 +1158,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!vs) goto drop; - skb_pop_rcv_encapsulation(skb); - vs->rcv(vs, skb, vxh->vx_vni); return 0; @@ -1338,7 +1336,6 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) } #if IS_ENABLED(CONFIG_IPV6) - static struct sk_buff *vxlan_na_create(struct sk_buff *request, struct neighbour *n, bool isrouter) { @@ -1572,13 +1569,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) return false; } -static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb, - bool udp_csum) -{ - int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - return iptunnel_handle_offloads(skb, udp_csum, type); -} - #if IS_ENABLED(CONFIG_IPV6) static int vxlan6_xmit_skb(struct vxlan_sock *vs, struct dst_entry *dst, struct sk_buff *skb, @@ -1587,13 +1577,12 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet) { - struct ipv6hdr *ip6h; struct vxlanhdr *vxh; - struct udphdr *uh; int min_headroom; int err; + bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); - skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk)); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) return -EINVAL; @@ -1621,38 +1610,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - - uh->len = htons(skb->len); - - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | - IPSKB_REROUTED); - skb_dst_set(skb, dst); - - udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb, - saddr, daddr, skb->len); + skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - __skb_push(skb, sizeof(*ip6h)); - skb_reset_network_header(skb); - ip6h = ipv6_hdr(skb); - ip6h->version = 6; - ip6h->priority = prio; - ip6h->flow_lbl[0] = 0; - ip6h->flow_lbl[1] = 0; - ip6h->flow_lbl[2] = 0; - ip6h->payload_len = htons(skb->len); - ip6h->nexthdr = IPPROTO_UDP; - ip6h->hop_limit = ttl; - ip6h->daddr = *daddr; - ip6h->saddr = *saddr; - - ip6tunnel_xmit(skb, dev); + udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio, + ttl, src_port, dst_port); return 0; } #endif @@ -1663,11 +1624,11 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet) { struct vxlanhdr *vxh; - struct udphdr *uh; int min_headroom; int err; + bool udp_sum = !vs->sock->sk->sk_no_check_tx; - skb = 
vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) return -EINVAL; @@ -1693,20 +1654,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - - uh->len = htons(skb->len); + skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - udp_set_csum(vs->sock->sk->sk_no_check_tx, skb, - src, dst, skb->len); - - return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP, - tos, ttl, df, xnet); + return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos, + ttl, df, src_port, dst_port, xnet); } EXPORT_SYMBOL_GPL(vxlan_xmit_skb); @@ -2242,7 +2193,7 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; INIT_LIST_HEAD(&vxlan->next); @@ -2335,8 +2286,7 @@ static const struct ethtool_ops vxlan_ethtool_ops = { static void vxlan_del_work(struct work_struct *work) { struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); - - sk_release_kernel(vs->sock->sk); + udp_tunnel_sock_release(vs->sock); kfree_rcu(vs, rcu); } @@ -2369,9 +2319,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (err < 0) return ERR_PTR(err); - /* Disable multicast loopback */ - inet_sk(sock->sk)->mc_loop = 0; - return sock; } @@ -2383,9 +2330,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; struct socket *sock; - struct sock *sk; unsigned int h; bool ipv6 = !!(flags & VXLAN_F_IPV6); + struct udp_tunnel_sock_cfg tunnel_cfg; vs = kzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) @@ -2403,11 +2350,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, } vs->sock = sock; - sk = sock->sk; atomic_set(&vs->refcnt, 1); vs->rcv = rcv; vs->data = data; - rcu_assign_sk_user_data(vs->sock->sk, vs); /* Initialize the vxlan udp offloads structure */ vs->udp_offloads.port = port; @@ -2420,14 +2365,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. 
*/ - udp_sk(sk)->encap_type = 1; - udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; -#if IS_ENABLED(CONFIG_IPV6) - if (ipv6) - ipv6_stub->udpv6_encap_enable(); - else -#endif - udp_encap_enable(); + tunnel_cfg.sk_user_data = vs; + tunnel_cfg.encap_type = 1; + tunnel_cfg.encap_rcv = vxlan_udp_encap_recv; + tunnel_cfg.encap_destroy = NULL; + + setup_udp_tunnel_sock(net, sock, &tunnel_cfg); return vs; } diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 43c9960dce1c4bd89195add029eefe26d0bfd1a6..ae6ecf4011892f4fa50861cfff6ee4bf158639fe 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -192,8 +192,10 @@ static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev) { struct dlci_local *dlp = netdev_priv(dev); - if (skb) - dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave); + if (skb) { + struct netdev_queue *txq = skb_get_tx_queue(dev, skb); + netdev_start_xmit(skb, dlp->slave, txq, false); + } return NETDEV_TX_OK; } diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index e5c7e6165a4b658046c0bba6173e14c7c62671f5..3ebed1c40abb11c192db0acc36d90bfd2028c646 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1047,7 +1047,7 @@ static void pvc_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT; dev->hard_header_len = 10; dev->addr_len = 2; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } static const struct net_device_ops pvc_ops = { diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index c63d1159db5ca939718f4c34385347865b53ef67..ce7826009eeb9dbc4375ff8329cf898e9d738e9c 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -25,6 +25,14 @@ config ATH_DEBUG Say Y, if you want to debug atheros wireless drivers. Right now only ath9k makes use of this. +config ATH_TRACEPOINTS + bool "Atheros wireless tracing" + depends on ATH_DEBUG + depends on EVENT_TRACING + ---help--- + This option enables tracepoints for atheros wireless drivers. + Currently, ath9k makes use of this facility. + config ATH_REG_DYNAMIC_USER_REG_HINTS bool "Atheros dynamic user regulatory hints" depends on CFG80211_CERTIFICATION_ONUS diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile index 7d023b0f13b47d064cd2db064c58690e2da8e1ea..89f8d5979402dfaecd9d7565798804ed581cf261 100644 --- a/drivers/net/wireless/ath/Makefile +++ b/drivers/net/wireless/ath/Makefile @@ -17,4 +17,8 @@ ath-objs := main.o \ dfs_pri_detector.o ath-$(CONFIG_ATH_DEBUG) += debug.o +ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o + ccflags-y += -D__CHECK_ENDIAN__ + +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index fd9e5305e77fd263cb55ecb46ff08e6d06ca0935..e5ba6faf32815173fa47fa81fd303082688202bd 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -234,6 +234,7 @@ void ath_printk(const char *level, const struct ath_common *common, * AR9462. 
* @ATH_DBG_DFS: radar datection * @ATH_DBG_WOW: Wake on Wireless + * @ATH_DBG_DYNACK: dynack handling * @ATH_DBG_ANY: enable all debugging * * The debug level is used to control the amount and type of debugging output @@ -261,10 +262,13 @@ enum ATH_DEBUG { ATH_DBG_MCI = 0x00008000, ATH_DBG_DFS = 0x00010000, ATH_DBG_WOW = 0x00020000, + ATH_DBG_CHAN_CTX = 0x00040000, + ATH_DBG_DYNACK = 0x00080000, ATH_DBG_ANY = 0xffffffff }; #define ATH_DBG_DEFAULT (ATH_DBG_FATAL) +#define ATH_DBG_MAX_LEN 512 #ifdef CONFIG_ATH_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index a6f5285235af04e7527ab7436ae31a63666a037f..72acb822bb11440989364c6407e5a7579e4efb24 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -24,7 +24,8 @@ config ATH10K_DEBUG config ATH10K_DEBUGFS bool "Atheros ath10k debugfs support" - depends on ATH10K + depends on ATH10K && DEBUG_FS + select RELAY ---help--- Enabled debugfs support diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index a4179f49ee1f3db7cd3882abd5972c5b2d884132..8b1b1adb477a12020bad2d273118e10402338e0a 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -10,6 +10,8 @@ ath10k_core-y += mac.o \ wmi.o \ bmi.o +ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o +ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 17d221abd58c0bb70d72a6fbf14f1e7422f8ced5..3d29b0875b3e0e0abf442d8f875fe39b7ddb9171 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -22,7 +22,7 @@ void ath10k_bmi_start(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n"); ar->bmi.done_sent = false; } @@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n"); if (ar->bmi.done_sent) { - ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n"); return 0; } @@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device: %d\n", ret); + ath10k_warn(ar, "unable to write to the device: %d\n", ret); return ret; } @@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct ath10k *ar, u32 resplen = sizeof(resp.get_target_info); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); if (ar->bmi.done_sent) { - ath10k_warn("BMI Get Target Info Command disallowed\n"); + ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); return -EBUSY; } @@ -72,12 +72,12 @@ int ath10k_bmi_get_target_info(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to get target info from device\n"); + ath10k_warn(ar, "unable to get target info from device\n"); return ret; } if (resplen < sizeof(resp.get_target_info)) { - ath10k_warn("invalid get_target_info response length (%d)\n", + ath10k_warn(ar, "invalid get_target_info response length (%d)\n", resplen); return -EIO; } @@ -97,11 +97,11 @@ int 
ath10k_bmi_read_memory(struct ath10k *ar, u32 rxlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", address, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &rxlen); if (ret) { - ath10k_warn("unable to read from the device (%d)\n", + ath10k_warn(ar, "unable to read from the device (%d)\n", ret); return ret; } @@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 txlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", address, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device (%d)\n", + ath10k_warn(ar, "unable to write to the device (%d)\n", ret); return ret; } @@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) u32 resplen = sizeof(resp.execute); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", address, param); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to read from the device\n"); + ath10k_warn(ar, "unable to read from the device\n"); return ret; } if (resplen < sizeof(resp.execute)) { - ath10k_warn("invalid execute response length (%d)\n", + ath10k_warn(ar, "invalid execute response length (%d)\n", resplen); return -EIO; } *result = __le32_to_cpu(resp.execute.result); - ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); return 0; } @@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) u32 txlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", buffer, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device\n"); + ath10k_warn(ar, "unable to write to the device\n"); return ret; } @@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", address); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct 
ath10k *ar, u32 address) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to Start LZ Stream to the device\n"); + ath10k_warn(ar, "unable to Start LZ Stream to the device\n"); return ret; } @@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 trailer_len = length - head_len; int ret; - ath10k_dbg(ATH10K_DBG_BMI, + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi fast download address 0x%x buffer 0x%p length %d\n", address, buffer, length); diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 111ab701465c0e2dfa731b3e5409278151545a3a..31a990635490aa07cf74a515e620c90fc1b697c8 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -177,7 +177,6 @@ struct bmi_target_info { u32 type; }; - /* in msec */ #define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 4333107ecf37b8325c55f31569361a9b537dddc6..101cadb6e4ba8c3659200093f5da739c4a501236 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -260,7 +260,6 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); } - /* * Guts of ath10k_ce_send, used by both ath10k_ce_send and * ath10k_ce_sendlist_send. @@ -284,13 +283,9 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, int ret = 0; if (nbytes > ce_state->src_sz_max) - ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n", + ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -325,7 +320,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, src_ring->write_index = write_index; exit: - ath10k_pci_sleep(ar); return ret; } @@ -390,49 +384,56 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) return delta; } -int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, - void *per_recv_context, - u32 buffer) +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { - struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; - u32 ctrl_addr = ce_state->ctrl_addr; - struct ath10k *ar = ce_state->ar; + struct ath10k *ar = pipe->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; - unsigned int write_index; - unsigned int sw_index; - int ret; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; - spin_lock_bh(&ar_pci->ce_lock); - write_index = dest_ring->write_index; - sw_index = dest_ring->sw_index; + lockdep_assert_held(&ar_pci->ce_lock); - ret = ath10k_pci_wake(ar); - if (ret) - goto out; + return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); +} - if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { - struct ce_desc *base = dest_ring->base_addr_owner_space; - struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = 
dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); + u32 ctrl_addr = pipe->ctrl_addr; - /* Update destination descriptor */ - desc->addr = __cpu_to_le32(buffer); - desc->nbytes = 0; + lockdep_assert_held(&ar_pci->ce_lock); - dest_ring->per_transfer_context[write_index] = - per_recv_context; + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + return -EIO; - /* Update Destination Ring Write Index */ - write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); - dest_ring->write_index = write_index; - ret = 0; - } else { - ret = -EIO; - } - ath10k_pci_sleep(ar); + desc->addr = __cpu_to_le32(paddr); + desc->nbytes = 0; + + dest_ring->per_transfer_context[write_index] = ctx; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; + + return 0; +} + +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; -out: + spin_lock_bh(&ar_pci->ce_lock); + ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); spin_unlock_bh(&ar_pci->ce_lock); return ret; @@ -588,7 +589,6 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, unsigned int sw_index = src_ring->sw_index; struct ce_desc *sdesc, *sbase; unsigned int read_index; - int ret; if (src_ring->hw_index == sw_index) { /* @@ -599,18 +599,12 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, * value of the HW index has become stale. */ - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); if (read_index == 0xffffffff) return -ENODEV; read_index &= nentries_mask; src_ring->hw_index = read_index; - - ath10k_pci_sleep(ar); } read_index = src_ring->hw_index; @@ -731,11 +725,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; u32 ctrl_addr = ce_state->ctrl_addr; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; spin_lock_bh(&ar_pci->ce_lock); @@ -760,7 +749,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); spin_unlock_bh(&ar_pci->ce_lock); - ath10k_pci_sleep(ar); } /* @@ -771,13 +759,9 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) void ath10k_ce_per_engine_service_any(struct ath10k *ar) { - int ce_id, ret; + int ce_id; u32 intr_summary; - ret = ath10k_pci_wake(ar); - if (ret) - return; - intr_summary = CE_INTERRUPT_SUMMARY(ar); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { @@ -789,8 +773,6 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service(ar, ce_id); } - - ath10k_pci_sleep(ar); } /* @@ -800,16 +782,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) * * Called with ce_lock held. 
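 *
 * The same locking convention applies to the __ath10k_ce_rx_* helpers
 * added above: the double-underscore variants assume the caller already
 * holds ce_lock, while ath10k_ce_rx_post_buf() takes the lock itself.
 * A minimal, hypothetical refill loop (illustrative sketch only; the
 * skb allocation/mapping helper is assumed, not part of this patch):
 *
 *	spin_lock_bh(&ar_pci->ce_lock);
 *	while (__ath10k_ce_rx_num_free_bufs(pipe) > 0) {
 *		skb = alloc_and_map_rx_skb(ar, &paddr);
 *		if (!skb || __ath10k_ce_rx_post_buf(pipe, skb, paddr))
 *			break;
 *	}
 *	spin_unlock_bh(&ar_pci->ce_lock);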
*/ -static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state, - int disable_copy_compl_intr) +static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state) { u32 ctrl_addr = ce_state->ctrl_addr; struct ath10k *ar = ce_state->ar; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; + bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR; if ((!disable_copy_compl_intr) && (ce_state->send_cb || ce_state->recv_cb)) @@ -818,17 +795,11 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state, ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_watermark_intr_disable(ar, ctrl_addr); - - ath10k_pci_sleep(ar); } int ath10k_ce_disable_interrupts(struct ath10k *ar) { - int ce_id, ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return ret; + int ce_id; for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { u32 ctrl_addr = ath10k_ce_base_address(ce_id); @@ -838,34 +809,16 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) ath10k_ce_watermark_intr_disable(ar, ctrl_addr); } - ath10k_pci_sleep(ar); - return 0; } -void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, - void (*send_cb)(struct ath10k_ce_pipe *), - int disable_interrupts) +void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ce_id; - spin_lock_bh(&ar_pci->ce_lock); - ce_state->send_cb = send_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts); - spin_unlock_bh(&ar_pci->ce_lock); -} - -void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, - void (*recv_cb)(struct ath10k_ce_pipe *)) -{ - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - spin_lock_bh(&ar_pci->ce_lock); - ce_state->recv_cb = recv_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, 0); - spin_unlock_bh(&ar_pci->ce_lock); + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); } static int ath10k_ce_init_src_ring(struct ath10k *ar, @@ -898,7 +851,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot init ce src ring id %d entries %d base_addr %p\n", ce_id, nentries, src_ring->base_addr_owner_space); @@ -932,7 +885,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ce dest ring id %d entries %d base_addr %p\n", ce_id, nentries, dest_ring->base_addr_owner_space); @@ -1067,7 +1020,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * initialized by software/firmware. 
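 *
 * With this change the per-pipe completion callbacks are passed in at
 * init time instead of being registered separately through
 * ath10k_ce_send_cb_register()/ath10k_ce_recv_cb_register(). A
 * hypothetical caller (callback names assumed for illustration only):
 *
 *	ret = ath10k_ce_init_pipe(ar, ce_id, &attr,
 *				  ath10k_pci_ce_send_done,
 *				  ath10k_pci_ce_recv_data);
 *	if (ret)
 *		return ret;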
*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr) + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; @@ -1084,39 +1039,37 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - spin_lock_bh(&ar_pci->ce_lock); ce_state->ar = ar; ce_state->id = ce_id; ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); ce_state->attr_flags = attr->flags; ce_state->src_sz_max = attr->src_sz_max; + if (attr->src_nentries) + ce_state->send_cb = send_cb; + if (attr->dest_nentries) + ce_state->recv_cb = recv_cb; spin_unlock_bh(&ar_pci->ce_lock); if (attr->src_nentries) { ret = ath10k_ce_init_src_ring(ar, ce_id, attr); if (ret) { - ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", + ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n", ce_id, ret); - goto out; + return ret; } } if (attr->dest_nentries) { ret = ath10k_ce_init_dest_ring(ar, ce_id, attr); if (ret) { - ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", + ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n", ce_id, ret); - goto out; + return ret; } } -out: - ath10k_pci_sleep(ar); - return ret; + return 0; } static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) @@ -1140,16 +1093,8 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) { - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; - ath10k_ce_deinit_src_ring(ar, ce_id); ath10k_ce_deinit_dest_ring(ar, ce_id); - - ath10k_pci_sleep(ar); } int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, @@ -1163,7 +1108,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); if (IS_ERR(ce_state->src_ring)) { ret = PTR_ERR(ce_state->src_ring); - ath10k_err("failed to allocate copy engine source ring %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n", ce_id, ret); ce_state->src_ring = NULL; return ret; @@ -1175,7 +1120,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, attr); if (IS_ERR(ce_state->dest_ring)) { ret = PTR_ERR(ce_state->dest_ring); - ath10k_err("failed to allocate copy engine destination ring %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n", ce_id, ret); ce_state->dest_ring = NULL; return ret; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 7a5a36fc59c1ad19c51e2c69496660f75aeca073..329b7340fa72cc5157dec176686f4f19a5099629 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -20,7 +20,6 @@ #include "hif.h" - /* Maximum number of Copy Engine's supported */ #define CE_COUNT_MAX 8 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096 @@ -37,7 +36,6 @@ struct ath10k_ce_pipe; - #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC @@ -162,30 +160,13 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe); -void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, - void (*send_cb)(struct 
ath10k_ce_pipe *), - int disable_interrupts); - int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); /*==================Recv=======================*/ -/* - * Make a buffer available to receive. The buffer must be at least of a - * minimal size appropriate for this copy engine (src_sz_max attribute). - * ce - which copy engine to use - * per_transfer_recv_context - context passed back to caller's recv_cb - * buffer - address of buffer in CE space - * Returns 0 on success; otherwise an error status. - * - * Implemenation note: Pushes a buffer to Dest ring. - */ -int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, - void *per_transfer_recv_context, - u32 buffer); - -void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, - void (*recv_cb)(struct ath10k_ce_pipe *)); +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); /* recv flags */ /* Data is byte-swapped */ @@ -206,18 +187,20 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, * Pops 1 completed send buffer from Source ring. */ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp); + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp); /*==================CE Engine Initialization=======================*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr); + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)); void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr); + const struct ce_attr *attr); void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); /*==================CE Engine Shutdown=======================*/ @@ -245,6 +228,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_disable_interrupts(struct ath10k *ar); +void ath10k_ce_enable_interrupts(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? 
*/ @@ -397,7 +381,6 @@ struct ce_attr { #define DST_WATERMARK_HIGH_RESET 0 #define DST_WATERMARK_ADDRESS 0x0050 - static inline u32 ath10k_ce_base_address(unsigned int ce_id) { return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 93adb8c5896926c787bb80e6c0869cd7f5fd4672..cee18c89d7f23ef77ad7a8a2265ca8efa887f9dd 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -26,6 +26,7 @@ #include "bmi.h" #include "debug.h" #include "htt.h" +#include "testmode.h" unsigned int ath10k_debug_mask; static bool uart_print; @@ -53,7 +54,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { static void ath10k_send_suspend_complete(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); complete(&ar->target_suspend); } @@ -67,14 +68,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_app_host_interest, HTC_PROTOCOL_VERSION); if (ret) { - ath10k_err("settings HTC version failed\n"); + ath10k_err(ar, "settings HTC version failed\n"); return ret; } /* set the firmware mode to STA/IBSS/AP */ ret = ath10k_bmi_read32(ar, hi_option_flag, ¶m_host); if (ret) { - ath10k_err("setting firmware mode (1/2) failed\n"); + ath10k_err(ar, "setting firmware mode (1/2) failed\n"); return ret; } @@ -93,14 +94,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); if (ret) { - ath10k_err("setting firmware mode (2/2) failed\n"); + ath10k_err(ar, "setting firmware mode (2/2) failed\n"); return ret; } /* We do all byte-swapping on the host */ ret = ath10k_bmi_write32(ar, hi_be, 0); if (ret) { - ath10k_err("setting host CPU BE mode failed\n"); + ath10k_err(ar, "setting host CPU BE mode failed\n"); return ret; } @@ -108,7 +109,7 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); if (ret) { - ath10k_err("setting FW data/desc swap flags failed\n"); + ath10k_err(ar, "setting FW data/desc swap flags failed\n"); return ret; } @@ -146,11 +147,12 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); if (ret) { - ath10k_err("could not read board ext data addr (%d)\n", ret); + ath10k_err(ar, "could not read board ext data addr (%d)\n", + ret); return ret; } - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot push board extended data addr 0x%x\n", board_ext_data_addr); @@ -158,7 +160,7 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) return 0; if (ar->board_len != (board_data_size + board_ext_data_size)) { - ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", + ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", ar->board_len, board_data_size, board_ext_data_size); return -EINVAL; } @@ -167,14 +169,15 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) ar->board_data + board_data_size, board_ext_data_size); if (ret) { - ath10k_err("could not write board ext data (%d)\n", ret); + ath10k_err(ar, "could not write board ext data (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, (board_ext_data_size << 16) | 1); if (ret) { - ath10k_err("could not write board ext data bit (%d)\n", ret); + ath10k_err(ar, "could not write board ext data bit (%d)\n", + ret); 
return ret; } @@ -189,13 +192,13 @@ static int ath10k_download_board_data(struct ath10k *ar) ret = ath10k_push_board_ext_data(ar); if (ret) { - ath10k_err("could not push board ext data (%d)\n", ret); + ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; } ret = ath10k_bmi_read32(ar, hi_board_data, &address); if (ret) { - ath10k_err("could not read board data addr (%d)\n", ret); + ath10k_err(ar, "could not read board data addr (%d)\n", ret); goto exit; } @@ -203,13 +206,13 @@ static int ath10k_download_board_data(struct ath10k *ar) min_t(u32, board_data_size, ar->board_len)); if (ret) { - ath10k_err("could not write board data (%d)\n", ret); + ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; } ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); if (ret) { - ath10k_err("could not write board data bit (%d)\n", ret); + ath10k_err(ar, "could not write board data bit (%d)\n", ret); goto exit; } @@ -225,51 +228,72 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* OTP is optional */ if (!ar->otp_data || !ar->otp_len) { - ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", + ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", ar->otp_data, ar->otp_len); return 0; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", address, ar->otp_len); ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); if (ret) { - ath10k_err("could not write otp (%d)\n", ret); + ath10k_err(ar, "could not write otp (%d)\n", ret); return ret; } ret = ath10k_bmi_execute(ar, address, 0, &result); if (ret) { - ath10k_err("could not execute otp (%d)\n", ret); + ath10k_err(ar, "could not execute otp (%d)\n", ret); return ret; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (result != 0) { - ath10k_err("otp calibration failed: %d", result); + ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } return 0; } -static int ath10k_download_fw(struct ath10k *ar) +static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) { - u32 address; + u32 address, data_len; + const char *mode_name; + const void *data; int ret; address = ar->hw_params.patch_load_addr; - ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data, - ar->firmware_len); + switch (mode) { + case ATH10K_FIRMWARE_MODE_NORMAL: + data = ar->firmware_data; + data_len = ar->firmware_len; + mode_name = "normal"; + break; + case ATH10K_FIRMWARE_MODE_UTF: + data = ar->testmode.utf->data; + data_len = ar->testmode.utf->size; + mode_name = "utf"; + break; + default: + ath10k_err(ar, "unknown firmware mode: %d\n", mode); + return -EINVAL; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot uploading firmware image %p len %d mode %s\n", + data, data_len, mode_name); + + ret = ath10k_bmi_fast_download(ar, address, data, data_len); if (ret) { - ath10k_err("could not write fw (%d)\n", ret); - goto exit; + ath10k_err(ar, "failed to download %s firmware: %d\n", + mode_name, ret); + return ret; } -exit: return ret; } @@ -302,12 +326,12 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) int ret = 0; if (ar->hw_params.fw.fw == NULL) { - ath10k_err("firmware file not defined\n"); + ath10k_err(ar, "firmware file not defined\n"); return -EINVAL; } if (ar->hw_params.fw.board == NULL) { - 
ath10k_err("board data file not defined"); + ath10k_err(ar, "board data file not defined"); return -EINVAL; } @@ -316,7 +340,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.board); if (IS_ERR(ar->board)) { ret = PTR_ERR(ar->board); - ath10k_err("could not fetch board data (%d)\n", ret); + ath10k_err(ar, "could not fetch board data (%d)\n", ret); goto err; } @@ -328,7 +352,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.fw); if (IS_ERR(ar->firmware)) { ret = PTR_ERR(ar->firmware); - ath10k_err("could not fetch firmware (%d)\n", ret); + ath10k_err(ar, "could not fetch firmware (%d)\n", ret); goto err; } @@ -344,7 +368,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.otp); if (IS_ERR(ar->otp)) { ret = PTR_ERR(ar->otp); - ath10k_err("could not fetch otp (%d)\n", ret); + ath10k_err(ar, "could not fetch otp (%d)\n", ret); goto err; } @@ -369,7 +393,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) /* first fetch the firmware file (firmware-*.bin) */ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); if (IS_ERR(ar->firmware)) { - ath10k_err("could not fetch firmware file '%s/%s': %ld\n", + ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); return PTR_ERR(ar->firmware); } @@ -381,14 +405,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; if (len < magic_len) { - ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n", + ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n", ar->hw_params.fw.dir, name, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { - ath10k_err("invalid firmware magic\n"); + ath10k_err(ar, "invalid firmware magic\n"); ret = -EINVAL; goto err; } @@ -410,7 +434,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) data += sizeof(*hdr); if (len < ie_len) { - ath10k_err("invalid length for FW IE %d (%zu < %zu)\n", + ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); ret = -EINVAL; goto err; @@ -424,7 +448,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) memcpy(ar->hw->wiphy->fw_version, data, ie_len); ar->hw->wiphy->fw_version[ie_len] = '\0'; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw version %s\n", ar->hw->wiphy->fw_version); break; @@ -434,11 +458,11 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) timestamp = (__le32 *)data; - ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n", le32_to_cpup(timestamp)); break; case ATH10K_FW_IE_FEATURES: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found firmware features ie (%zd B)\n", ie_len); @@ -450,19 +474,19 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; if (data[index] & (1 << bit)) { - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "Enabling feature bit: %i\n", i); __set_bit(i, ar->fw_features); } } - ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "", + ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", ar->fw_features, sizeof(ar->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw image 
ie (%zd B)\n", ie_len); @@ -471,7 +495,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; case ATH10K_FW_IE_OTP_IMAGE: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found otp image ie (%zd B)\n", ie_len); @@ -480,7 +504,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; default: - ath10k_warn("Unknown FW IE: %u\n", + ath10k_warn(ar, "Unknown FW IE: %u\n", le32_to_cpu(hdr->id)); break; } @@ -493,15 +517,22 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) } if (!ar->firmware_data || !ar->firmware_len) { - ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", + ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ar->hw_params.fw.dir, name); ret = -ENOMEDIUM; goto err; } + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) && + !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); + ret = -EINVAL; + goto err; + } + /* now fetch the board file */ if (ar->hw_params.fw.board == NULL) { - ath10k_err("board data file not defined"); + ath10k_err(ar, "board data file not defined"); ret = -EINVAL; goto err; } @@ -511,7 +542,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ar->hw_params.fw.board); if (IS_ERR(ar->board)) { ret = PTR_ERR(ar->board); - ath10k_err("could not fetch board data '%s/%s' (%d)\n", + ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n", ar->hw_params.fw.dir, ar->hw_params.fw.board, ret); goto err; @@ -531,45 +562,53 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret; + ar->fw_api = 3; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE); + if (ret == 0) + goto success; + ar->fw_api = 2; - ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); if (ret == 0) goto success; ar->fw_api = 1; - ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ret = ath10k_core_fetch_firmware_api_1(ar); if (ret) return ret; success: - ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); return 0; } -static int ath10k_init_download_firmware(struct ath10k *ar) +static int ath10k_init_download_firmware(struct ath10k *ar, + enum ath10k_firmware_mode mode) { int ret; ret = ath10k_download_board_data(ar); if (ret) { - ath10k_err("failed to download board data: %d\n", ret); + ath10k_err(ar, "failed to download board data: %d\n", ret); return ret; } ret = ath10k_download_and_run_otp(ar); if (ret) { - ath10k_err("failed to run otp: %d\n", ret); + ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } - ret = ath10k_download_fw(ar); + ret = ath10k_download_fw(ar, mode); if (ret) { - ath10k_err("failed to download firmware: %d\n", ret); + ath10k_err(ar, "failed to download firmware: %d\n", ret); return ret; } @@ -586,7 +625,7 @@ static int ath10k_init_uart(struct ath10k *ar) */ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); if (ret) { - ath10k_warn("could not disable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not disable UART prints (%d)\n", ret); return ret; } @@ 
-595,24 +634,24 @@ static int ath10k_init_uart(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } /* Set the UART baud rate to 19200. */ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); if (ret) { - ath10k_warn("could not set the baud rate (%d)\n", ret); + ath10k_warn(ar, "could not set the baud rate (%d)\n", ret); return ret; } - ath10k_info("UART prints enabled\n"); + ath10k_info(ar, "UART prints enabled\n"); return 0; } @@ -629,14 +668,14 @@ static int ath10k_init_hw_params(struct ath10k *ar) } if (i == ARRAY_SIZE(ath10k_hw_params_list)) { - ath10k_err("Unsupported hardware version: 0x%x\n", + ath10k_err(ar, "Unsupported hardware version: 0x%x\n", ar->target_version); return -EINVAL; } ar->hw_params = *hw_params; - ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", ar->hw_params.name, ar->target_version); return 0; @@ -651,14 +690,14 @@ static void ath10k_core_restart(struct work_struct *work) switch (ar->state) { case ATH10K_STATE_ON: ar->state = ATH10K_STATE_RESTARTING; - del_timer_sync(&ar->scan.timeout); - ath10k_reset_scan((unsigned long)ar); + ath10k_hif_stop(ar); + ath10k_scan_finish(ar); ieee80211_restart_hw(ar->hw); break; case ATH10K_STATE_OFF: /* this can happen if driver is being unloaded * or if the crash happens during FW probing */ - ath10k_warn("cannot restart a device that hasn't been started\n"); + ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); break; case ATH10K_STATE_RESTARTING: /* hw restart might be requested from multiple places */ @@ -667,14 +706,17 @@ static void ath10k_core_restart(struct work_struct *work) ar->state = ATH10K_STATE_WEDGED; /* fall through */ case ATH10K_STATE_WEDGED: - ath10k_warn("device is wedged, will not restart\n"); + ath10k_warn(ar, "device is wedged, will not restart\n"); + break; + case ATH10K_STATE_UTF: + ath10k_warn(ar, "firmware restart in UTF mode not supported\n"); break; } mutex_unlock(&ar->conf_mutex); } -int ath10k_core_start(struct ath10k *ar) +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) { int status; @@ -687,7 +729,7 @@ int ath10k_core_start(struct ath10k *ar) goto err; } - status = ath10k_init_download_firmware(ar); + status = ath10k_init_download_firmware(ar, mode); if (status) goto err; @@ -700,7 +742,7 @@ int ath10k_core_start(struct ath10k *ar) status = ath10k_htc_init(ar); if (status) { - ath10k_err("could not init HTC (%d)\n", status); + ath10k_err(ar, "could not init HTC (%d)\n", status); goto err; } @@ -710,90 +752,98 @@ int ath10k_core_start(struct ath10k *ar) status = ath10k_wmi_attach(ar); if (status) { - ath10k_err("WMI attach failed: %d\n", status); + ath10k_err(ar, "WMI attach failed: %d\n", status); goto err; } status = ath10k_htt_init(ar); if (status) { - ath10k_err("failed to init htt: %d\n", status); + ath10k_err(ar, "failed to init htt: %d\n", status); goto err_wmi_detach; } status = ath10k_htt_tx_alloc(&ar->htt); if (status) { - ath10k_err("failed to alloc htt tx: %d\n", status); + ath10k_err(ar, "failed to alloc htt tx: %d\n", status); goto err_wmi_detach; } status = ath10k_htt_rx_alloc(&ar->htt); if 
(status) { - ath10k_err("failed to alloc htt rx: %d\n", status); + ath10k_err(ar, "failed to alloc htt rx: %d\n", status); goto err_htt_tx_detach; } status = ath10k_hif_start(ar); if (status) { - ath10k_err("could not start HIF: %d\n", status); + ath10k_err(ar, "could not start HIF: %d\n", status); goto err_htt_rx_detach; } status = ath10k_htc_wait_target(&ar->htc); if (status) { - ath10k_err("failed to connect to HTC: %d\n", status); + ath10k_err(ar, "failed to connect to HTC: %d\n", status); goto err_hif_stop; } - status = ath10k_htt_connect(&ar->htt); - if (status) { - ath10k_err("failed to connect htt (%d)\n", status); - goto err_hif_stop; + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_htt_connect(&ar->htt); + if (status) { + ath10k_err(ar, "failed to connect htt (%d)\n", status); + goto err_hif_stop; + } } status = ath10k_wmi_connect(ar); if (status) { - ath10k_err("could not connect wmi: %d\n", status); + ath10k_err(ar, "could not connect wmi: %d\n", status); goto err_hif_stop; } status = ath10k_htc_start(&ar->htc); if (status) { - ath10k_err("failed to start htc: %d\n", status); + ath10k_err(ar, "failed to start htc: %d\n", status); goto err_hif_stop; } - status = ath10k_wmi_wait_for_service_ready(ar); - if (status <= 0) { - ath10k_warn("wmi service ready event not received"); - status = -ETIMEDOUT; - goto err_htc_stop; + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_wmi_wait_for_service_ready(ar); + if (status <= 0) { + ath10k_warn(ar, "wmi service ready event not received"); + status = -ETIMEDOUT; + goto err_hif_stop; + } } - ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); status = ath10k_wmi_cmd_init(ar); if (status) { - ath10k_err("could not send WMI init command (%d)\n", status); - goto err_htc_stop; + ath10k_err(ar, "could not send WMI init command (%d)\n", + status); + goto err_hif_stop; } status = ath10k_wmi_wait_for_unified_ready(ar); if (status <= 0) { - ath10k_err("wmi unified ready event not received\n"); + ath10k_err(ar, "wmi unified ready event not received\n"); status = -ETIMEDOUT; - goto err_htc_stop; + goto err_hif_stop; } - status = ath10k_htt_setup(&ar->htt); - if (status) { - ath10k_err("failed to setup htt: %d\n", status); - goto err_htc_stop; + /* we don't care about HTT in UTF mode */ + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { + status = ath10k_htt_setup(&ar->htt); + if (status) { + ath10k_err(ar, "failed to setup htt: %d\n", status); + goto err_hif_stop; + } } status = ath10k_debug_start(ar); if (status) - goto err_htc_stop; + goto err_hif_stop; if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; @@ -802,28 +852,8 @@ int ath10k_core_start(struct ath10k *ar) INIT_LIST_HEAD(&ar->arvifs); - if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) { - ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", - ar->hw_params.name, - ar->target_version, - ar->chip_id, - ar->hw->wiphy->fw_version, - ar->fw_api, - ar->htt.target_version_major, - ar->htt.target_version_minor); - ath10k_info("debug %d debugfs %d tracing %d dfs %d\n", - config_enabled(CONFIG_ATH10K_DEBUG), - config_enabled(CONFIG_ATH10K_DEBUGFS), - config_enabled(CONFIG_ATH10K_TRACING), - config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)); - } - - __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags); - return 0; -err_htc_stop: - ath10k_htc_stop(&ar->htc); err_hif_stop: ath10k_hif_stop(ar); err_htt_rx_detach: @@ -845,14 
+875,14 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); if (ret) { - ath10k_warn("could not suspend target (%d)\n", ret); + ath10k_warn(ar, "could not suspend target (%d)\n", ret); return ret; } ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); if (ret == 0) { - ath10k_warn("suspend timed out - target pause event never came\n"); + ath10k_warn(ar, "suspend timed out - target pause event never came\n"); return -ETIMEDOUT; } @@ -864,11 +894,11 @@ void ath10k_core_stop(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); /* try to suspend target */ - if (ar->state != ATH10K_STATE_RESTARTING) + if (ar->state != ATH10K_STATE_RESTARTING && + ar->state != ATH10K_STATE_UTF) ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); ath10k_debug_stop(ar); - ath10k_htc_stop(&ar->htc); ath10k_hif_stop(ar); ath10k_htt_tx_free(&ar->htt); ath10k_htt_rx_free(&ar->htt); @@ -887,14 +917,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_hif_power_up(ar); if (ret) { - ath10k_err("could not start pci hif (%d)\n", ret); + ath10k_err(ar, "could not start pci hif (%d)\n", ret); return ret; } memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info(ar, &target_info); if (ret) { - ath10k_err("could not get target info (%d)\n", ret); + ath10k_err(ar, "could not get target info (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } @@ -904,29 +934,30 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_init_hw_params(ar); if (ret) { - ath10k_err("could not get hw params (%d)\n", ret); + ath10k_err(ar, "could not get hw params (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } ret = ath10k_core_fetch_firmware_files(ar); if (ret) { - ath10k_err("could not fetch firmware files (%d)\n", ret); + ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } mutex_lock(&ar->conf_mutex); - ret = ath10k_core_start(ar); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); if (ret) { - ath10k_err("could not init core (%d)\n", ret); + ath10k_err(ar, "could not init core (%d)\n", ret); ath10k_core_free_firmware_files(ar); ath10k_hif_power_down(ar); mutex_unlock(&ar->conf_mutex); return ret; } + ath10k_print_driver_info(ar); ath10k_core_stop(ar); mutex_unlock(&ar->conf_mutex); @@ -939,7 +970,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) { u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV); - ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", ar->chip_id, hw_revision); /* Check that we are not using hw1.0 (some of them have same pci id @@ -947,7 +978,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) * due to missing hw1.0 workarounds. 
*/ switch (hw_revision) { case QCA988X_HW_1_0_CHIP_ID_REV: - ath10k_err("ERROR: qca988x hw1.0 is not supported\n"); + ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n"); return -EOPNOTSUPP; case QCA988X_HW_2_0_CHIP_ID_REV: @@ -955,7 +986,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) return 0; default: - ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n", + ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n", ar->chip_id); return 0; } @@ -970,25 +1001,33 @@ static void ath10k_core_register_work(struct work_struct *work) status = ath10k_core_probe_fw(ar); if (status) { - ath10k_err("could not probe fw (%d)\n", status); + ath10k_err(ar, "could not probe fw (%d)\n", status); goto err; } status = ath10k_mac_register(ar); if (status) { - ath10k_err("could not register to mac80211 (%d)\n", status); + ath10k_err(ar, "could not register to mac80211 (%d)\n", status); goto err_release_fw; } - status = ath10k_debug_create(ar); + status = ath10k_debug_register(ar); if (status) { - ath10k_err("unable to initialize debugfs\n"); + ath10k_err(ar, "unable to initialize debugfs\n"); goto err_unregister_mac; } + status = ath10k_spectral_create(ar); + if (status) { + ath10k_err(ar, "failed to initialize spectral\n"); + goto err_debug_destroy; + } + set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); return; +err_debug_destroy: + ath10k_debug_destroy(ar); err_unregister_mac: ath10k_mac_unregister(ar); err_release_fw: @@ -1008,7 +1047,7 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id) status = ath10k_core_check_chip_id(ar); if (status) { - ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id); + ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id); return status; } @@ -1025,23 +1064,32 @@ void ath10k_core_unregister(struct ath10k *ar) if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) return; + /* Stop spectral before unregistering from mac80211 to remove the + * relayfs debugfs file cleanly. Otherwise the parent debugfs tree + * would be already be free'd recursively, leading to a double free. + */ + ath10k_spectral_destroy(ar); + /* We must unregister from mac80211 before we stop HTC and HIF. * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. 
*/ ath10k_mac_unregister(ar); + ath10k_testmode_destroy(ar); + ath10k_core_free_firmware_files(ar); - ath10k_debug_destroy(ar); + ath10k_debug_unregister(ar); } EXPORT_SYMBOL(ath10k_core_unregister); -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; + int ret; - ar = ath10k_mac_create(); + ar = ath10k_mac_create(priv_size); if (!ar) return NULL; @@ -1051,7 +1099,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, ar->p2p = !!ath10k_p2p; ar->dev = dev; - ar->hif.priv = hif_priv; ar->hif.ops = hif_ops; init_completion(&ar->scan.started); @@ -1062,11 +1109,11 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, init_completion(&ar->install_key_done); init_completion(&ar->vdev_setup_done); - setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); + INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); ar->workqueue = create_singlethread_workqueue("ath10k_wq"); if (!ar->workqueue) - goto err_wq; + goto err_free_mac; mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); @@ -1084,10 +1131,18 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, INIT_WORK(&ar->register_work, ath10k_core_register_work); INIT_WORK(&ar->restart_work, ath10k_core_restart); + ret = ath10k_debug_create(ar); + if (ret) + goto err_free_wq; + return ar; -err_wq: +err_free_wq: + destroy_workqueue(ar->workqueue); + +err_free_mac: ath10k_mac_destroy(ar); + return NULL; } EXPORT_SYMBOL(ath10k_core_create); @@ -1097,6 +1152,7 @@ void ath10k_core_destroy(struct ath10k *ar) flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); + ath10k_debug_destroy(ar); ath10k_mac_destroy(ar); } EXPORT_SYMBOL(ath10k_core_destroy); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 83a5fa91531d159b9095ef0fff37db55c38e0eeb..fe531ea6926cce4fc2076dd132455440cb647454 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include "htt.h" #include "htc.h" @@ -31,6 +33,7 @@ #include "../ath.h" #include "../regd.h" #include "../dfs_pattern_detector.h" +#include "spectral.h" #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -237,6 +240,7 @@ struct ath10k_vif { bool is_started; bool is_up; + bool spectral_enabled; u32 aid; u8 bssid[ETH_ALEN]; @@ -276,11 +280,20 @@ struct ath10k_vif_iter { struct ath10k_vif *arvif; }; +/* used for crash-dump storage, protected by data-lock */ +struct ath10k_fw_crash_data { + bool crashed_since_read; + + uuid_le uuid; + struct timespec timestamp; + __le32 registers[REG_DUMP_COUNT_QCA988X]; +}; + struct ath10k_debug { struct dentry *debugfs_phy; struct ath10k_target_stats target_stats; - u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX); struct completion event_stats_compl; @@ -293,6 +306,8 @@ struct ath10k_debug { u8 htt_max_amsdu; u8 htt_max_ampdu; + + struct ath10k_fw_crash_data *fw_crash_data; }; enum ath10k_state { @@ -315,6 +330,17 @@ enum ath10k_state { * prevents completion timeouts and makes the driver more responsive to * userspace commands. This is also prevents recursive recovery. 
*/ ATH10K_STATE_WEDGED, + + /* factory tests */ + ATH10K_STATE_UTF, +}; + +enum ath10k_firmware_mode { + /* the default mode, standard 802.11 functionality */ + ATH10K_FIRMWARE_MODE_NORMAL, + + /* factory tests etc */ + ATH10K_FIRMWARE_MODE_UTF, }; enum ath10k_fw_features { @@ -330,6 +356,11 @@ enum ath10k_fw_features { /* Firmware does not support P2P */ ATH10K_FW_FEATURE_NO_P2P = 3, + /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit + * is required to be set as well. + */ + ATH10K_FW_FEATURE_WMI_10_2 = 4, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -337,10 +368,32 @@ enum ath10k_fw_features { enum ath10k_dev_flags { /* Indicates that ath10k device is during CAC phase of DFS */ ATH10K_CAC_RUNNING, - ATH10K_FLAG_FIRST_BOOT_DONE, ATH10K_FLAG_CORE_REGISTERED, }; +enum ath10k_scan_state { + ATH10K_SCAN_IDLE, + ATH10K_SCAN_STARTING, + ATH10K_SCAN_RUNNING, + ATH10K_SCAN_ABORTING, +}; + +static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state) +{ + switch (state) { + case ATH10K_SCAN_IDLE: + return "idle"; + case ATH10K_SCAN_STARTING: + return "starting"; + case ATH10K_SCAN_RUNNING: + return "running"; + case ATH10K_SCAN_ABORTING: + return "aborting"; + } + + return "unknown"; +} + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; @@ -368,7 +421,6 @@ struct ath10k { bool p2p; struct { - void *priv; const struct ath10k_hif_ops *ops; } hif; @@ -410,10 +462,9 @@ struct ath10k { struct completion started; struct completion completed; struct completion on_channel; - struct timer_list timeout; + struct delayed_work timeout; + enum ath10k_scan_state state; bool is_roc; - bool in_progress; - bool aborting; int vdev_id; int roc_freq; } scan; @@ -432,7 +483,6 @@ struct ath10k { struct cfg80211_chan_def chandef; int free_vdev_map; - bool promisc; bool monitor; int monitor_vdev_id; bool monitor_started; @@ -494,13 +544,34 @@ struct ath10k { #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; #endif + + struct { + /* relay(fs) channel for spectral scan */ + struct rchan *rfs_chan_spec_scan; + + /* spectral_mode and spec_config are protected by conf_mutex */ + enum ath10k_spectral_mode mode; + struct ath10k_spec_scan config; + } spectral; + + struct { + /* protected by conf_mutex */ + const struct firmware *utf; + DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); + + /* protected by data_lock */ + bool utf_monitor; + } testmode; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); }; -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); -int ath10k_core_start(struct ath10k *ar); +int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); void ath10k_core_stop(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar, u32 chip_id); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3030158c478e86cd9ee193af14b29da614f0d168..3756feba32231cffcddd112575163d1a7cf6cc67 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -17,6 +17,9 @@ #include #include +#include +#include +#include #include "core.h" #include "debug.h" @@ -24,25 +27,86 @@ /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 -static int ath10k_printk(const char *level, const char *fmt, ...) 
-{ - struct va_format vaf; - va_list args; - int rtn; +#define ATH10K_FW_CRASH_DUMP_VERSION 1 - va_start(args, fmt); +/** + * enum ath10k_fw_crash_dump_type - types of data in the dump file + * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format + */ +enum ath10k_fw_crash_dump_type { + ATH10K_FW_CRASH_DUMP_REGISTERS = 0, - vaf.fmt = fmt; - vaf.va = &args; + ATH10K_FW_CRASH_DUMP_MAX, +}; - rtn = printk("%sath10k: %pV", level, &vaf); +struct ath10k_tlv_dump_data { + /* see ath10k_fw_crash_dump_type above */ + __le32 type; - va_end(args); + /* in bytes */ + __le32 tlv_len; - return rtn; -} + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath10k_dump_file_data { + /* dump file information */ + + /* "ATH10K-FW-DUMP" */ + char df_magic[16]; + + __le32 len; + + /* file dump version */ + __le32 version; + + /* some info we can get from ath10k struct that might help */ + + u8 uuid[16]; + + __le32 chip_id; + + /* 0 for now, in place for later hardware */ + __le32 bus_type; + + __le32 target_version; + __le32 fw_version_major; + __le32 fw_version_minor; + __le32 fw_version_release; + __le32 fw_version_build; + __le32 phy_capability; + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + __le32 ht_cap_info; + __le32 vht_cap_info; + __le32 num_rf_chains; + + /* firmware version string */ + char fw_ver[ETHTOOL_FWVERS_LEN]; + + /* Kernel related information */ + + /* time-of-day stamp */ + __le64 tv_sec; + + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + + /* LINUX_VERSION_CODE */ + __le32 kernel_ver_code; + + /* VERMAGIC_STRING */ + char kernel_ver[64]; -int ath10k_info(const char *fmt, ...) + /* room for growth w/out changing binary format */ + u8 unused[128]; + + /* struct ath10k_tlv_dump_data + more */ + u8 data[0]; +} __packed; + +int ath10k_info(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -52,15 +116,34 @@ int ath10k_info(const char *fmt, ...) va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_INFO, "%pV", &vaf); - trace_ath10k_log_info(&vaf); + ret = dev_info(ar->dev, "%pV", &vaf); + trace_ath10k_log_info(ar, &vaf); va_end(args); return ret; } EXPORT_SYMBOL(ath10k_info); -int ath10k_err(const char *fmt, ...) +void ath10k_print_driver_info(struct ath10k *ar) +{ + ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", + ar->hw_params.name, + ar->target_version, + ar->chip_id, + ar->hw->wiphy->fw_version, + ar->fw_api, + ar->htt.target_version_major, + ar->htt.target_version_minor); + ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", + config_enabled(CONFIG_ATH10K_DEBUG), + config_enabled(CONFIG_ATH10K_DEBUGFS), + config_enabled(CONFIG_ATH10K_TRACING), + config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), + config_enabled(CONFIG_NL80211_TESTMODE)); +} +EXPORT_SYMBOL(ath10k_print_driver_info); + +int ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -70,33 +153,29 @@ int ath10k_err(const char *fmt, ...) va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_ERR, "%pV", &vaf); - trace_ath10k_log_err(&vaf); + ret = dev_err(ar->dev, "%pV", &vaf); + trace_ath10k_log_err(ar, &vaf); va_end(args); return ret; } EXPORT_SYMBOL(ath10k_err); -int ath10k_warn(const char *fmt, ...) +int ath10k_warn(struct ath10k *ar, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret = 0; va_start(args, fmt); vaf.va = &args; - - if (net_ratelimit()) - ret = ath10k_printk(KERN_WARNING, "%pV", &vaf); - - trace_ath10k_log_warn(&vaf); + dev_warn_ratelimited(ar->dev, "%pV", &vaf); + trace_ath10k_log_warn(ar, &vaf); va_end(args); - return ret; + return 0; } EXPORT_SYMBOL(ath10k_warn); @@ -115,9 +194,10 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - unsigned int len = 0, buf_len = 1500; - const char *status; + unsigned int len = 0, buf_len = 4096; + const char *name; ssize_t ret_cnt; + bool enabled; int i; buf = kzalloc(buf_len, GFP_KERNEL); @@ -129,15 +209,22 @@ static ssize_t ath10k_read_wmi_services(struct file *file, if (len > buf_len) len = buf_len; - for (i = 0; i < WMI_SERVICE_LAST; i++) { - if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i)) - status = "enabled"; - else - status = "disabled"; + for (i = 0; i < WMI_SERVICE_MAX; i++) { + enabled = test_bit(i, ar->debug.wmi_service_bitmap); + name = wmi_service_name(i); + + if (!name) { + if (enabled) + len += scnprintf(buf + len, buf_len - len, + "%-40s %s (bit %d)\n", + "unknown", "enabled", i); + + continue; + } len += scnprintf(buf + len, buf_len - len, - "0x%02x - %20s - %s\n", - i, wmi_service_name(i), status); + "%-40s %s\n", + name, enabled ? "enabled" : "-"); } ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -309,7 +396,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); if (ret) { - ath10k_warn("could not request stats (%d)\n", ret); + ath10k_warn(ar, "could not request stats (%d)\n", ret); goto exit; } @@ -478,16 +565,35 @@ static const struct file_operations fops_fw_stats = { .llseek = default_llseek, }; +/* This is a clean assert crash in firmware. 
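+ * It triggers the assert by sending a WMI vdev_install_key command with
+ * an out-of-range vdev id (0x7ffe), which the firmware answers with an
+ * assert failure, unlike `hard` which uses an illegal vdev_set_param.
+ * From user space this is typically exercised via debugfs, e.g.:
+ *
+ *	echo assert > /sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash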
*/ +static int ath10k_debug_fw_assert(struct ath10k *ar) +{ + struct wmi_vdev_install_key_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_install_key_cmd *)skb->data; + memset(cmd, 0, sizeof(*cmd)); + + /* big enough number so that firmware asserts */ + cmd->vdev_id = __cpu_to_le32(0x7ffe); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_install_key_cmdid); +} + static ssize_t ath10k_read_simulate_fw_crash(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - const char buf[] = "To simulate firmware crash write one of the" - " keywords to this file:\n `soft` - this will send" - " WMI_FORCE_FW_HANG_ASSERT to firmware if FW" - " supports that command.\n `hard` - this will send" - " to firmware command with illegal parameters" - " causing firmware crash.\n"; + const char buf[] = + "To simulate firmware crash write one of the keywords to this file:\n" + "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" + "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" + "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } @@ -527,19 +633,26 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, } if (!strcmp(buf, "soft")) { - ath10k_info("simulating soft firmware crash\n"); + ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); } else if (!strcmp(buf, "hard")) { - ath10k_info("simulating hard firmware crash\n"); - ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1, - ar->wmi.vdev_param->rts_threshold, 0); + ath10k_info(ar, "simulating hard firmware crash\n"); + /* 0x7fff is vdev id, and it is always out of range for all + * firmware variants in order to force a firmware crash. + */ + ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, + ar->wmi.vdev_param->rts_threshold, + 0); + } else if (!strcmp(buf, "assert")) { + ath10k_info(ar, "simulating firmware assert crash\n"); + ret = ath10k_debug_fw_assert(ar); } else { ret = -EINVAL; goto exit; } if (ret) { - ath10k_warn("failed to simulate firmware crash: %d\n", ret); + ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); goto exit; } @@ -577,6 +690,138 @@ static const struct file_operations fops_chip_id = { .llseek = default_llseek, }; +struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + + lockdep_assert_held(&ar->data_lock); + + crash_data->crashed_since_read = true; + uuid_le_gen(&crash_data->uuid); + getnstimeofday(&crash_data->timestamp); + + return crash_data; +} +EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data); + +static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + struct ath10k_dump_file_data *dump_data; + struct ath10k_tlv_dump_data *dump_tlv; + int hdr_len = sizeof(*dump_data); + unsigned int len, sofar = 0; + unsigned char *buf; + + len = hdr_len; + len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + sofar += hdr_len; + + /* This is going to get big when we start dumping FW RAM and such, + * so go ahead and use vmalloc. 
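+ *
+ * The blob handed to user space through the fw_crash_dump debugfs file
+ * is a struct ath10k_dump_file_data header ("ATH10K-FW-DUMP" magic,
+ * total length, dump version) followed by one or more TLVs
+ * (struct ath10k_tlv_dump_data); for now a single
+ * ATH10K_FW_CRASH_DUMP_REGISTERS TLV carries the register dump.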
+ */ + buf = vzalloc(len); + if (!buf) + return NULL; + + spin_lock_bh(&ar->data_lock); + + if (!crash_data->crashed_since_read) { + spin_unlock_bh(&ar->data_lock); + vfree(buf); + return NULL; + } + + dump_data = (struct ath10k_dump_file_data *)(buf); + strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", + sizeof(dump_data->df_magic)); + dump_data->len = cpu_to_le32(len); + + dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); + + memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid)); + dump_data->chip_id = cpu_to_le32(ar->chip_id); + dump_data->bus_type = cpu_to_le32(0); + dump_data->target_version = cpu_to_le32(ar->target_version); + dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); + dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); + dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); + dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); + dump_data->phy_capability = cpu_to_le32(ar->phy_capability); + dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); + dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); + dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); + dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); + dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); + + strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, + sizeof(dump_data->fw_ver)); + + dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE); + strlcpy(dump_data->kernel_ver, VERMAGIC_STRING, + sizeof(dump_data->kernel_ver)); + + dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); + dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); + + /* Gather crash-dump */ + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); + dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); + memcpy(dump_tlv->tlv_data, &crash_data->registers, + sizeof(crash_data->registers)); + sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + ar->debug.fw_crash_data->crashed_since_read = false; + + spin_unlock_bh(&ar->data_lock); + + return dump_data; +} + +static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + struct ath10k_dump_file_data *dump; + + dump = ath10k_build_dump_file(ar); + if (!dump) + return -ENODATA; + + file->private_data = dump; + + return 0; +} + +static ssize_t ath10k_fw_crash_dump_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k_dump_file_data *dump_file = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, + dump_file, + le32_to_cpu(dump_file->len)); +} + +static int ath10k_fw_crash_dump_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static const struct file_operations fops_fw_crash_dump = { + .open = ath10k_fw_crash_dump_open, + .read = ath10k_fw_crash_dump_read, + .release = ath10k_fw_crash_dump_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static int ath10k_debug_htt_stats_req(struct ath10k *ar) { u64 cookie; @@ -596,7 +841,7 @@ static int ath10k_debug_htt_stats_req(struct ath10k *ar) ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, cookie); if (ret) { - ath10k_warn("failed to send htt stats request: %d\n", ret); + ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); return ret; } @@ -619,8 +864,8 @@ static void ath10k_debug_htt_stats_dwork(struct 
work_struct *work) } static ssize_t ath10k_read_htt_stats_mask(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) + char __user *user_buf, + size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32]; @@ -632,8 +877,8 @@ static ssize_t ath10k_read_htt_stats_mask(struct file *file, } static ssize_t ath10k_write_htt_stats_mask(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) + const char __user *user_buf, + size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long mask; @@ -738,8 +983,8 @@ static const struct file_operations fops_htt_max_amsdu_ampdu = { }; static ssize_t ath10k_read_fw_dbglog(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) + char __user *user_buf, + size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned int len; @@ -770,7 +1015,7 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, if (ar->state == ATH10K_STATE_ON) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); if (ret) { - ath10k_warn("dbglog cfg failed from debugfs: %d\n", + ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ret); goto exit; } @@ -801,13 +1046,14 @@ int ath10k_debug_start(struct ath10k *ar) ret = ath10k_debug_htt_stats_req(ar); if (ret) /* continue normally anyway, this isn't serious */ - ath10k_warn("failed to start htt stats workqueue: %d\n", ret); + ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", + ret); if (ar->debug.fw_dbglog_mask) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); if (ret) /* not serious */ - ath10k_warn("failed to enable dbglog during start: %d", + ath10k_warn(ar, "failed to enable dbglog during start: %d", ret); } @@ -909,12 +1155,30 @@ static const struct file_operations fops_dfs_stats = { }; int ath10k_debug_create(struct ath10k *ar) +{ + ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); + if (!ar->debug.fw_crash_data) + return -ENOMEM; + + return 0; +} + +void ath10k_debug_destroy(struct ath10k *ar) +{ + vfree(ar->debug.fw_crash_data); + ar->debug.fw_crash_data = NULL; +} + +int ath10k_debug_register(struct ath10k *ar) { ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ar->hw->wiphy->debugfsdir); + if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) { + if (IS_ERR(ar->debug.debugfs_phy)) + return PTR_ERR(ar->debug.debugfs_phy); - if (!ar->debug.debugfs_phy) return -ENOMEM; + } INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); @@ -930,6 +1194,9 @@ int ath10k_debug_create(struct ath10k *ar) debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); + debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_fw_crash_dump); + debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_chip_id); @@ -960,7 +1227,7 @@ int ath10k_debug_create(struct ath10k *ar) return 0; } -void ath10k_debug_destroy(struct ath10k *ar) +void ath10k_debug_unregister(struct ath10k *ar) { cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); } @@ -968,7 +1235,8 @@ void ath10k_debug_destroy(struct ath10k *ar) #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) +void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, + const char *fmt, ...) { struct va_format vaf; va_list args; @@ -979,27 +1247,28 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) 
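For reference, the dump file assembled by ath10k_build_dump_file() above is a fixed header followed by TLV records, with the total length stored little-endian in the header. A minimal sketch of how a consumer could walk the TLV area, assuming the struct ath10k_dump_file_data and struct ath10k_tlv_dump_data layouts declared earlier in this patch; the helper name example_walk_dump_tlvs is hypothetical and not part of the patch:

/* Illustrative sketch only, not part of the patch. */
static void example_walk_dump_tlvs(struct ath10k_dump_file_data *dump)
{
	struct ath10k_tlv_dump_data *tlv;
	unsigned int len = le32_to_cpu(dump->len);	/* total file length, header included */
	unsigned int off = sizeof(*dump);		/* TLVs start right after the header */

	while (off + sizeof(*tlv) <= len) {
		tlv = (void *)dump + off;
		/* only ATH10K_FW_CRASH_DUMP_REGISTERS is emitted so far */
		off += sizeof(*tlv) + le32_to_cpu(tlv->tlv_len);
	}
}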
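Since the simulate_fw_crash write handler above now accepts a third keyword, a brief usage note; the debugfs mount point and the phy name are assumptions, not taken from the patch:

/* Usage sketch, assuming debugfs is mounted at /sys/kernel/debug and the
 * device is phy0:
 *
 *	echo assert > /sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash
 *
 * "soft" and "hard" behave as before; the new "assert" keyword sends the
 * deliberately bogus WMI vdev-install-key command built in
 * ath10k_debug_fw_assert() so the firmware hits a clean assert.
 */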
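Most of the remaining hunks mechanically convert ath10k_info()/ath10k_warn()/ath10k_err() and ath10k_dbg()/ath10k_dbg_dump() call sites to the instance-aware signatures declared in debug.h below, so messages are printed via dev_*() against ar->dev. A minimal sketch of the resulting calling convention; the wrapper name example_log_usage is hypothetical and not part of the patch:

/* Illustrative sketch only, not part of the patch. */
static void example_log_usage(struct ath10k *ar, int ret)
{
	/* every logging helper now takes the device instance first */
	ath10k_warn(ar, "request failed: %d\n", ret);
	ath10k_dbg(ar, ATH10K_DBG_HTC, "request completed\n");
}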
vaf.va = &args; if (ath10k_debug_mask & mask) - ath10k_printk(KERN_DEBUG, "%pV", &vaf); + dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); - trace_ath10k_log_dbg(mask, &vaf); + trace_ath10k_log_dbg(ar, mask, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_dbg); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { if (ath10k_debug_mask & mask) { if (msg) - ath10k_dbg(mask, "%s\n", msg); + ath10k_dbg(ar, mask, "%s\n", msg); print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); } /* tracing code doesn't like null strings :/ */ - trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "", + trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath10k_dbg_dump); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index a5824990bd2a8c789e69d18bb2e3f12f26548248..b3774f7f492c480d941a300a81b15a2c8a6cb234 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -34,25 +34,33 @@ enum ath10k_debug_mask { ATH10K_DBG_DATA = 0x00000200, ATH10K_DBG_BMI = 0x00000400, ATH10K_DBG_REGULATORY = 0x00000800, + ATH10K_DBG_TESTMODE = 0x00001000, ATH10K_DBG_ANY = 0xffffffff, }; extern unsigned int ath10k_debug_mask; -__printf(1, 2) int ath10k_info(const char *fmt, ...); -__printf(1, 2) int ath10k_err(const char *fmt, ...); -__printf(1, 2) int ath10k_warn(const char *fmt, ...); +__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...); +void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS int ath10k_debug_start(struct ath10k *ar); void ath10k_debug_stop(struct ath10k *ar); int ath10k_debug_create(struct ath10k *ar); void ath10k_debug_destroy(struct ath10k *ar); +int ath10k_debug_register(struct ath10k *ar); +void ath10k_debug_unregister(struct ath10k *ar); void ath10k_debug_read_service_map(struct ath10k *ar, void *service_map, size_t map_size); void ath10k_debug_read_target_stats(struct ath10k *ar, struct wmi_stats_event *ev); +struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); + +void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) @@ -75,6 +83,15 @@ static inline void ath10k_debug_destroy(struct ath10k *ar) { } +static inline int ath10k_debug_register(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_debug_unregister(struct ath10k *ar) +{ +} + static inline void ath10k_debug_read_service_map(struct ath10k *ar, void *service_map, size_t map_size) @@ -86,25 +103,40 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar, { } +static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, + int len) +{ +} + +static inline struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) +{ + return NULL; +} + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, +__printf(3, 4) void ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *fmt, ...); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +void ath10k_dbg_dump(struct ath10k *ar, + enum 
ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len); #else /* CONFIG_ATH10K_DEBUG */ -static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask, +static inline int ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask dbg_mask, const char *fmt, ...) { return 0; } -static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask, +static inline void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 2ac7beacddca4b44705fbe2cb6af3fe8f67a7461..62323fea27e136c9fdad3913dfcc36a712cd86ec 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -91,7 +91,6 @@ struct ath10k_hif_ops { int (*resume)(struct ath10k *ar); }; - static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 5fdc40d3b378303a77edb1b39fc46cb5c3660d0a..676bd4ed969bd5d9f63817ea6e5229c41a7c644e 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -45,10 +45,8 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) struct ath10k_skb_cb *skb_cb; skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); - if (!skb) { - ath10k_warn("Unable to allocate ctrl skb\n"); + if (!skb) return NULL; - } skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); @@ -56,7 +54,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); - ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); return skb; } @@ -72,13 +70,15 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + struct ath10k *ar = ep->htc->ar; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, ep->eid, skb); ath10k_htc_restore_tx_skb(ep->htc, skb); if (!ep->ep_ops.ep_tx_complete) { - ath10k_warn("no tx handler for eid %d\n", ep->eid); + ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid); dev_kfree_skb_any(skb); return; } @@ -89,12 +89,14 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, /* assumes tx_lock is held */ static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep) { + struct ath10k *ar = ep->htc->ar; + if (!ep->tx_credit_flow_enabled) return false; if (ep->tx_credits >= ep->tx_credits_per_max_message) return false; - ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", ep->eid); return true; } @@ -123,6 +125,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *skb) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ath10k_hif_sg_item sg_item; @@ -134,18 +137,10 @@ int ath10k_htc_send(struct ath10k_htc *htc, return -ECOMM; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("Invalid endpoint id: %d\n", eid); + ath10k_warn(ar, "Invalid endpoint id: %d\n", eid); return -ENOENT; } - /* FIXME: This looks 
ugly, can we fix it? */ - spin_lock_bh(&htc->tx_lock); - if (htc->stopped) { - spin_unlock_bh(&htc->tx_lock); - return -ESHUTDOWN; - } - spin_unlock_bh(&htc->tx_lock); - skb_push(skb, sizeof(struct ath10k_htc_hdr)); if (ep->tx_credit_flow_enabled) { @@ -157,7 +152,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, goto err_pull; } ep->tx_credits -= credits; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d consumed %d credits (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); @@ -188,7 +183,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, if (ep->tx_credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d reverted %d credits back (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); @@ -227,11 +222,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, int len, enum ath10k_htc_ep_id eid) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep; int i, n_reports; if (len % sizeof(*report)) - ath10k_warn("Uneven credit report len %d", len); + ath10k_warn(ar, "Uneven credit report len %d", len); n_reports = len / sizeof(*report); @@ -243,7 +239,7 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, ep = &htc->endpoint[report->eid]; ep->tx_credits += report->credits; - ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", report->eid, report->credits, ep->tx_credits); if (ep->ep_ops.ep_tx_credits) { @@ -260,6 +256,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, int length, enum ath10k_htc_ep_id src_eid) { + struct ath10k *ar = htc->ar; int status = 0; struct ath10k_htc_record *record; u8 *orig_buffer; @@ -279,7 +276,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, if (record->hdr.len > length) { /* no room left in buffer for record */ - ath10k_warn("Invalid record length: %d\n", + ath10k_warn(ar, "Invalid record length: %d\n", record->hdr.len); status = -EINVAL; break; @@ -289,7 +286,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, case ATH10K_HTC_RECORD_CREDITS: len = sizeof(struct ath10k_htc_credit_report); if (record->hdr.len < len) { - ath10k_warn("Credit report too long\n"); + ath10k_warn(ar, "Credit report too long\n"); status = -EINVAL; break; } @@ -299,7 +296,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, src_eid); break; default: - ath10k_warn("Unhandled record: id:%d length:%d\n", + ath10k_warn(ar, "Unhandled record: id:%d length:%d\n", record->hdr.id, record->hdr.len); break; } @@ -313,7 +310,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, } if (status) - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "", orig_buffer, orig_length); return status; @@ -339,8 +336,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, eid = hdr->eid; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("HTC Rx: invalid eid %d\n", eid); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "", + ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid); + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; @@ -360,19 +357,19 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, payload_len = __le16_to_cpu(hdr->len); if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { - ath10k_warn("HTC rx frame too long, len: 
%zu\n", + ath10k_warn(ar, "HTC rx frame too long, len: %zu\n", payload_len + sizeof(*hdr)); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; } if (skb->len < payload_len) { - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Rx: insufficient length, got %d, expected %d\n", skb->len, payload_len); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; @@ -388,7 +385,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, if ((trailer_len < min_len) || (trailer_len > payload_len)) { - ath10k_warn("Invalid trailer length: %d\n", + ath10k_warn(ar, "Invalid trailer length: %d\n", trailer_len); status = -EPROTO; goto out; @@ -421,7 +418,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, * this is a fatal error, target should not be * sending unsolicited messages on the ep 0 */ - ath10k_warn("HTC rx ctrl still processing\n"); + ath10k_warn(ar, "HTC rx ctrl still processing\n"); status = -EINVAL; complete(&htc->ctl_resp); goto out; @@ -442,7 +439,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, goto out; } - ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", eid, skb); ep->ep_ops.ep_rx_complete(ar, skb); @@ -459,7 +456,7 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar, { /* This is unexpected. FW is not supposed to send regular rx on this * endpoint. */ - ath10k_warn("unexpected htc rx\n"); + ath10k_warn(ar, "unexpected htc rx\n"); kfree_skb(skb); } @@ -546,6 +543,7 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, int ath10k_htc_wait_target(struct ath10k_htc *htc) { + struct ath10k *ar = htc->ar; int i, status = 0; struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; @@ -563,25 +561,25 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) * iomap writes unmasking PCI CE irqs aren't propagated * properly in KVM PCI-passthrough sometimes. */ - ath10k_warn("failed to receive control response completion, polling..\n"); + ath10k_warn(ar, "failed to receive control response completion, polling..\n"); for (i = 0; i < CE_COUNT; i++) ath10k_hif_send_complete_check(htc->ar, i, 1); status = wait_for_completion_timeout(&htc->ctl_resp, - ATH10K_HTC_WAIT_TIMEOUT_HZ); + ATH10K_HTC_WAIT_TIMEOUT_HZ); if (status == 0) status = -ETIMEDOUT; } if (status < 0) { - ath10k_err("ctl_resp never came in (%d)\n", status); + ath10k_err(ar, "ctl_resp never came in (%d)\n", status); return status; } if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { - ath10k_err("Invalid HTC ready msg len:%d\n", + ath10k_err(ar, "Invalid HTC ready msg len:%d\n", htc->control_resp_len); return -ECOMM; } @@ -592,21 +590,21 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) credit_size = __le16_to_cpu(msg->ready.credit_size); if (message_id != ATH10K_HTC_MSG_READY_ID) { - ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); + ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id); return -ECOMM; } htc->total_transmit_credits = credit_count; htc->target_credit_size = credit_size; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "Target ready! 
transmit resources: %d size:%d\n", htc->total_transmit_credits, htc->target_credit_size); if ((htc->total_transmit_credits == 0) || (htc->target_credit_size == 0)) { - ath10k_err("Invalid credit size received\n"); + ath10k_err(ar, "Invalid credit size received\n"); return -ECOMM; } @@ -623,7 +621,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) /* connect fake service */ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); if (status) { - ath10k_err("could not connect to htc service (%d)\n", status); + ath10k_err(ar, "could not connect to htc service (%d)\n", + status); return status; } @@ -634,6 +633,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_req *conn_req, struct ath10k_htc_svc_conn_resp *conn_resp) { + struct ath10k *ar = htc->ar; struct ath10k_htc_msg *msg; struct ath10k_htc_conn_svc *req_msg; struct ath10k_htc_conn_svc_response resp_msg_dummy; @@ -659,13 +659,13 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, tx_alloc = ath10k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service %s does not allocate target credits\n", htc_service_name(conn_req->service_id)); skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); if (!skb) { - ath10k_err("Failed to allocate HTC packet\n"); + ath10k_err(ar, "Failed to allocate HTC packet\n"); return -ENOMEM; } @@ -703,7 +703,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - ath10k_err("Service connect timeout: %d\n", status); + ath10k_err(ar, "Service connect timeout: %d\n", status); return status; } @@ -716,11 +716,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->connect_service_response))) { - ath10k_err("Invalid resp message ID 0x%x", message_id); + ath10k_err(ar, "Invalid resp message ID 0x%x", message_id); return -EPROTO; } - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n", htc_service_name(service_id), resp_msg->status, resp_msg->eid); @@ -729,7 +729,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, /* check response status */ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) { - ath10k_err("HTC Service %s connect request failed: 0x%x)\n", + ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n", htc_service_name(service_id), resp_msg->status); return -EPROTO; @@ -780,18 +780,18 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if (status) return status; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc ep %d ul polled %d dl polled %d\n", ep->eid, ep->ul_is_polled, ep->dl_is_polled); if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service '%s' eid %d TX flow control disabled\n", htc_service_name(ep->service_id), assigned_eid); } @@ -799,27 +799,26 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, return status; } -struct sk_buff *ath10k_htc_alloc_skb(int size) +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size) { 
struct sk_buff *skb; skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr)); - if (!skb) { - ath10k_warn("could not allocate HTC tx skb\n"); + if (!skb) return NULL; - } skb_reserve(skb, sizeof(struct ath10k_htc_hdr)); /* FW/HTC requires 4-byte aligned streams */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned HTC tx skb\n"); + ath10k_warn(ar, "Unaligned HTC tx skb\n"); return skb; } int ath10k_htc_start(struct ath10k_htc *htc) { + struct ath10k *ar = htc->ar; struct sk_buff *skb; int status = 0; struct ath10k_htc_msg *msg; @@ -835,7 +834,7 @@ int ath10k_htc_start(struct ath10k_htc *htc) msg->hdr.message_id = __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID); - ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); if (status) { @@ -846,13 +845,6 @@ int ath10k_htc_start(struct ath10k_htc *htc) return 0; } -void ath10k_htc_stop(struct ath10k_htc *htc) -{ - spin_lock_bh(&htc->tx_lock); - htc->stopped = true; - spin_unlock_bh(&htc->tx_lock); -} - /* registered target arrival callback from the HIF layer */ int ath10k_htc_init(struct ath10k *ar) { @@ -862,7 +854,6 @@ int ath10k_htc_init(struct ath10k *ar) spin_lock_init(&htc->tx_lock); - htc->stopped = false; ath10k_htc_reset_endpoint_states(htc); /* setup HIF layer callbacks */ diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 4716d331e6b6504d712c858345545a410cbcbab1..527179c0edced2f008e6cfa37daf79d3cf94ec0d 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -214,7 +214,6 @@ struct ath10k_htc_frame { struct ath10k_htc_record trailer[0]; } __packed __aligned(4); - /*******************/ /* Host-side stuff */ /*******************/ @@ -332,7 +331,7 @@ struct ath10k_htc { struct ath10k *ar; struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; - /* protects endpoint and stopped fields */ + /* protects endpoints */ spinlock_t tx_lock; struct ath10k_htc_ops htc_ops; @@ -345,8 +344,6 @@ struct ath10k_htc { int total_transmit_credits; struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; - - bool stopped; }; int ath10k_htc_init(struct ath10k *ar); @@ -357,7 +354,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_resp *conn_resp); int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *packet); -void ath10k_htc_stop(struct ath10k_htc *htc); -struct sk_buff *ath10k_htc_alloc_skb(int size); +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 19c12cc8d66317ebbb8123b4a54b6654c5705c26..56cb4aceb38312315c54de41e8cde657df70cbb2 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -74,12 +74,14 @@ int ath10k_htt_init(struct ath10k *ar) static int ath10k_htt_verify_version(struct ath10k_htt *htt) { - ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n", + struct ath10k *ar = htt->ar; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n", htt->target_version_major, htt->target_version_minor); if (htt->target_version_major != 2 && htt->target_version_major != 3) { - ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n", + ath10k_err(ar, "unsupported htt major version %d. 
supported versions are 2 and 3\n", htt->target_version_major); return -ENOTSUPP; } @@ -89,6 +91,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt) int ath10k_htt_setup(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int status; init_completion(&htt->target_version_received); @@ -98,9 +101,9 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; status = wait_for_completion_timeout(&htt->target_version_received, - HTT_TARGET_VERSION_TIMEOUT_HZ); + HTT_TARGET_VERSION_TIMEOUT_HZ); if (status <= 0) { - ath10k_warn("htt version request timed out\n"); + ath10k_warn(ar, "htt version request timed out\n"); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 6c93f3885ee571097eeb59f59e9d72e772c97ab7..3b44217a6c190b70749182037e051583983b6c48 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -265,7 +265,6 @@ enum htt_mgmt_tx_status { /*=== target -> host messages ===============================================*/ - enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_T2H_MSG_TYPE_RX_IND = 0x1, @@ -1032,6 +1031,7 @@ static inline struct htt_stats_conf_item *htt_stats_conf_next_item( { return (void *)item + sizeof(*item) + roundup(item->length, 4); } + /* * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank * @@ -1148,7 +1148,6 @@ struct htt_resp { }; } __packed; - /*** host side structures follow ***/ struct htt_tx_done { diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 80cdac15588a2352608881acb5fc59fe8808f0ec..60d40a04508b1d8189f41dd88d417d76d68ac50a 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -42,7 +42,6 @@ /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 - static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static void ath10k_htt_txrx_compl_task(unsigned long ptr); @@ -133,7 +132,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) dma_addr_t paddr; int ret = 0, idx; - idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr)); + idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); while (num > 0) { skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); if (!skb) { @@ -171,7 +170,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) } fail: - *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx); + *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); return ret; } @@ -223,6 +222,7 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) { struct ath10k_htt *htt = (struct ath10k_htt *)arg; + ath10k_htt_rx_msdu_buff_replenish(htt); } @@ -271,13 +271,14 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int idx; struct sk_buff *msdu; lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_ring.fill_cnt == 0) { - ath10k_warn("tried to pop sk_buff from an empty rx ring\n"); + ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); return NULL; } @@ -311,14 +312,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, struct sk_buff **tail_msdu, u32 *attention) { + struct ath10k *ar = htt->ar; int msdu_len, msdu_chaining = 0; - struct sk_buff *msdu; + struct sk_buff *msdu, *next; struct htt_rx_desc *rx_desc; 
lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_confused) { - ath10k_warn("htt is confused. refusing rx\n"); + ath10k_warn(ar, "htt is confused. refusing rx\n"); return -1; } @@ -331,7 +333,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", msdu->data, msdu->len + skb_tailroom(msdu)); rx_desc = (struct htt_rx_desc *)msdu->data; @@ -354,7 +356,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, ath10k_htt_rx_free_msdu_chain(*head_msdu); *head_msdu = NULL; msdu = NULL; - ath10k_err("htt rx stopped. cannot recover\n"); + ath10k_err(ar, "htt rx stopped. cannot recover\n"); htt->rx_confused = true; break; } @@ -429,7 +431,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, next->len + skb_tailroom(next), DMA_FROM_DEVICE); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx chained: ", next->data, next->len + skb_tailroom(next)); @@ -448,11 +450,11 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, if (last_msdu) { msdu->next = NULL; break; - } else { - struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); - msdu->next = next; - msdu = next; } + + next = ath10k_htt_rx_netbuf_pop(htt); + msdu->next = next; + msdu = next; } *tail_msdu = msdu; @@ -478,18 +480,21 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, static void ath10k_htt_rx_replenish_task(unsigned long ptr) { struct ath10k_htt *htt = (struct ath10k_htt *)ptr; + ath10k_htt_rx_msdu_buff_replenish(htt); } int ath10k_htt_rx_alloc(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; dma_addr_t paddr; void *vaddr; + size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); if (!is_power_of_2(htt->rx_ring.size)) { - ath10k_warn("htt rx ring size is not power of 2\n"); + ath10k_warn(ar, "htt rx ring size is not power of 2\n"); return -EINVAL; } @@ -512,9 +517,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) if (!htt->rx_ring.netbufs_ring) goto err_netbuf; - vaddr = dma_alloc_coherent(htt->ar->dev, - (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)), - &paddr, GFP_DMA); + size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring); + + vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA); if (!vaddr) goto err_dma_ring; @@ -550,7 +555,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, (unsigned long)htt); - ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", htt->rx_ring.size, htt->rx_ring.fill_level); return 0; @@ -572,7 +577,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) return -ENOMEM; } -static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_WEP40: @@ -588,11 +594,12 @@ static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) return 0; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unknown encryption type %d\n", type); return 0; } -static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type 
type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: @@ -608,7 +615,7 @@ static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) return 8; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unknown encryption type %d\n", type); return 0; } @@ -620,19 +627,21 @@ static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) rxd = (void *)skb->data - sizeof(*rxd); fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + RX_MSDU_START_INFO1_DECAP_FORMAT); if (fmt == RX_MSDU_DECAP_RAW) return (void *)skb->data; - else - return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; + + return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; } /* This function only applies for first msdu in an msdu chain */ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) { + u8 *qc; + if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); + qc = ieee80211_get_qos_ctl(hdr); if (qc[0] & 0x80) return true; } @@ -819,19 +828,55 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, return true; } +static const char * const tid_to_ac[] = { + "BE", + "BK", + "BK", + "BE", + "VI", + "VI", + "VO", + "VO", +}; + +static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) +{ + u8 *qc; + int tid; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return ""; + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (tid < 8) + snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); + else + snprintf(out, size, "tid %d", tid); + + return out; +} + static void ath10k_process_rx(struct ath10k *ar, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { struct ieee80211_rx_status *status; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + char tid[32]; status = IEEE80211_SKB_RXCB(skb); *status = *rx_status; - ath10k_dbg(ATH10K_DBG_DATA, - "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n", + ath10k_dbg(ar, ATH10K_DBG_DATA, + "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, + ieee80211_get_SA(hdr), + ath10k_get_tid(hdr, tid, sizeof(tid)), + is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? + "mcast" : "ucast", + (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, status->flag == 0 ? "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? 
"vht" : "", @@ -843,8 +888,9 @@ static void ath10k_process_rx(struct ath10k *ar, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), - !!(status->flag & RX_FLAG_MMIC_ERROR)); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", + !!(status->flag & RX_FLAG_MMIC_ERROR), + !!(status->flag & RX_FLAG_AMSDU_MORE)); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", skb->data, skb->len); ieee80211_rx(ar->hw, skb); @@ -860,18 +906,19 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, struct ieee80211_rx_status *rx_status, struct sk_buff *skb_in) { + struct ath10k *ar = htt->ar; struct htt_rx_desc *rxd; struct sk_buff *skb = skb_in; struct sk_buff *first; enum rx_msdu_decap_format fmt; enum htt_rx_mpdu_encrypt_type enctype; struct ieee80211_hdr *hdr; - u8 hdr_buf[64], addr[ETH_ALEN], *qos; + u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos; unsigned int hdr_len; rxd = (void *)skb->data - sizeof(*rxd); enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); + RX_MPDU_START_INFO0_ENCRYPT_TYPE); hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; hdr_len = ieee80211_hdrlen(hdr->frame_control); @@ -893,8 +940,8 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, /* First frame in an A-MSDU chain has more decapped data. */ if (skb == first) { len = round_up(ieee80211_hdrlen(hdr->frame_control), 4); - len += round_up(ath10k_htt_rx_crypto_param_len(enctype), - 4); + len += round_up(ath10k_htt_rx_crypto_param_len(ar, + enctype), 4); decap_hdr += len; } @@ -904,10 +951,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, skb_trim(skb, skb->len - FCS_LEN); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* pull decapped header and copy DA */ + /* pull decapped header and copy SA & DA */ hdr = (struct ieee80211_hdr *)skb->data; hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN); + ether_addr_copy(da, ieee80211_get_DA(hdr)); + ether_addr_copy(sa, ieee80211_get_SA(hdr)); skb_pull(skb, hdr_len); /* push original 802.11 header */ @@ -921,8 +969,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, qos = ieee80211_get_qos_ctl(hdr); qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - /* original 802.11 header has a different DA */ - memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN); + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); break; case RX_MSDU_DECAP_ETHERNET2_DIX: /* strip ethernet header and insert decapped 802.11 @@ -965,6 +1016,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { + struct ath10k *ar = htt->ar; struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; enum rx_msdu_decap_format fmt; @@ -974,16 +1026,16 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, /* This shouldn't happen. If it does than it may be a FW bug. 
*/ if (skb->next) { - ath10k_warn("htt rx received chained non A-MSDU frame\n"); + ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n"); ath10k_htt_rx_free_msdu_chain(skb->next); skb->next = NULL; } rxd = (void *)skb->data - sizeof(*rxd); fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + RX_MSDU_START_INFO1_DECAP_FORMAT); enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); + RX_MPDU_START_INFO0_ENCRYPT_TYPE); hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; hdr_len = ieee80211_hdrlen(hdr->frame_control); @@ -1011,7 +1063,8 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, rfc1042 = hdr; rfc1042 += roundup(hdr_len, 4); - rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); + rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar, + enctype), 4); skb_pull(skb, sizeof(struct ethhdr)); memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), @@ -1120,27 +1173,29 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, bool channel_set, u32 attention) { + struct ath10k *ar = htt->ar; + if (head->len == 0) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx dropping due to zero-len\n"); return false; } if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx dropping due to decrypt-err\n"); return false; } if (!channel_set) { - ath10k_warn("no channel configured; ignoring frame!\n"); + ath10k_warn(ar, "no channel configured; ignoring frame!\n"); return false; } /* Skip mgmt frames while we handle this in WMI */ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || attention & RX_ATTENTION_FLAGS_MGMT_TYPE) { - ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); return false; } @@ -1148,14 +1203,14 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && !htt->ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx ignoring frame w/ status %d\n", status); return false; } if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx CAC running\n"); return false; } @@ -1166,6 +1221,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { + struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; struct htt_rx_indication_mpdu_range *mpdu_ranges; struct htt_rx_desc *rxd; @@ -1211,7 +1267,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, rx_status); } - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", rx, sizeof(*rx) + (sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges)); @@ -1233,7 +1289,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, &attention); if (ret < 0) { - ath10k_warn("failed to pop amsdu from htt rx ring %d\n", + ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n", ret); ath10k_htt_rx_free_msdu_chain(msdu_head); continue; @@ -1280,8 +1336,9 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, } static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, - struct htt_rx_fragment_indication *frag) + struct htt_rx_fragment_indication *frag) { + struct ath10k *ar = htt->ar; struct sk_buff 
*msdu_head, *msdu_tail; enum htt_rx_mpdu_encrypt_type enctype; struct htt_rx_desc *rxd; @@ -1308,10 +1365,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, &attention); spin_unlock_bh(&htt->rx_ring.lock); - ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); if (ret) { - ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n", + ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", ret); ath10k_htt_rx_free_msdu_chain(msdu_head); return; @@ -1325,10 +1382,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + RX_MSDU_START_INFO1_DECAP_FORMAT); if (fmt != RX_MSDU_DECAP_RAW) { - ath10k_warn("we dont support non-raw fragmented rx yet\n"); + ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n"); dev_kfree_skb_any(msdu_head); goto end; } @@ -1340,17 +1397,17 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); if (tkip_mic_err) - ath10k_warn("tkip mic error\n"); + ath10k_warn(ar, "tkip mic error\n"); if (decrypt_err) { - ath10k_warn("decryption err in fragmented rx\n"); + ath10k_warn(ar, "decryption err in fragmented rx\n"); dev_kfree_skb_any(msdu_head); goto end; } if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) { hdrlen = ieee80211_hdrlen(hdr->frame_control); - paramlen = ath10k_htt_rx_crypto_param_len(enctype); + paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype); /* It is more efficient to move the header than the payload */ memmove((void *)msdu_head->data + paramlen, @@ -1364,7 +1421,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, trim = 4; /* remove crypto trailer */ - trim += ath10k_htt_rx_crypto_tail_len(enctype); + trim += ath10k_htt_rx_crypto_tail_len(ar, enctype); /* last fragment of TKIP frags has MIC */ if (!ieee80211_has_morefrags(hdr->frame_control) && @@ -1372,20 +1429,20 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, trim += 8; if (trim > msdu_head->len) { - ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); + ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? 
drop\n"); dev_kfree_skb_any(msdu_head); goto end; } skb_trim(msdu_head, msdu_head->len - trim); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", msdu_head->data, msdu_head->len); ath10k_process_rx(htt->ar, rx_status, msdu_head); end: if (fw_desc_len > 0) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "expecting more fragmented rx in one indication %d\n", fw_desc_len); } @@ -1415,12 +1472,12 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, tx_done.discard = true; break; default: - ath10k_warn("unhandled tx completion status %d\n", status); + ath10k_warn(ar, "unhandled tx completion status %d\n", status); tx_done.discard = true; break; } - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", resp->data_tx_completion.num_msdus); for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { @@ -1441,14 +1498,14 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) tid = MS(info0, HTT_RX_BA_INFO0_TID); peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx addba tid %hu peer_id %hu size %hhu\n", tid, peer_id, ev->window_size); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); if (!peer) { - ath10k_warn("received addba event for invalid peer_id: %hu\n", + ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", peer_id); spin_unlock_bh(&ar->data_lock); return; @@ -1456,13 +1513,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) arvif = ath10k_get_arvif(ar, peer->vdev_id); if (!arvif) { - ath10k_warn("received addba event for invalid vdev_id: %u\n", + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", peer->vdev_id); spin_unlock_bh(&ar->data_lock); return; } - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx start rx ba session sta %pM tid %hu size %hhu\n", peer->addr, tid, ev->window_size); @@ -1481,14 +1538,14 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) tid = MS(info0, HTT_RX_BA_INFO0_TID); peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx delba tid %hu peer_id %hu\n", tid, peer_id); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); if (!peer) { - ath10k_warn("received addba event for invalid peer_id: %hu\n", + ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", peer_id); spin_unlock_bh(&ar->data_lock); return; @@ -1496,13 +1553,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) arvif = ath10k_get_arvif(ar, peer->vdev_id); if (!arvif) { - ath10k_warn("received addba event for invalid vdev_id: %u\n", + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", peer->vdev_id); spin_unlock_bh(&ar->data_lock); return; } - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx stop rx ba session sta %pM tid %hu\n", peer->addr, tid); @@ -1517,9 +1574,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) /* confirm alignment */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("unaligned htt message, expect trouble\n"); + ath10k_warn(ar, "unaligned htt message, expect trouble\n"); - ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 
resp->hdr.msg_type); switch (resp->hdr.msg_type) { case HTT_T2H_MSG_TYPE_VERSION_CONF: { @@ -1583,7 +1640,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) struct ath10k *ar = htt->ar; struct htt_security_indication *ev = &resp->security_indication; - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "sec ind peer_id %d unicast %d type %d\n", __le16_to_cpu(ev->peer_id), !!(ev->flags & HTT_SECURITY_IS_UNICAST), @@ -1592,7 +1649,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); break; @@ -1601,7 +1658,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) /* FIX THIS */ break; case HTT_T2H_MSG_TYPE_STATS_CONF: - trace_ath10k_htt_stats(skb->data, skb->len); + trace_ath10k_htt_stats(ar, skb->data, skb->len); break; case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: /* Firmware can return tx frames if it's unable to fully @@ -1609,7 +1666,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) * sends all tx frames as already inspected so this shouldn't * happen unless fw has a bug. */ - ath10k_warn("received an unexpected htt tx inspect event\n"); + ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); break; case HTT_T2H_MSG_TYPE_RX_ADDBA: ath10k_htt_rx_addba(ar, resp); @@ -1624,9 +1681,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } default: - ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n", resp->hdr.msg_type); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; }; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 8b27bfcc1de39b3d3b86dc7881b532064e0a76f7..bd87a35201d895856620719dc5477fbedb3d7062 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -58,6 +58,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int msdu_id; lockdep_assert_held(&htt->tx_lock); @@ -67,24 +68,29 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) if (msdu_id == htt->max_num_pending_tx) return -ENOBUFS; - ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); __set_bit(msdu_id, htt->used_msdu_ids); return msdu_id; } void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { + struct ath10k *ar = htt->ar; + lockdep_assert_held(&htt->tx_lock); if (!test_bit(msdu_id, htt->used_msdu_ids)) - ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id); + ath10k_warn(ar, "trying to free unallocated msdu_id %d\n", + msdu_id); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); __clear_bit(msdu_id, htt->used_msdu_ids); } int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; + spin_lock_init(&htt->tx_lock); init_waitqueue_head(&htt->empty_tx_wq); @@ -93,7 +99,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) else htt->max_num_pending_tx = 
TARGET_NUM_MSDU_DESC; - ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * @@ -122,6 +128,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct htt_tx_done tx_done = {0}; int msdu_id; @@ -130,7 +137,7 @@ static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) if (!test_bit(msdu_id, htt->used_msdu_ids)) continue; - ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); tx_done.discard = 1; @@ -147,7 +154,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) kfree(htt->pending_tx); kfree(htt->used_msdu_ids); dma_pool_destroy(htt->tx_pool); - return; } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) @@ -157,6 +163,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; @@ -165,7 +172,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -184,6 +191,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) { + struct ath10k *ar = htt->ar; struct htt_stats_req *req; struct sk_buff *skb; struct htt_cmd *cmd; @@ -192,7 +200,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) len += sizeof(cmd->hdr); len += sizeof(cmd->stats_req); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -214,7 +222,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { - ath10k_warn("failed to send htt type stats request: %d", ret); + ath10k_warn(ar, "failed to send htt type stats request: %d", + ret); dev_kfree_skb_any(skb); return ret; } @@ -224,6 +233,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring *ring; @@ -242,7 +252,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + (sizeof(*ring) * num_rx_ring); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -311,6 +321,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { + struct ath10k *ar = htt->ar; struct htt_aggr_conf *aggr_conf; struct sk_buff *skb; struct htt_cmd *cmd; @@ -328,7 +339,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, len = sizeof(cmd->hdr); len += sizeof(cmd->aggr_conf); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -340,7 +351,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; - ath10k_dbg(ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt 
h2t aggr cfg msg amsdu %d ampdu %d", aggr_conf->max_num_amsdu_subframes, aggr_conf->max_num_ampdu_subframes); @@ -355,7 +366,8 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); @@ -364,7 +376,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) int msdu_id = -1; int res; - res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; @@ -382,7 +393,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); - txdesc = ath10k_htc_alloc_skb(len); + txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; @@ -429,7 +440,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; @@ -545,11 +557,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); sg_items[0].transfer_id = 0; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 007e855f4ba99f9067725a11b85fdeadb3412483..3cf5702c1e7ef7ee49fa2b6860b08854fc48b53b 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -28,16 +28,21 @@ #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin" +#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 #define ATH10K_FW_API2_FILE "firmware-2.bin" +#define ATH10K_FW_API3_FILE "firmware-3.bin" + +#define ATH10K_FW_UTF_FILE "utf.bin" /* includes also the null byte */ #define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" +#define REG_DUMP_COUNT_QCA988X 60 + struct ath10k_fw_ie { __le32 id; __le32 len; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9d61bb157189bed0798170f8f5be5b76ead4387b..46709301a51e1986bb4b505b659d7f94bbfca6d9 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -26,6 +26,7 @@ #include "wmi.h" #include "htt.h" #include "txrx.h" +#include "testmode.h" /**********/ /* Crypto */ @@ -36,6 +37,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, enum set_key_cmd cmd, const u8 *macaddr) { + struct ath10k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { .vdev_id = arvif->vdev_id, 
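The dominant change in these hunks is that every ath10k_dbg()/ath10k_warn()/ath10k_err() call now takes a struct ath10k pointer as its first argument, so log output can be attributed to a specific device instance; the new prototypes themselves are not part of this window. A minimal standalone model of that idea, in plain C with hypothetical names (the dev_name field and the exact signature are assumptions, not the driver's real definitions):

/* Standalone sketch of per-device logging: the message is prefixed with an
 * identifier read from the device object instead of being global state.
 */
#include <stdarg.h>
#include <stdio.h>

struct ath10k {
	const char *dev_name;	/* e.g. a PCI device name; assumed field */
};

static void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "ath10k %s: warning: ", ar->dev_name);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct ath10k ar = { .dev_name = "0000:02:00.0" };

	/* Call sites pass the device, as in the converted hunks above. */
	ath10k_warn(&ar, "failed to resolve vdev id\n");
	return 0;
}

The same pattern explains why many functions gain a local "struct ath10k *ar = htt->ar;" (or arvif->ar): the device pointer has to be in scope for every logging call.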
.key_idx = key->keyidx, @@ -73,7 +75,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, arg.key_flags = WMI_KEY_PAIRWISE; break; default: - ath10k_warn("cipher %d is not supported\n", key->cipher); + ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; } @@ -168,7 +170,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, first_errno = ret; if (ret) - ath10k_warn("failed to remove peer wep key %d: %d\n", + ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); peer->keys[i] = NULL; @@ -197,7 +199,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, list_for_each_entry(peer, &ar->peers, list) { for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { if (peer->keys[i] == key) { - memcpy(addr, peer->addr, ETH_ALEN); + ether_addr_copy(addr, peer->addr); peer->keys[i] = NULL; break; } @@ -216,14 +218,13 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, first_errno = ret; if (ret) - ath10k_warn("failed to remove key for %pM: %d\n", + ath10k_warn(ar, "failed to remove key for %pM: %d\n", addr, ret); } return first_errno; } - /*********************/ /* General utilities */ /*********************/ @@ -327,14 +328,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) ret = ath10k_wmi_peer_create(ar, vdev_id, addr); if (ret) { - ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n", + ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", addr, vdev_id, ret); return ret; } ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); if (ret) { - ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n", + ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", addr, vdev_id, ret); return ret; } @@ -355,7 +356,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_pdev_set_param(ar, param, ATH10K_KICKOUT_THRESHOLD); if (ret) { - ath10k_warn("failed to set kickout threshold on vdev %i: %d\n", + ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -364,7 +365,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MIN_IDLE); if (ret) { - ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -373,7 +374,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MAX_IDLE); if (ret) { - ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -382,7 +383,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); if (ret) { - ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -449,7 +450,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) if (peer->vdev_id != vdev_id) continue; - ath10k_warn("removing stale peer %pM from vdev_id %d\n", + ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); list_del(&peer->list); @@ -492,19 +493,6 @@ static inline 
int ath10k_vdev_setup_sync(struct ath10k *ar) return 0; } -static bool ath10k_monitor_is_enabled(struct ath10k *ar) -{ - lockdep_assert_held(&ar->conf_mutex); - - ath10k_dbg(ATH10K_DBG_MAC, - "mac monitor refs: promisc %d monitor %d cac %d\n", - ar->promisc, ar->monitor, - test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)); - - return ar->promisc || ar->monitor || - test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); -} - static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) { struct cfg80211_chan_def *chandef = &ar->chandef; @@ -531,35 +519,35 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { - ath10k_warn("failed to request monitor vdev %i start: %d\n", + ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n", vdev_id, ret); return ret; } ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); if (ret) { - ath10k_warn("failed to put up monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); goto vdev_stop; } ar->monitor_vdev_id = vdev_id; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", ar->monitor_vdev_id); return 0; vdev_stop: ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n", + ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", ar->monitor_vdev_id, ret); return ret; @@ -573,20 +561,20 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("failed to put down monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("failed to to request monitor vdev %i stop: %d\n", + ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); ret = ath10k_vdev_setup_sync(ar); if (ret) - ath10k_warn("failed to synchronise monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", ar->monitor_vdev_id); return ret; } @@ -597,35 +585,29 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - bit = ffs(ar->free_vdev_map); - if (bit == 0) { - ath10k_warn("failed to find free vdev id for monitor vdev\n"); + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); return -ENOMEM; } + bit = ffs(ar->free_vdev_map); + ar->monitor_vdev_id = bit - 1; - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, 0, ar->mac_addr); if (ret) { - ath10k_warn("failed to request monitor vdev %i creation: %d\n", + ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", ar->monitor_vdev_id, ret); - goto vdev_fail; + return ret; } - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n", + ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 
ar->monitor_vdev_id); return 0; - -vdev_fail: - /* - * Restore the ID to the global map. - */ - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); - return ret; } static int ath10k_monitor_vdev_delete(struct ath10k *ar) @@ -636,14 +618,14 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar) ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); if (ret) { - ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n", + ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", ar->monitor_vdev_id, ret); return ret; } - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); + ar->free_vdev_map |= 1 << ar->monitor_vdev_id; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); return ret; } @@ -654,63 +636,70 @@ static int ath10k_monitor_start(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - if (!ath10k_monitor_is_enabled(ar)) { - ath10k_warn("trying to start monitor with no references\n"); - return 0; - } - - if (ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n"); - return 0; - } - ret = ath10k_monitor_vdev_create(ar); if (ret) { - ath10k_warn("failed to create monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); return ret; } ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); if (ret) { - ath10k_warn("failed to start monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); ath10k_monitor_vdev_delete(ar); return ret; } ar->monitor_started = true; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); return 0; } -static void ath10k_monitor_stop(struct ath10k *ar) +static int ath10k_monitor_stop(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); - if (ath10k_monitor_is_enabled(ar)) { - ath10k_dbg(ATH10K_DBG_MAC, - "mac monitor will be stopped later\n"); - return; + ret = ath10k_monitor_vdev_stop(ar); + if (ret) { + ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); + return ret; } - if (!ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_MAC, - "mac monitor probably failed to start earlier\n"); - return; + ret = ath10k_monitor_vdev_delete(ar); + if (ret) { + ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); + return ret; } - ret = ath10k_monitor_vdev_stop(ar); - if (ret) - ath10k_warn("failed to stop monitor vdev: %d\n", ret); + ar->monitor_started = false; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); - ret = ath10k_monitor_vdev_delete(ar); - if (ret) - ath10k_warn("failed to delete monitor vdev: %d\n", ret); + return 0; +} - ar->monitor_started = false; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n"); +static int ath10k_monitor_recalc(struct ath10k *ar) +{ + bool should_start; + + lockdep_assert_held(&ar->conf_mutex); + + should_start = ar->monitor || + ar->filter_flags & FIF_PROMISC_IN_BSS || + test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac monitor recalc started? %d should? 
%d\n", + ar->monitor_started, should_start); + + if (should_start == ar->monitor_started) + return 0; + + if (should_start) + return ath10k_monitor_start(ar); + + return ath10k_monitor_stop(ar); } static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) @@ -741,14 +730,14 @@ static int ath10k_start_cac(struct ath10k *ar) set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); - ret = ath10k_monitor_start(ar); + ret = ath10k_monitor_recalc(ar); if (ret) { - ath10k_warn("failed to start monitor (cac): %d\n", ret); + ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); return ret; } - ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", ar->monitor_vdev_id); return 0; @@ -765,7 +754,7 @@ static int ath10k_stop_cac(struct ath10k *ar) clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_monitor_stop(ar); - ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); return 0; } @@ -791,12 +780,12 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar) * radiation is not allowed, make this channel DFS_UNAVAILABLE * by indicating that radar was detected. */ - ath10k_warn("failed to start CAC: %d\n", ret); + ath10k_warn(ar, "failed to start CAC: %d\n", ret); ieee80211_radar_detected(ar->hw); } } -static int ath10k_vdev_start(struct ath10k_vif *arvif) +static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart) { struct ath10k *ar = arvif->ar; struct cfg80211_chan_def *chandef = &ar->chandef; @@ -833,21 +822,25 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) arg.ssid_len = arvif->vif->bss_conf.ssid_len; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d start center_freq %d phymode %s\n", arg.vdev_id, arg.channel.freq, ath10k_wmi_phymode_str(arg.channel.mode)); - ret = ath10k_wmi_vdev_start(ar, &arg); + if (restart) + ret = ath10k_wmi_vdev_restart(ar, &arg); + else + ret = ath10k_wmi_vdev_start(ar, &arg); + if (ret) { - ath10k_warn("failed to start WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", arg.vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to synchronise setup for vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n", arg.vdev_id, ret); return ret; } @@ -858,6 +851,16 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) return ret; } +static int ath10k_vdev_start(struct ath10k_vif *arvif) +{ + return ath10k_vdev_start_restart(arvif, false); +} + +static int ath10k_vdev_restart(struct ath10k_vif *arvif) +{ + return ath10k_vdev_start_restart(arvif, true); +} + static int ath10k_vdev_stop(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -869,14 +872,14 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { - ath10k_warn("failed to stop WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", arvif->vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to syncronise setup for vdev %i: %d\n", + ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -892,8 +895,9 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) } static void ath10k_control_beaconing(struct ath10k_vif *arvif, - struct ieee80211_bss_conf *info) + struct ieee80211_bss_conf *info) { + struct ath10k *ar = 
arvif->ar; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); @@ -926,12 +930,12 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, return; arvif->aid = 0; - memcpy(arvif->bssid, info->bssid, ETH_ALEN); + ether_addr_copy(arvif->bssid, info->bssid); ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to bring up vdev %d: %i\n", + ath10k_warn(ar, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); ath10k_vdev_stop(arvif); return; @@ -940,13 +944,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, arvif->is_started = true; arvif->is_up = true; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } static void ath10k_control_ibss(struct ath10k_vif *arvif, struct ieee80211_bss_conf *info, const u8 self_peer[ETH_ALEN]) { + struct ath10k *ar = arvif->ar; u32 vdev_param; int ret = 0; @@ -955,7 +960,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (!info->ibss_joined) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); if (ret) - ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n", self_peer, arvif->vdev_id, ret); if (is_zero_ether_addr(arvif->bssid)) @@ -964,7 +969,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, arvif->bssid); if (ret) { - ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", arvif->bssid, arvif->vdev_id, ret); return; } @@ -976,7 +981,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); if (ret) { - ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n", self_peer, arvif->vdev_id, ret); return; } @@ -985,7 +990,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, ATH10K_DEFAULT_ATIM); if (ret) - ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n", + ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", arvif->vdev_id, ret); } @@ -1012,7 +1017,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, conf->dynamic_ps_timeout); if (ret) { - ath10k_warn("failed to set inactivity time for vdev %d: %i\n", + ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", arvif->vdev_id, ret); return ret; } @@ -1020,12 +1025,12 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) psmode = WMI_STA_PS_MODE_DISABLED; } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", arvif->vdev_id, psmode ? 
"enable" : "disable"); ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); if (ret) { - ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n", + ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", psmode, arvif->vdev_id, ret); return ret; } @@ -1045,7 +1050,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, { lockdep_assert_held(&ar->conf_mutex); - memcpy(arg->addr, sta->addr, ETH_ALEN); + ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = sta->aid; arg->peer_flags |= WMI_PEER_AUTH; @@ -1100,21 +1105,21 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, ies = rcu_dereference(bss->ies); wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, - WLAN_OUI_TYPE_MICROSOFT_WPA, - ies->data, - ies->len); + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies->data, + ies->len); rcu_read_unlock(); cfg80211_put_bss(ar->hw->wiphy, bss); } /* FIXME: base on RSN IE/WPA IE is a correct idea? */ if (rsnie || wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; } if (wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; } } @@ -1152,6 +1157,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, { const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; int i, n; + u32 stbc; lockdep_assert_held(&ar->conf_mutex); @@ -1188,7 +1194,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { - u32 stbc; stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_RC_RX_STBC_FLAG_S; @@ -1223,7 +1228,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_num_spatial_streams = sta->rx_nss; } - ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", arg->addr, arg->peer_ht_rates.num_rates, arg->peer_num_spatial_streams); @@ -1240,7 +1245,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, lockdep_assert_held(&ar->conf_mutex); if (sta->wme && sta->uapsd_queues) { - ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) @@ -1256,7 +1261,6 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; - if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) max_sp = sta->max_sp; @@ -1265,7 +1269,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, WMI_AP_PS_PEER_PARAM_UAPSD, uapsd); if (ret) { - ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1275,7 +1279,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, WMI_AP_PS_PEER_PARAM_MAX_SP, max_sp); if (ret) { - ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1285,9 +1289,10 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, sta->listen_interval - mac80211 patch required. 
Currently use 10 seconds */ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, - WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10); + WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, + 10); if (ret) { - ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1309,7 +1314,6 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, arg->peer_flags |= WMI_PEER_VHT; arg->peer_vht_caps = vht_cap->cap; - ampdu_factor = (vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; @@ -1334,7 +1338,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, arg->peer_vht_rates.tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); - ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", sta->addr, arg->peer_max_mpdu, arg->peer_flags); } @@ -1407,7 +1411,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, break; } - ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", sta->addr, ath10k_wmi_phymode_str(phymode)); arg->peer_phymode = phymode; @@ -1480,7 +1484,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!ap_sta) { - ath10k_warn("failed to find station entry for bss %pM vdev %i\n", + ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); rcu_read_unlock(); return; @@ -1493,7 +1497,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, bss_conf, &peer_arg); if (ret) { - ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); rcu_read_unlock(); return; @@ -1503,28 +1507,28 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ret = ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); return; } ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); if (ret) { - ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", arvif->vdev_id, ret); return; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); arvif->aid = bss_conf->aid; - memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN); + ether_addr_copy(arvif->bssid, bss_conf->bssid); ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to set vdev %d up: %d\n", + ath10k_warn(ar, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); return; } @@ -1550,7 +1554,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, * No idea why this happens, even though VDEV-DOWN is supposed * to be analogous to link down, so just stop the VDEV. 
*/ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", arvif->vdev_id); /* FIXME: check return value */ @@ -1563,7 +1567,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, * interfaces as it expects there is no rx when no interface is * running. */ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); /* FIXME: why don't we print error if wmi call fails? */ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); @@ -1584,7 +1588,7 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); if (ret) { - ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n", + ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); return ret; } @@ -1592,23 +1596,23 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, peer_arg.peer_reassoc = reassoc; ret = ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); if (ret) { - ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } - if (!sta->wme) { + if (!sta->wme && !reassoc) { arvif->num_legacy_stations++; ret = ath10k_recalc_rtscts_prot(arvif); if (ret) { - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } @@ -1616,14 +1620,14 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_install_peer_wep_keys(arvif, sta->addr); if (ret) { - ath10k_warn("failed to install peer wep keys for vdev %i: %d\n", + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); if (ret) { - ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n", + ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } @@ -1642,7 +1646,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, arvif->num_legacy_stations--; ret = ath10k_recalc_rtscts_prot(arvif); if (ret) { - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } @@ -1650,7 +1654,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_clear_peer_keys(arvif, sta->addr); if (ret) { - ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n", + ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1742,7 +1746,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) continue; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", ch - arg.channels, arg.n_channels, ch->freq, ch->max_power, ch->max_reg_power, @@ -1785,7 +1789,7 @@ 
static void ath10k_regd_update(struct ath10k *ar) ret = ath10k_update_channel_list(ar); if (ret) - ath10k_warn("failed to update channel list: %d\n", ret); + ath10k_warn(ar, "failed to update channel list: %d\n", ret); regpair = ar->ath_common.regulatory.regpair; @@ -1806,7 +1810,7 @@ static void ath10k_regd_update(struct ath10k *ar) regpair->reg_5ghz_ctl, wmi_dfs_reg); if (ret) - ath10k_warn("failed to set pdev regdomain: %d\n", ret); + ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); } static void ath10k_reg_notifier(struct wiphy *wiphy, @@ -1819,12 +1823,12 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", request->dfs_region); result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, request->dfs_region); if (!result) - ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n", + ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", request->dfs_region); } @@ -1852,16 +1856,15 @@ static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; } -static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, - struct ieee80211_tx_info *info) +static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif) { - if (info->control.vif) - return ath10k_vif_to_arvif(info->control.vif)->vdev_id; + if (vif) + return ath10k_vif_to_arvif(vif)->vdev_id; if (ar->monitor_started) return ar->monitor_vdev_id; - ath10k_warn("failed to resolve vdev id\n"); + ath10k_warn(ar, "failed to resolve vdev id\n"); return 0; } @@ -1897,6 +1900,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) { struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, wep_key_work); + struct ath10k *ar = arvif->ar; int ret, keyidx = arvif->def_wep_key_newidx; mutex_lock(&arvif->ar->conf_mutex); @@ -1907,7 +1911,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) if (arvif->def_wep_key_idx == keyidx) goto unlock; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", arvif->vdev_id, keyidx); ret = ath10k_wmi_vdev_set_param(arvif->ar, @@ -1915,7 +1919,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) arvif->ar->wmi.vdev_param->def_keyid, keyidx); if (ret) { - ath10k_warn("failed to update wep key index for vdev %d: %d\n", + ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", arvif->vdev_id, ret); goto unlock; @@ -1995,7 +1999,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) ar->fw_features)) { if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= ATH10K_MAX_NUM_MGMT_PENDING) { - ath10k_warn("reached WMI management tranmist queue limit\n"); + ath10k_warn(ar, "reached WMI management transmit queue limit\n"); ret = -EBUSY; goto exit; } @@ -2019,7 +2023,8 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) exit: if (ret) { - ath10k_warn("failed to transmit packet, dropping: %d\n", ret); + ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", + ret); ieee80211_free_txskb(ar->hw, skb); } } @@ -2061,7 +2066,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n", + ath10k_dbg(ar, 
ATH10K_DBG_MAC, "mac offchannel skb %p\n", skb); hdr = (struct ieee80211_hdr *)skb->data; @@ -2074,13 +2079,13 @@ void ath10k_offchan_tx_work(struct work_struct *work) if (peer) /* FIXME: should this use ath10k_warn()? */ - ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", peer_addr, vdev_id); if (!peer) { ret = ath10k_peer_create(ar, vdev_id, peer_addr); if (ret) - ath10k_warn("failed to create peer %pM on vdev %d: %d\n", + ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); } @@ -2094,13 +2099,13 @@ void ath10k_offchan_tx_work(struct work_struct *work) ret = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); if (ret <= 0) - ath10k_warn("timed out waiting for offchannel skb %p\n", + ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", skb); if (!peer) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); if (ret) - ath10k_warn("failed to delete peer %pM on vdev %d: %d\n", + ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); } @@ -2134,7 +2139,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) ret = ath10k_wmi_mgmt_tx(ar, skb); if (ret) { - ath10k_warn("failed to transmit management frame via WMI: %d\n", + ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } @@ -2145,34 +2150,40 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) /* Scanning */ /************/ -/* - * This gets called if we dont get a heart-beat during scan. - * This may indicate the FW has hung and we need to abort the - * scan manually to prevent cancel_hw_scan() from deadlocking - */ -void ath10k_reset_scan(unsigned long ptr) +void __ath10k_scan_finish(struct ath10k *ar) { - struct ath10k *ar = (struct ath10k *)ptr; + lockdep_assert_held(&ar->data_lock); - spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); - return; + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + if (ar->scan.is_roc) + ieee80211_remain_on_channel_expired(ar->hw); + else + ieee80211_scan_completed(ar->hw, + (ar->scan.state == + ATH10K_SCAN_ABORTING)); + /* fall through */ + case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_IDLE; + ar->scan_channel = NULL; + ath10k_offchan_tx_purge(ar); + cancel_delayed_work(&ar->scan.timeout); + complete_all(&ar->scan.completed); + break; } +} - ath10k_warn("scan timed out, firmware problem?\n"); - - if (ar->scan.is_roc) - ieee80211_remain_on_channel_expired(ar->hw); - else - ieee80211_scan_completed(ar->hw, 1 /* aborted */); - - ar->scan.in_progress = false; - complete_all(&ar->scan.completed); +void ath10k_scan_finish(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); } -static int ath10k_abort_scan(struct ath10k *ar) +static int ath10k_scan_stop(struct ath10k *ar) { struct wmi_stop_scan_arg arg = { .req_id = 1, /* FIXME */ @@ -2183,47 +2194,79 @@ static int ath10k_abort_scan(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - del_timer_sync(&ar->scan.timeout); + ret = ath10k_wmi_stop_scan(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); + goto out; + } - spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); - return 0; + ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); + if (ret 
== 0) { + ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); + ret = -ETIMEDOUT; + } else if (ret > 0) { + ret = 0; } - ar->scan.aborting = true; +out: + /* Scan state should be updated upon scan completion but in case + * firmware fails to deliver the event (for whatever reason) it is + * desired to clean up scan state anyway. Firmware may have just + * dropped the scan completion event delivery due to transport pipe + * being overflown with data and/or it can recover on its own before + * next scan request is submitted. + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state != ATH10K_SCAN_IDLE) + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); - ret = ath10k_wmi_stop_scan(ar, &arg); - if (ret) { - ath10k_warn("failed to stop wmi scan: %d\n", ret); - spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; - ath10k_offchan_tx_purge(ar); - spin_unlock_bh(&ar->data_lock); - return -EIO; - } + return ret; +} - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); - if (ret == 0) - ath10k_warn("timed out while waiting for scan to stop\n"); +static void ath10k_scan_abort(struct ath10k *ar) +{ + int ret; - /* scan completion may be done right after we timeout here, so let's - * check the in_progress and tell mac80211 scan is completed. if we - * don't do that and FW fails to send us scan completion indication - * then userspace won't be able to scan anymore */ - ret = 0; + lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - ath10k_warn("failed to stop scan, it's still in progress\n"); - ar->scan.in_progress = false; - ath10k_offchan_tx_purge(ar); - ret = -ETIMEDOUT; + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + /* This can happen if timeout worker kicked in and called + * abortion while scan completion was being processed. + */ + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + ar->scan.state = ATH10K_SCAN_ABORTING; + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to abort scan: %d\n", ret); + + spin_lock_bh(&ar->data_lock); + break; } + spin_unlock_bh(&ar->data_lock); +} - return ret; +void ath10k_scan_timeout_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + scan.timeout.work); + + mutex_lock(&ar->conf_mutex); + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); } static int ath10k_start_scan(struct ath10k *ar, @@ -2239,17 +2282,16 @@ static int ath10k_start_scan(struct ath10k *ar, ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); if (ret == 0) { - ath10k_abort_scan(ar); - return ret; + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + + return -ETIMEDOUT; } - /* the scan can complete earlier, before we even - * start the timer. in that case the timer handler - * checks ar->scan.in_progress and bails out if its - * false. Add a 200ms margin to account event/command - * processing. 
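The scan hunks above replace the old in_progress/aborting booleans with a small state machine; the states ATH10K_SCAN_IDLE, ATH10K_SCAN_STARTING, ATH10K_SCAN_RUNNING and ATH10K_SCAN_ABORTING and the helper ath10k_scan_state_str() are referenced here but defined outside this patch window. A minimal standalone sketch of the presumed enum and of the "finish" transition that __ath10k_scan_finish() performs (the string values and the simplified finish helper are assumptions):

#include <stdio.h>

/* Presumed scan states; the real definitions live elsewhere in the driver. */
enum ath10k_scan_state {
	ATH10K_SCAN_IDLE,
	ATH10K_SCAN_STARTING,
	ATH10K_SCAN_RUNNING,
	ATH10K_SCAN_ABORTING,
};

static const char *ath10k_scan_state_str(enum ath10k_scan_state state)
{
	switch (state) {
	case ATH10K_SCAN_IDLE:
		return "idle";
	case ATH10K_SCAN_STARTING:
		return "starting";
	case ATH10K_SCAN_RUNNING:
		return "running";
	case ATH10K_SCAN_ABORTING:
		return "aborting";
	}
	return "unknown";
}

/* Any non-idle state collapses back to idle on completion or forced cleanup,
 * which is the role of __ath10k_scan_finish() in the hunk above: it reports
 * the result to mac80211 (completed vs. aborted) and resets the state even
 * when firmware never delivers the completion event.
 */
static void scan_finish(enum ath10k_scan_state *state)
{
	if (*state != ATH10K_SCAN_IDLE)
		printf("finishing scan from state %s\n",
		       ath10k_scan_state_str(*state));
	*state = ATH10K_SCAN_IDLE;
}

int main(void)
{
	enum ath10k_scan_state st = ATH10K_SCAN_RUNNING;

	scan_finish(&st);
	return 0;
}

This is also why the timeout path changes from a bare timer to a delayed work item: the timeout worker can take conf_mutex and drive the same abort/finish transitions as the normal completion path.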
*/ - mod_timer(&ar->scan.timeout, jiffies + - msecs_to_jiffies(arg->max_scan_time+200)); + /* Add a 200ms margin to account for event/command processing */ + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(arg->max_scan_time+200)); return 0; } @@ -2269,11 +2311,11 @@ static void ath10k_tx(struct ieee80211_hw *hw, /* We should disable CCK RATE due to P2P */ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); ATH10K_SKB_CB(skb)->htt.is_offchan = false; ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); - ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, info); + ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); /* it makes no sense to process injected frames like that */ if (vif && vif->type != NL80211_IFTYPE_MONITOR) { @@ -2289,7 +2331,8 @@ static void ath10k_tx(struct ieee80211_hw *hw, ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); - ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); skb_queue_tail(&ar->offchan_tx_queue, skb); ieee80211_queue_work(hw, &ar->offchan_tx_work); @@ -2318,15 +2361,16 @@ void ath10k_halt(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - if (ath10k_monitor_is_enabled(ar)) { - clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); - ar->promisc = false; - ar->monitor = false; + clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + ar->filter_flags = 0; + ar->monitor = false; + + if (ar->monitor_started) ath10k_monitor_stop(ar); - } - del_timer_sync(&ar->scan.timeout); - ath10k_reset_scan((unsigned long)ar); + ar->monitor_started = false; + + ath10k_scan_finish(ar); ath10k_peer_cleanup_all(ar); ath10k_core_stop(ar); ath10k_hif_power_down(ar); @@ -2380,7 +2424,7 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, tx_ant); if (ret) { - ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n", + ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", ret, tx_ant); return ret; } @@ -2388,7 +2432,7 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, rx_ant); if (ret) { - ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n", + ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", ret, rx_ant); return ret; } @@ -2435,29 +2479,32 @@ static int ath10k_start(struct ieee80211_hw *hw) WARN_ON(1); ret = -EINVAL; goto err; + case ATH10K_STATE_UTF: + ret = -EBUSY; + goto err; } ret = ath10k_hif_power_up(ar); if (ret) { - ath10k_err("Could not init hif: %d\n", ret); + ath10k_err(ar, "Could not init hif: %d\n", ret); goto err_off; } - ret = ath10k_core_start(ar); + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); if (ret) { - ath10k_err("Could not init core: %d\n", ret); + ath10k_err(ar, "Could not init core: %d\n", ret); goto err_power_down; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); if (ret) { - ath10k_warn("failed to enable PMF QOS: %d\n", ret); + ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); goto err_core_stop; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); if (ret) { - ath10k_warn("failed to enable dynamic BW: %d\n", ret); + ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); goto err_core_stop; } @@ 
-2477,7 +2524,7 @@ static int ath10k_start(struct ieee80211_hw *hw) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->arp_ac_override, 0); if (ret) { - ath10k_warn("failed to set arp ac override parameter: %d\n", + ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", ret); goto err_core_stop; } @@ -2485,6 +2532,8 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->num_started_vdevs = 0; ath10k_regd_update(ar); + ath10k_spectral_start(ar); + mutex_unlock(&ar->conf_mutex); return 0; @@ -2515,6 +2564,7 @@ static void ath10k_stop(struct ieee80211_hw *hw) } mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->restart_work); } @@ -2528,7 +2578,7 @@ static int ath10k_config_ps(struct ath10k *ar) list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath10k_mac_vif_setup_ps(arvif); if (ret) { - ath10k_warn("failed to setup powersave: %d\n", ret); + ath10k_warn(ar, "failed to setup powersave: %d\n", ret); break; } } @@ -2566,7 +2616,7 @@ static void ath10k_config_chan(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n", ar->chandef.chan->center_freq, ar->chandef.center_freq1, @@ -2576,24 +2626,27 @@ static void ath10k_config_chan(struct ath10k *ar) /* First stop monitor interface. Some FW versions crash if there's a * lone monitor interface. */ if (ar->monitor_started) - ath10k_monitor_vdev_stop(ar); + ath10k_monitor_stop(ar); list_for_each_entry(arvif, &ar->arvifs, list) { if (!arvif->is_started) continue; + if (!arvif->is_up) + continue; + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) continue; - ret = ath10k_vdev_stop(arvif); + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) { - ath10k_warn("failed to stop vdev %d: %d\n", + ath10k_warn(ar, "failed to down vdev %d: %d\n", arvif->vdev_id, ret); continue; } } - /* all vdevs are now stopped - now attempt to restart them */ + /* all vdevs are downed now - attempt to restart and re-up them */ list_for_each_entry(arvif, &ar->arvifs, list) { if (!arvif->is_started) @@ -2602,9 +2655,9 @@ static void ath10k_config_chan(struct ath10k *ar) if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) continue; - ret = ath10k_vdev_start(arvif); + ret = ath10k_vdev_restart(arvif); if (ret) { - ath10k_warn("failed to start vdev %d: %d\n", + ath10k_warn(ar, "failed to restart vdev %d: %d\n", arvif->vdev_id, ret); continue; } @@ -2615,14 +2668,13 @@ static void ath10k_config_chan(struct ath10k *ar) ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to bring vdev up %d: %d\n", + ath10k_warn(ar, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); continue; } } - if (ath10k_monitor_is_enabled(ar)) - ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); + ath10k_monitor_recalc(ar); } static int ath10k_config(struct ieee80211_hw *hw, u32 changed) @@ -2635,7 +2687,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config channel %dMHz flags 0x%x radar %d\n", conf->chandef.chan->center_freq, conf->chandef.chan->flags, @@ -2655,21 +2707,21 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n", 
hw->conf.power_level); param = ar->wmi.pdev_param->txpower_limit2g; ret = ath10k_wmi_pdev_set_param(ar, param, hw->conf.power_level * 2); if (ret) - ath10k_warn("failed to set 2g txpower %d: %d\n", + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", hw->conf.power_level, ret); param = ar->wmi.pdev_param->txpower_limit5g; ret = ath10k_wmi_pdev_set_param(ar, param, hw->conf.power_level * 2); if (ret) - ath10k_warn("failed to set 5g txpower %d: %d\n", + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", hw->conf.power_level, ret); } @@ -2677,19 +2729,10 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) ath10k_config_ps(ar); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { - if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) { - ar->monitor = true; - ret = ath10k_monitor_start(ar); - if (ret) { - ath10k_warn("failed to start monitor (config): %d\n", - ret); - ar->monitor = false; - } - } else if (!(conf->flags & IEEE80211_CONF_MONITOR) && - ar->monitor) { - ar->monitor = false; - ath10k_monitor_stop(ar); - } + ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; + ret = ath10k_monitor_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } mutex_unlock(&ar->conf_mutex); @@ -2724,11 +2767,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); INIT_LIST_HEAD(&arvif->list); - bit = ffs(ar->free_vdev_map); - if (bit == 0) { + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); ret = -EBUSY; goto err; } + bit = ffs(ar->free_vdev_map); arvif->vdev_id = bit - 1; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; @@ -2760,25 +2804,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, break; } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); if (ret) { - ath10k_warn("failed to create WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", arvif->vdev_id, ret); goto err; } - ar->free_vdev_map &= ~BIT(arvif->vdev_id); + ar->free_vdev_map &= ~(1 << arvif->vdev_id); list_add(&arvif->list, &ar->arvifs); vdev_param = ar->wmi.vdev_param->def_keyid; ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, arvif->def_wep_key_idx); if (ret) { - ath10k_warn("failed to set vdev %i default key id: %d\n", + ath10k_warn(ar, "failed to set vdev %i default key id: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } @@ -2788,7 +2832,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ATH10K_HW_TXRX_NATIVE_WIFI); /* 10.X firmware does not support this VDEV parameter. 
Do not warn */ if (ret && ret != -EOPNOTSUPP) { - ath10k_warn("failed to set vdev %i TX encapsulation: %d\n", + ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } @@ -2796,14 +2840,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { - ath10k_warn("failed to create vdev %i peer for AP: %d\n", + ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } ret = ath10k_mac_set_kickout(arvif); if (ret) { - ath10k_warn("failed to set vdev %i kickout parameters: %d\n", + ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2815,7 +2859,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i RX wake policy: %d\n", + ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2825,7 +2869,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i TX wake thresh: %d\n", + ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2835,7 +2879,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i PSPOLL count: %d\n", + ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2843,14 +2887,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); if (ret) { - ath10k_warn("failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); if (ret) { - ath10k_warn("failed to set frag threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2864,7 +2908,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); - ar->free_vdev_map &= ~BIT(arvif->vdev_id); + ar->free_vdev_map |= 1 << arvif->vdev_id; list_del(&arvif->list); err: @@ -2892,26 +2936,32 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, dev_kfree_skb_any(arvif->beacon); arvif->beacon = NULL; } + spin_unlock_bh(&ar->data_lock); - ar->free_vdev_map |= 1 << (arvif->vdev_id); + ret = ath10k_spectral_vif_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", + arvif->vdev_id, ret); + + ar->free_vdev_map |= 1 << arvif->vdev_id; list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); if (ret) - ath10k_warn("failed to remove peer for AP vdev %i: %d\n", + ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n", arvif->vdev_id, ret); kfree(arvif->u.ap.noa_data); } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", arvif->vdev_id); ret = 
ath10k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) - ath10k_warn("failed to delete WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", arvif->vdev_id, ret); ath10k_peer_cleanup(ar, arvif->vdev_id); @@ -2946,18 +2996,9 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; - if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) { - ar->promisc = true; - ret = ath10k_monitor_start(ar); - if (ret) { - ath10k_warn("failed to start monitor (promisc): %d\n", - ret); - ar->promisc = false; - } - } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) { - ar->promisc = false; - ath10k_monitor_stop(ar); - } + ret = ath10k_monitor_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc montior: %d\n", ret); mutex_unlock(&ar->conf_mutex); } @@ -2970,7 +3011,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret = 0; - u32 vdev_param, pdev_param; + u32 vdev_param, pdev_param, slottime, preamble; mutex_lock(&ar->conf_mutex); @@ -2982,17 +3023,17 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, vdev_param = ar->wmi.vdev_param->beacon_interval; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->beacon_interval); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d beacon_interval %d\n", arvif->vdev_id, arvif->beacon_interval); if (ret) - ath10k_warn("failed to set beacon interval for vdev %d: %i\n", + ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BEACON) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "vdev %d set beacon tx mode to staggered\n", arvif->vdev_id); @@ -3000,14 +3041,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_pdev_set_param(ar, pdev_param, WMI_BEACON_STAGGERED_MODE); if (ret) - ath10k_warn("failed to set beacon mode for vdev %d: %i\n", + ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BEACON_INFO) { arvif->dtim_period = info->dtim_period; - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d dtim_period %d\n", arvif->vdev_id, arvif->dtim_period); @@ -3015,7 +3056,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->dtim_period); if (ret) - ath10k_warn("failed to set dtim period for vdev %d: %i\n", + ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", arvif->vdev_id, ret); } @@ -3034,14 +3075,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID && vif->type != NL80211_IFTYPE_AP) { if (!is_zero_ether_addr(info->bssid)) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d create peer %pM\n", arvif->vdev_id, info->bssid); ret = ath10k_peer_create(ar, arvif->vdev_id, info->bssid); if (ret) - ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n", + ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n", info->bssid, arvif->vdev_id, ret); if (vif->type == NL80211_IFTYPE_STATION) { @@ -3049,15 +3090,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, * this is never erased as we it for crypto key * clearing; this is FW requirement */ - memcpy(arvif->bssid, info->bssid, ETH_ALEN); + 
ether_addr_copy(arvif->bssid, info->bssid); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d start %pM\n", arvif->vdev_id, info->bssid); ret = ath10k_vdev_start(arvif); if (ret) { - ath10k_warn("failed to start vdev %i: %d\n", + ath10k_warn(ar, "failed to start vdev %i: %d\n", arvif->vdev_id, ret); goto exit; } @@ -3081,42 +3122,40 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ERP_SLOT) { - u32 slottime; if (info->use_short_slot) slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", arvif->vdev_id, slottime); vdev_param = ar->wmi.vdev_param->slot_time; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, slottime); if (ret) - ath10k_warn("failed to set erp slot for vdev %d: %i\n", + ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { - u32 preamble; if (info->use_short_preamble) preamble = WMI_VDEV_PREAMBLE_SHORT; else preamble = WMI_VDEV_PREAMBLE_LONG; - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d preamble %dn", arvif->vdev_id, preamble); @@ -3124,13 +3163,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, preamble); if (ret) - ath10k_warn("failed to set preamble for vdev %d: %i\n", + ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_ASSOC) { - if (info->assoc) + if (info->assoc) { + /* Workaround: Make sure monitor vdev is not running + * when associating to prevent some firmware revisions + * (e.g. 10.1 and 10.2) from crashing. 
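Aside (illustrative, not part of the patch): the hw_scan() and remain_on_channel() hunks below replace the single ar->scan.in_progress flag with an explicit state machine (IDLE, STARTING, RUNNING, ABORTING), so a new scan is only admitted from the idle state and every other state returns -EBUSY while the lock is held. A minimal stand-alone C sketch of that admission pattern, using invented names and a pthread mutex in place of the driver's spinlock:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

enum scan_state { SCAN_IDLE, SCAN_STARTING, SCAN_RUNNING, SCAN_ABORTING };

struct scanner {
        pthread_mutex_t lock;
        enum scan_state state;
        int vdev_id;
};

/* Admit a new scan only when the state machine is idle; every other
 * state means a scan is already in flight and the caller gets -EBUSY. */
static int scan_try_start(struct scanner *s, int vdev_id)
{
        int ret = -EBUSY;

        pthread_mutex_lock(&s->lock);
        switch (s->state) {
        case SCAN_IDLE:
                s->state = SCAN_STARTING;
                s->vdev_id = vdev_id;
                ret = 0;
                break;
        case SCAN_STARTING:
        case SCAN_RUNNING:
        case SCAN_ABORTING:
                ret = -EBUSY;
                break;
        }
        pthread_mutex_unlock(&s->lock);

        return ret;
}

int main(void)
{
        struct scanner s = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .state = SCAN_IDLE,
                .vdev_id = -1,
        };

        printf("first start: %d\n", scan_try_start(&s, 0));  /* 0 */
        printf("second start: %d\n", scan_try_start(&s, 0)); /* -EBUSY */
        return 0;
}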
+ */ + if (ar->monitor_started) + ath10k_monitor_stop(ar); ath10k_bss_assoc(hw, vif, info); + ath10k_monitor_recalc(ar); + } } exit: @@ -3151,20 +3198,26 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = false; + ar->scan.vdev_id = arvif->vdev_id; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: ret = -EBUSY; - goto exit; + break; } - - reinit_completion(&ar->scan.started); - reinit_completion(&ar->scan.completed); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = false; - ar->scan.vdev_id = arvif->vdev_id; spin_unlock_bh(&ar->data_lock); + if (ret) + goto exit; + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3196,9 +3249,9 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, ret = ath10k_start_scan(ar, &arg); if (ret) { - ath10k_warn("failed to start hw scan: %d\n", ret); + ath10k_warn(ar, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); } @@ -3211,14 +3264,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - int ret; mutex_lock(&ar->conf_mutex); - ret = ath10k_abort_scan(ar); - if (ret) { - ath10k_warn("failed to abort scan: %d\n", ret); - ieee80211_scan_completed(hw, 1 /* aborted */); - } + cancel_delayed_work_sync(&ar->scan.timeout); + ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); } @@ -3256,7 +3305,7 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, key->keyidx); if (ret) - ath10k_warn("failed to set vdev %i group key as default key: %d\n", + ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", arvif->vdev_id, ret); } @@ -3294,7 +3343,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (!peer) { if (cmd == SET_KEY) { - ath10k_warn("failed to install key for non-existent peer %pM\n", + ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", peer_addr); ret = -EOPNOTSUPP; goto exit; @@ -3317,7 +3366,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr); if (ret) { - ath10k_warn("failed to install key for vdev %i peer %pM: %d\n", + ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); goto exit; } @@ -3332,7 +3381,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, peer->keys[key->keyidx] = NULL; else if (peer == NULL) /* impossible unless FW goes crazy */ - ath10k_warn("Peer %pM disappeared!\n", peer_addr); + ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); spin_unlock_bh(&ar->data_lock); exit: @@ -3368,45 +3417,45 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_RC_BW_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", sta->addr, bw); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, 
sta->addr, WMI_PEER_CHAN_WIDTH, bw); if (err) - ath10k_warn("failed to update STA %pM peer bw %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); } if (changed & IEEE80211_RC_NSS_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", sta->addr, nss); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_NSS, nss); if (err) - ath10k_warn("failed to update STA %pM nss %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", sta->addr, nss, err); } if (changed & IEEE80211_RC_SMPS_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", sta->addr, smps); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_SMPS_STATE, smps); if (err) - ath10k_warn("failed to update STA %pM smps %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", sta->addr, smps, err); } if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", sta->addr); err = ath10k_station_assoc(ar, arvif, sta, true); if (err) - ath10k_warn("failed to reassociate station: %pM\n", + ath10k_warn(ar, "failed to reassociate station: %pM\n", sta->addr); } @@ -3451,31 +3500,31 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, max_num_peers = TARGET_NUM_PEERS; if (ar->num_peers >= max_num_peers) { - ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n", + ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n", ar->num_peers, max_num_peers); ret = -ENOBUFS; goto exit; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer create %pM (new sta) num_peers %d\n", arvif->vdev_id, sta->addr, ar->num_peers); ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); if (ret) - ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n", + ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* * Existing station deletion. */ - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer delete %pM (sta gone)\n", arvif->vdev_id, sta->addr); ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) - ath10k_warn("failed to delete peer %pM for vdev %d: %i\n", + ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", sta->addr, arvif->vdev_id, ret); if (vif->type == NL80211_IFTYPE_STATION) @@ -3487,12 +3536,12 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* * New association. */ - ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", sta->addr); ret = ath10k_station_assoc(ar, arvif, sta, false); if (ret) - ath10k_warn("failed to associate station %pM for vdev %i: %i\n", + ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && @@ -3501,12 +3550,12 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* * Disassociation. 
*/ - ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", sta->addr); ret = ath10k_station_disassoc(ar, arvif, sta); if (ret) - ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n", + ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); } exit: @@ -3515,7 +3564,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, } static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, - u16 ac, bool enable) + u16 ac, bool enable) { struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); u32 value = 0; @@ -3554,7 +3603,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_UAPSD, arvif->u.sta.uapsd); if (ret) { - ath10k_warn("failed to set uapsd params: %d\n", ret); + ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); goto exit; } @@ -3567,7 +3616,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_RX_WAKE_POLICY, value); if (ret) - ath10k_warn("failed to set rx wake param: %d\n", ret); + ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); exit: return ret; @@ -3617,13 +3666,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, /* FIXME: FW accepts wmm params per hw, not per vif */ ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); if (ret) { - ath10k_warn("failed to set wmm params: %d\n", ret); + ath10k_warn(ar, "failed to set wmm params: %d\n", ret); goto exit; } ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) - ath10k_warn("failed to set sta uapsd: %d\n", ret); + ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); exit: mutex_unlock(&ar->conf_mutex); @@ -3641,27 +3690,33 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_start_scan_arg arg; - int ret; + int ret = 0; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + reinit_completion(&ar->scan.on_channel); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = true; + ar->scan.vdev_id = arvif->vdev_id; + ar->scan.roc_freq = chan->center_freq; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: ret = -EBUSY; - goto exit; + break; } - - reinit_completion(&ar->scan.started); - reinit_completion(&ar->scan.completed); - reinit_completion(&ar->scan.on_channel); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = true; - ar->scan.vdev_id = arvif->vdev_id; - ar->scan.roc_freq = chan->center_freq; spin_unlock_bh(&ar->data_lock); + if (ret) + goto exit; + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3676,17 +3731,21 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, ret = ath10k_start_scan(ar, &arg); if (ret) { - ath10k_warn("failed to start roc scan: %d\n", ret); + ath10k_warn(ar, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); goto exit; } ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); if (ret == 0) { - ath10k_warn("failed to switch to channel for roc scan\n"); - 
ath10k_abort_scan(ar); + ath10k_warn(ar, "failed to switch to channel for roc scan\n"); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + ret = -ETIMEDOUT; goto exit; } @@ -3702,7 +3761,8 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - ath10k_abort_scan(ar); + cancel_delayed_work_sync(&ar->scan.timeout); + ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); return 0; @@ -3721,12 +3781,12 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", arvif->vdev_id, value); ret = ath10k_mac_set_rts(arvif, value); if (ret) { - ath10k_warn("failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); break; } @@ -3744,12 +3804,12 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", arvif->vdev_id, value); ret = ath10k_mac_set_rts(arvif, value); if (ret) { - ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n", arvif->vdev_id, ret); break; } @@ -3789,7 +3849,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, }), ATH10K_FLUSH_TIMEOUT_HZ); if (ret <= 0 || skip) - ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n", + ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", skip, ar->state, ret); skip: @@ -3824,7 +3884,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw, ret = ath10k_hif_suspend(ar); if (ret) { - ath10k_warn("failed to suspend hif: %d\n", ret); + ath10k_warn(ar, "failed to suspend hif: %d\n", ret); goto resume; } @@ -3833,7 +3893,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw, resume: ret = ath10k_wmi_pdev_resume_target(ar); if (ret) - ath10k_warn("failed to resume target: %d\n", ret); + ath10k_warn(ar, "failed to resume target: %d\n", ret); ret = 1; exit: @@ -3850,14 +3910,14 @@ static int ath10k_resume(struct ieee80211_hw *hw) ret = ath10k_hif_resume(ar); if (ret) { - ath10k_warn("failed to resume hif: %d\n", ret); + ath10k_warn(ar, "failed to resume hif: %d\n", ret); ret = 1; goto exit; } ret = ath10k_wmi_pdev_resume_target(ar); if (ret) { - ath10k_warn("failed to resume target: %d\n", ret); + ath10k_warn(ar, "failed to resume target: %d\n", ret); ret = 1; goto exit; } @@ -3878,7 +3938,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw) /* If device failed to restart it will be in a different state, e.g. 
* ATH10K_STATE_WEDGED */ if (ar->state == ATH10K_STATE_RESTARTED) { - ath10k_info("device successfully recovered\n"); + ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; } @@ -4005,8 +4065,8 @@ ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask, continue; else if (mask->control[band].ht_mcs[i] == 0x00) break; - else - return false; + + return false; } ht_nss = i; @@ -4017,8 +4077,8 @@ ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask, continue; else if (mask->control[band].vht_mcs[i] == 0x0000) break; - else - return false; + + return false; } vht_nss = i; @@ -4075,7 +4135,8 @@ ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask, } static bool -ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, +ath10k_bitrate_mask_rate(struct ath10k *ar, + const struct cfg80211_bitrate_mask *mask, enum ieee80211_band band, u8 *fixed_rate, u8 *fixed_nss) @@ -4133,7 +4194,7 @@ ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, nss <<= 4; pream <<= 6; - ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n", pream, nss, rate); *fixed_rate = pream | nss | rate; @@ -4141,7 +4202,8 @@ ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, return true; } -static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask, +static bool ath10k_get_fixed_rate_nss(struct ath10k *ar, + const struct cfg80211_bitrate_mask *mask, enum ieee80211_band band, u8 *fixed_rate, u8 *fixed_nss) @@ -4151,7 +4213,7 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask, return true; /* Next Check single rate is set */ - return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss); + return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss); } static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, @@ -4171,16 +4233,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, goto exit; if (fixed_rate == WMI_FIXED_RATE_NONE) - ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); if (force_sgi) - ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n"); vdev_param = ar->wmi.vdev_param->fixed_rate; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, fixed_rate); if (ret) { - ath10k_warn("failed to set fixed rate param 0x%02x: %d\n", + ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", fixed_rate, ret); ret = -EINVAL; goto exit; @@ -4193,7 +4255,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, vdev_param, fixed_nss); if (ret) { - ath10k_warn("failed to set fixed nss param %d: %d\n", + ath10k_warn(ar, "failed to set fixed nss param %d: %d\n", fixed_nss, ret); ret = -EINVAL; goto exit; @@ -4206,7 +4268,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, force_sgi); if (ret) { - ath10k_warn("failed to set sgi param %d: %d\n", + ath10k_warn(ar, "failed to set sgi param %d: %d\n", force_sgi, ret); ret = -EINVAL; goto exit; @@ -4235,14 +4297,14 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw, return -EINVAL; if (!ath10k_default_bitrate_mask(ar, band, mask)) { - if (!ath10k_get_fixed_rate_nss(mask, band, + if (!ath10k_get_fixed_rate_nss(ar, mask, band, &fixed_rate, &fixed_nss)) return -EINVAL; } if (fixed_rate == WMI_FIXED_RATE_NONE && 
force_sgi) { - ath10k_warn("failed to force SGI usage for default rate settings\n"); + ath10k_warn(ar, "failed to force SGI usage for default rate settings\n"); return -EINVAL; } @@ -4261,7 +4323,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, spin_lock_bh(&ar->data_lock); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->bandwidth, sta->rx_nss, sta->smps_mode); @@ -4280,7 +4342,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: - ath10k_warn("Invalid bandwith %d in rc update for %pM\n", + ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n", sta->bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; @@ -4307,7 +4369,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, smps = WMI_PEER_SMPS_DYNAMIC; break; case IEEE80211_SMPS_NUM_MODES: - ath10k_warn("Invalid smps %d in sta rc update for %pM\n", + ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", sta->smps_mode, sta->addr); smps = WMI_PEER_SMPS_PS_NONE; break; @@ -4339,9 +4401,10 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) { + struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - ath10k_dbg(ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", arvif->vdev_id, sta->addr, tid, action); switch (action) { @@ -4393,6 +4456,9 @@ static const struct ieee80211_ops ath10k_ops = { .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, .ampdu_action = ath10k_ampdu_action, + + CFG80211_TESTMODE_CMD(ath10k_tm_cmd) + #ifdef CONFIG_PM .suspend = ath10k_suspend, .resume = ath10k_resume, @@ -4489,12 +4555,12 @@ static struct ieee80211_rate ath10k_rates[] = { #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) -struct ath10k *ath10k_mac_create(void) +struct ath10k *ath10k_mac_create(size_t priv_size) { struct ieee80211_hw *hw; struct ath10k *ar; - hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops); + hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops); if (!hw) return NULL; @@ -4644,7 +4710,6 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) return ht_cap; } - static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -4669,7 +4734,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) ath10k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { - ath10k_warn("No VIF found for vdev %d\n", vdev_id); + ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); return NULL; } @@ -4759,7 +4824,6 @@ int ath10k_mac_register(struct ath10k *ar) IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_HAS_RATE_CONTROL | - IEEE80211_HW_SUPPORTS_STATIC_SMPS | IEEE80211_HW_AP_LINK_PS | IEEE80211_HW_SPECTRUM_MGMT; @@ -4767,8 +4831,10 @@ int ath10k_mac_register(struct ath10k *ar) * bytes is used for padding/alignment if necessary. 
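Aside (illustrative, not part of the patch): the ath10k_mac_create(size_t priv_size) change above passes the bus layer's private size into ieee80211_alloc_hw() so the core object and the bus-private area come from a single allocation. A minimal sketch of that trailing-private-data pattern with made-up structure names, assuming the private data needs no alignment beyond what malloc() already guarantees:

#include <stdlib.h>

struct core_dev {
        int id;
        /* bus-private data follows this structure in the same allocation */
};

/* Allocate the core object plus priv_size extra bytes in one block. */
static struct core_dev *core_create(size_t priv_size)
{
        return calloc(1, sizeof(struct core_dev) + priv_size);
}

/* The private area starts immediately after the core structure. */
static void *core_priv(struct core_dev *dev)
{
        return (char *)dev + sizeof(*dev);
}

struct pci_priv {
        int irq_mode;
};

int main(void)
{
        struct core_dev *dev = core_create(sizeof(struct pci_priv));
        struct pci_priv *priv;

        if (!dev)
                return 1;

        priv = core_priv(dev);
        priv->irq_mode = 0;

        free(dev);
        return 0;
}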
*/ ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4; + ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) - ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; + ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; @@ -4815,19 +4881,19 @@ int ath10k_mac_register(struct ath10k *ar) NL80211_DFS_UNSET); if (!ar->dfs_detector) - ath10k_warn("failed to initialise DFS pattern detector\n"); + ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); } ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { - ath10k_err("failed to initialise regulatory: %i\n", ret); + ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); goto err_free; } ret = ieee80211_register_hw(ar->hw); if (ret) { - ath10k_err("failed to register ieee80211: %d\n", ret); + ath10k_err(ar, "failed to register ieee80211: %d\n", ret); goto err_free; } diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index ef4f84376d7c2fe7d16432621b7f067ccb620992..6c80eeada3e28863bb1fe22e753251b64f73608d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -26,12 +26,14 @@ struct ath10k_generic_iter { int ret; }; -struct ath10k *ath10k_mac_create(void); +struct ath10k *ath10k_mac_create(size_t priv_size); void ath10k_mac_destroy(struct ath10k *ar); int ath10k_mac_register(struct ath10k *ar); void ath10k_mac_unregister(struct ath10k *ar); struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); -void ath10k_reset_scan(unsigned long ptr); +void __ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_timeout_work(struct work_struct *work); void ath10k_offchan_tx_purge(struct ath10k *ar); void ath10k_offchan_tx_work(struct work_struct *work); void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 3376963a4862db7f133064b6b8869c56b2c39c71..59e0ea83be5098ea261f35330ba1659749b0eeb8 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -44,13 +44,9 @@ enum ath10k_pci_reset_mode { ATH10K_PCI_RESET_WARM_ONLY = 1, }; -static unsigned int ath10k_pci_target_ps; static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO; -module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644); -MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option"); - module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); @@ -68,13 +64,7 @@ static const struct pci_device_id ath10k_pci_id_table[] = { {0} }; -static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, - u32 *data); - -static int ath10k_pci_post_rx(struct ath10k *ar); -static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, - int num); -static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); +static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); static int ath10k_pci_warm_reset(struct ath10k *ar); static int ath10k_pci_wait_for_target_init(struct ath10k *ar); @@ -156,79 +146,175 @@ static const struct ce_attr host_ce_config_wlan[] = { static const struct 
ce_pipe_config target_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { - .pipenum = 0, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 256, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE1: target->host HTT + HTC control */ { - .pipenum = 1, - .pipedir = PIPEDIR_IN, - .nentries = 32, - .nbytes_max = 512, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(512), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE2: target->host WMI */ { - .pipenum = 2, - .pipedir = PIPEDIR_IN, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE3: host->target WMI */ { - .pipenum = 3, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE4: host->target HTT */ { - .pipenum = 4, - .pipedir = PIPEDIR_OUT, - .nentries = 256, - .nbytes_max = 256, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* NB: 50% of src nentries, since tx has 2 frags */ /* CE5: unused */ { - .pipenum = 5, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE6: Reserved for target autonomous hif_memcpy */ { - .pipenum = 6, - .pipedir = PIPEDIR_INOUT, - .nentries = 32, - .nbytes_max = 4096, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(4096), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE7 used only by Host */ }; +/* + * Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. 
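Aside (illustrative, not part of the patch): the target_ce_config_wlan rework above wraps every field in __cpu_to_le32() because the table is copied verbatim to the target, which expects little-endian values regardless of host byte order; in the kernel that helper is usable in static initializers, so the table stays const. The user-space sketch below shows the same idea with libc's htole32()/le32toh(), converting at run time instead, and with an invented struct standing in for ce_pipe_config:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Host-order view of one copy-engine pipe configuration entry. */
struct pipe_cfg {
        uint32_t pipenum;
        uint32_t nentries;
        uint32_t nbytes_max;
};

/* Device-order view: every field is little-endian, whatever the host is. */
struct pipe_cfg_le {
        uint32_t pipenum;
        uint32_t nentries;
        uint32_t nbytes_max;
};

/* Convert one entry to device byte order before it is copied out. */
static void pipe_cfg_to_le(const struct pipe_cfg *in, struct pipe_cfg_le *out)
{
        out->pipenum = htole32(in->pipenum);
        out->nentries = htole32(in->nentries);
        out->nbytes_max = htole32(in->nbytes_max);
}

int main(void)
{
        struct pipe_cfg host = { 0, 32, 256 };
        struct pipe_cfg_le wire;

        pipe_cfg_to_le(&host, &wire);

        /* le32toh() undoes the conversion when the host reads it back. */
        printf("pipe %u has %u entries\n",
               (unsigned)le32toh(wire.pipenum),
               (unsigned)le32toh(wire.nentries));
        return 0;
}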
+ */ +static const struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + static bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -254,8 +340,8 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) /* IMPORTANT: this extra read transaction is required to * flush the posted write buffer. */ - (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS); + (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); } static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) @@ -266,48 +352,116 @@ static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) /* IMPORTANT: this extra read transaction is required to * flush the posted write buffer. 
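Aside (illustrative, not part of the patch): the (void)ath10k_pci_read32() read-back kept in the legacy-IRQ enable/disable paths here is the usual way to flush a posted PCI write, since the write may sit in a buffer until a read from the same device forces it out. A minimal sketch of the pattern, where a volatile variable merely stands in for a memory-mapped register:

#include <stdint.h>

/* Stand-in for a memory-mapped interrupt-enable register. */
static volatile uint32_t fake_intr_enable_reg;

static void reg_write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

static uint32_t reg_read32(volatile uint32_t *reg)
{
        return *reg;
}

/* Disable interrupts and make sure the write has actually reached the
 * device before returning: PCI writes are posted, so only a read from
 * the same device guarantees the write buffer has been flushed. */
static void intr_disable_and_flush(void)
{
        reg_write32(&fake_intr_enable_reg, 0);

        /* The value is not needed; the read transaction is the point. */
        (void)reg_read32(&fake_intr_enable_reg);
}

int main(void)
{
        intr_disable_and_flush();
        return 0;
}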
*/ - (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS); + (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); } -static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg) +static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) { - struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (ar_pci->num_msi_intrs == 0) { - if (!ath10k_pci_irq_pending(ar)) - return IRQ_NONE; - - ath10k_pci_disable_and_clear_legacy_irq(ar); - } + if (ar_pci->num_msi_intrs > 1) + return "msi-x"; - tasklet_schedule(&ar_pci->early_irq_tasklet); + if (ar_pci->num_msi_intrs == 1) + return "msi"; - return IRQ_HANDLED; + return "legacy"; } -static int ath10k_pci_request_early_irq(struct ath10k *ar) +static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { + struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct sk_buff *skb; + dma_addr_t paddr; int ret; - /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first - * interrupt from irq vector is triggered in all cases for FW - * indication/errors */ - ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler, - IRQF_SHARED, "ath10k_pci (early)", ar); + lockdep_assert_held(&ar_pci->ce_lock); + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map pci rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_CB(skb)->paddr = paddr; + + ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); if (ret) { - ath10k_warn("failed to request early irq: %d\n", ret); + ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); return ret; } return 0; } -static void ath10k_pci_free_early_irq(struct ath10k *ar) +static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + lockdep_assert_held(&ar_pci->ce_lock); + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + while (num--) { + ret = __ath10k_pci_rx_post_buf(pipe); + if (ret) { + ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); + mod_timer(&ar_pci->rx_post_retry, jiffies + + ATH10K_PCI_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + __ath10k_pci_rx_post_pipe(pipe); + spin_unlock_bh(&ar_pci->ce_lock); +} + +static void ath10k_pci_rx_post(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i; + + spin_lock_bh(&ar_pci->ce_lock); + for (i = 0; i < CE_COUNT; i++) + __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); + spin_unlock_bh(&ar_pci->ce_lock); +} + +static void ath10k_pci_rx_replenish_retry(unsigned long ptr) { - free_irq(ath10k_pci_priv(ar)->pdev->irq, ar); + struct ath10k *ar = (void *)ptr; + + ath10k_pci_rx_post(ar); } /* @@ -331,25 +485,6 @@ static int 
ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - /* - * This code cannot handle reads to non-memory space. Redirect to the - * register read fn but preserve the multi word read capability of - * this fn - */ - if (address < DRAM_BASE_ADDRESS) { - if (!IS_ALIGNED(address, 4) || - !IS_ALIGNED((unsigned long)data, 4)) - return -EIO; - - while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access( - ar, address, (u32 *)data)) == 0)) { - nbytes -= sizeof(u32); - address += sizeof(u32); - data += sizeof(u32); - } - return ret; - } - ce_diag = ar_pci->ce_diag; /* @@ -376,7 +511,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); + ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); if (ret != 0) goto done; @@ -389,13 +524,11 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * convert it from Target CPU virtual address space * to CE address space */ - ath10k_pci_wake(ar); address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ath10k_pci_sleep(ar); ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, - 0); + 0); if (ret) goto done; @@ -415,7 +548,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, goto done; } - if (buf != (u32) address) { + if (buf != (u32)address) { ret = -EIO; goto done; } @@ -448,15 +581,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } done: - if (ret == 0) { - /* Copy data from allocated DMA buf to caller's buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) { - ((u32 *)data)[i] = - __le32_to_cpu(((__le32 *)data_buf)[i]); - } - } else - ath10k_warn("failed to read diag value at 0x%x: %d\n", + if (ret == 0) + memcpy(data, data_buf, orig_nbytes); + else + ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", address, ret); if (data_buf) @@ -466,20 +594,45 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, return ret; } -/* Read 4-byte aligned data from Target memory or register */ -static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, - u32 *data) +static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) { - /* Assume range doesn't cross this boundary */ - if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32)); + __le32 val = 0; + int ret; + + ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); + *value = __le32_to_cpu(val); + + return ret; +} + +static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, + u32 src, u32 len) +{ + u32 host_addr, addr; + int ret; + + host_addr = host_interest_item_address(src); + + ret = ath10k_pci_diag_read32(ar, host_addr, &addr); + if (ret != 0) { + ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", + src, ret); + return ret; + } + + ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); + if (ret != 0) { + ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", + addr, len, ret); + return ret; + } - ath10k_pci_wake(ar); - *data = ath10k_pci_read32(ar, address); - ath10k_pci_sleep(ar); return 0; } +#define ath10k_pci_diag_read_hi(ar, dest, src, len) \ + __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) + static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, const void 
*data, int nbytes) { @@ -514,9 +667,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } /* Copy caller's data to allocated DMA buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) - ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]); + memcpy(data_buf, data, orig_nbytes); /* * The address supplied by the caller is in the @@ -528,9 +679,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * to * CE address space */ - ath10k_pci_wake(ar); address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ath10k_pci_sleep(ar); remaining_bytes = orig_nbytes; ce_data = ce_data_base; @@ -539,7 +688,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) address */ - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); + ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address); if (ret != 0) goto done; @@ -547,7 +696,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ - ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data, + ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data, nbytes, 0, 0); if (ret != 0) goto done; @@ -608,66 +757,34 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } if (ret != 0) - ath10k_warn("failed to write diag value at 0x%x: %d\n", + ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); return ret; } -/* Write 4B data to Target memory or register */ -static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address, - u32 data) +static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) { - /* Assume range doesn't cross this boundary */ - if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_write_mem(ar, address, &data, - sizeof(u32)); + __le32 val = __cpu_to_le32(value); - ath10k_pci_wake(ar); - ath10k_pci_write32(ar, address, data); - ath10k_pci_sleep(ar); - return 0; + return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); } -static bool ath10k_pci_target_is_awake(struct ath10k *ar) +static bool ath10k_pci_is_awake(struct ath10k *ar) { - void __iomem *mem = ath10k_pci_priv(ar)->mem; - u32 val; - val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + - RTC_STATE_ADDRESS); - return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); + u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS); + + return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; } -int ath10k_do_pci_wake(struct ath10k *ar) +static int ath10k_pci_wake_wait(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; int tot_delay = 0; int curr_delay = 5; - if (atomic_read(&ar_pci->keep_awake_count) == 0) { - /* Force AWAKE */ - iowrite32(PCIE_SOC_WAKE_V_MASK, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); - } - atomic_inc(&ar_pci->keep_awake_count); - - if (ar_pci->verified_awake) - return 0; - - for (;;) { - if (ath10k_pci_target_is_awake(ar)) { - ar_pci->verified_awake = true; + while (tot_delay < PCIE_WAKE_TIMEOUT) { + if (ath10k_pci_is_awake(ar)) return 0; - } - - if (tot_delay > PCIE_WAKE_TIMEOUT) { - ath10k_warn("target took longer %d us to wake up (awake count %d)\n", - PCIE_WAKE_TIMEOUT, - atomic_read(&ar_pci->keep_awake_count)); - return -ETIMEDOUT; - } udelay(curr_delay); tot_delay += curr_delay; @@ -675,20 +792,21 @@ int 
ath10k_do_pci_wake(struct ath10k *ar) if (curr_delay < 50) curr_delay += 5; } + + return -ETIMEDOUT; } -void ath10k_do_pci_sleep(struct ath10k *ar) +static int ath10k_pci_wake(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + return ath10k_pci_wake_wait(ar); +} - if (atomic_dec_and_test(&ar_pci->keep_awake_count)) { - /* Allow sleep */ - ar_pci->verified_awake = false; - iowrite32(PCIE_SOC_WAKE_RESET, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); - } +static void ath10k_pci_sleep(struct ath10k *ar) +{ + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); } /* Called by lower (CE) layer when a send to Target completes. */ @@ -726,19 +844,17 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) unsigned int nbytes, max_nbytes; unsigned int transfer_id; unsigned int flags; - int err, num_replenish = 0; while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, &ce_data, &nbytes, &transfer_id, &flags) == 0) { - num_replenish++; skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); if (unlikely(max_nbytes < nbytes)) { - ath10k_warn("rxed more than expected (nbytes %d, max %d)", + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; @@ -748,12 +864,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) cb->rx_completion(ar, skb, pipe_info->pipe_num); } - err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish); - if (unlikely(err)) { - /* FIXME: retry */ - ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n", - pipe_info->pipe_num, num_replenish, err); - } + ath10k_pci_rx_post_pipe(pipe_info); } static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -781,10 +892,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, } for (i = 0; i < n_items - 1; i++) { - ath10k_dbg(ATH10K_DBG_PCI, + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr 0x%08x len %d n_items %d\n", i, items[i].paddr, items[i].len, n_items); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, @@ -799,10 +910,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, /* `i` is equal to `n_items -1` after for() */ - ath10k_dbg(ATH10K_DBG_PCI, + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr 0x%08x len %d n_items %d\n", i, items[i].paddr, items[i].len, n_items); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, @@ -829,52 +940,64 @@ static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } -static void ath10k_pci_hif_dump_area(struct ath10k *ar) +static void ath10k_pci_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) { - u32 reg_dump_area = 0; - u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; - u32 host_addr; - int ret; - 
u32 i; + __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; + int i, ret; - ath10k_err("firmware crashed!\n"); - ath10k_err("hardware name %s version 0x%x\n", - ar->hw_params.name, ar->target_version); - ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version); + lockdep_assert_held(&ar->data_lock); - host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); - ret = ath10k_pci_diag_read_mem(ar, host_addr, - ®_dump_area, sizeof(u32)); + ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], + hi_failure_state, + REG_DUMP_COUNT_QCA988X * sizeof(__le32)); if (ret) { - ath10k_err("failed to read FW dump area address: %d\n", ret); - return; - } - - ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area); - - ret = ath10k_pci_diag_read_mem(ar, reg_dump_area, - ®_dump_values[0], - REG_DUMP_COUNT_QCA988X * sizeof(u32)); - if (ret != 0) { - ath10k_err("failed to read FW dump area: %d\n", ret); + ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); return; } BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); - ath10k_err("target Register Dump\n"); + ath10k_err(ar, "firmware register dump:\n"); for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) - ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", + ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, - reg_dump_values[i], - reg_dump_values[i + 1], - reg_dump_values[i + 2], - reg_dump_values[i + 3]); + __le32_to_cpu(reg_dump_values[i]), + __le32_to_cpu(reg_dump_values[i + 1]), + __le32_to_cpu(reg_dump_values[i + 2]), + __le32_to_cpu(reg_dump_values[i + 3])); + + if (!crash_data) + return; + + for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) + crash_data->registers[i] = reg_dump_values[i]; +} + +static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data; + char uuid[50]; + + spin_lock_bh(&ar->data_lock); + + crash_data = ath10k_debug_get_new_fw_crash_data(ar); + + if (crash_data) + scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); + else + scnprintf(uuid, sizeof(uuid), "n/a"); + + ath10k_err(ar, "firmware crashed! 
(uuid %s)\n", uuid); + ath10k_print_driver_info(ar); + ath10k_pci_dump_registers(ar, crash_data); + + spin_unlock_bh(&ar->data_lock); queue_work(ar->workqueue, &ar->restart_work); } @@ -882,7 +1005,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { - ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); if (!force) { int resources; @@ -910,43 +1033,12 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n"); memcpy(&ar_pci->msg_callbacks_current, callbacks, sizeof(ar_pci->msg_callbacks_current)); } -static int ath10k_pci_setup_ce_irq(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - const struct ce_attr *attr; - struct ath10k_pci_pipe *pipe_info; - int pipe_num, disable_interrupts; - - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - - /* Handle Diagnostic CE specially */ - if (pipe_info->ce_hdl == ar_pci->ce_diag) - continue; - - attr = &host_ce_config_wlan[pipe_num]; - - if (attr->src_nentries) { - disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; - ath10k_ce_send_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_send_done, - disable_interrupts); - } - - if (attr->dest_nentries) - ath10k_ce_recv_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_recv_data); - } - - return 0; -} - static void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -954,82 +1046,72 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar) tasklet_kill(&ar_pci->intr_tq); tasklet_kill(&ar_pci->msi_fw_err); - tasklet_kill(&ar_pci->early_irq_tasklet); for (i = 0; i < CE_COUNT; i++) tasklet_kill(&ar_pci->pipe_info[i].intr); + + del_timer_sync(&ar_pci->rx_post_retry); } -/* TODO - temporary mapping while we have too few CE's */ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe, int *ul_is_polled, int *dl_is_polled) { - int ret = 0; + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; - ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); /* polling for received messages not supported */ *dl_is_polled = 0; - switch (service_id) { - case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: - /* - * Host->target HTT gets its own pipe, so it can be polled - * while other pipes are interrupt driven. - */ - *ul_pipe = 4; - /* - * Use the same target->host pipe for HTC ctrl, HTC raw - * streams, and HTT. - */ - *dl_pipe = 1; - break; + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; - case ATH10K_HTC_SVC_ID_RSVD_CTRL: - case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: - /* - * Note: HTC_RAW_STREAMS_SVC is currently unused, and - * HTC_CTRL_RSVD_SVC could share the same pipe as the - * WMI services. So, if another CE is needed, change - * this to *ul_pipe = 3, which frees up CE 0. 
- */ - /* *ul_pipe = 3; */ - *ul_pipe = 0; - *dl_pipe = 1; - break; - - case ATH10K_HTC_SVC_ID_WMI_DATA_BK: - case ATH10K_HTC_SVC_ID_WMI_DATA_BE: - case ATH10K_HTC_SVC_ID_WMI_DATA_VI: - case ATH10K_HTC_SVC_ID_WMI_DATA_VO: + if (__le32_to_cpu(entry->service_id) != service_id) + continue; - case ATH10K_HTC_SVC_ID_WMI_CONTROL: - *ul_pipe = 3; - *dl_pipe = 2; - break; + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } - /* pipe 5 unused */ - /* pipe 6 reserved */ - /* pipe 7 reserved */ + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; - default: - ret = -1; - break; - } *ul_is_polled = (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; - return ret; + return 0; } static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, - u8 *ul_pipe, u8 *dl_pipe) + u8 *ul_pipe, u8 *dl_pipe) { int ul_is_polled, dl_is_polled; - ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); (void)ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL, @@ -1039,141 +1121,34 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, - int num) +static void ath10k_pci_irq_disable(struct ath10k *ar) { - struct ath10k *ar = pipe_info->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl; - struct sk_buff *skb; - dma_addr_t ce_data; - int i, ret = 0; - - if (pipe_info->buf_sz == 0) - return 0; - - for (i = 0; i < num; i++) { - skb = dev_alloc_skb(pipe_info->buf_sz); - if (!skb) { - ath10k_warn("failed to allocate skbuff for pipe %d\n", - num); - ret = -ENOMEM; - goto err; - } - - WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); - - ce_data = dma_map_single(ar->dev, skb->data, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - - if (unlikely(dma_mapping_error(ar->dev, ce_data))) { - ath10k_warn("failed to DMA map sk_buff\n"); - dev_kfree_skb_any(skb); - ret = -EIO; - goto err; - } - - ATH10K_SKB_CB(skb)->paddr = ce_data; - - pci_dma_sync_single_for_device(ar_pci->pdev, ce_data, - pipe_info->buf_sz, - PCI_DMA_FROMDEVICE); - - ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, - ce_data); - if (ret) { - ath10k_warn("failed to enqueue to pipe %d: %d\n", - num, ret); - goto err; - } - } + int i; - return ret; + ath10k_ce_disable_interrupts(ar); + ath10k_pci_disable_and_clear_legacy_irq(ar); + /* FIXME: How to mask all MSI interrupts? 
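Aside (illustrative, not part of the patch): the rewritten ath10k_pci_hif_map_service_to_pipe() above walks the target_service_to_ce_map_wlan table instead of a hand-maintained switch, so host and firmware share one description of the service-to-pipe mapping. A stand-alone sketch of the lookup with host-order fields and invented service IDs (the real table stores __le32 values because it is also downloaded to the target):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum pipedir { PIPEDIR_NONE, PIPEDIR_IN, PIPEDIR_OUT, PIPEDIR_INOUT };

struct service_to_pipe {
        unsigned int service_id;
        enum pipedir pipedir;
        unsigned int pipenum;
};

/* One row per direction; a service may map to separate UL and DL pipes. */
static const struct service_to_pipe map[] = {
        { 1 /* control */, PIPEDIR_OUT, 3 },
        { 1 /* control */, PIPEDIR_IN,  2 },
        { 2 /* data */,    PIPEDIR_OUT, 4 },
        { 2 /* data */,    PIPEDIR_IN,  1 },
};

static int map_service_to_pipe(unsigned int service_id,
                               unsigned int *ul_pipe, unsigned int *dl_pipe)
{
        bool ul_set = false, dl_set = false;
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                if (map[i].service_id != service_id)
                        continue;

                switch (map[i].pipedir) {
                case PIPEDIR_NONE:
                        break;
                case PIPEDIR_IN:
                        *dl_pipe = map[i].pipenum;
                        dl_set = true;
                        break;
                case PIPEDIR_OUT:
                        *ul_pipe = map[i].pipenum;
                        ul_set = true;
                        break;
                case PIPEDIR_INOUT:
                        *ul_pipe = *dl_pipe = map[i].pipenum;
                        ul_set = dl_set = true;
                        break;
                }
        }

        return (ul_set && dl_set) ? 0 : -1;
}

int main(void)
{
        unsigned int ul, dl;

        if (!map_service_to_pipe(1, &ul, &dl))
                printf("service 1: ul pipe %u, dl pipe %u\n", ul, dl);
        return 0;
}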
*/ -err: - ath10k_pci_rx_pipe_cleanup(pipe_info); - return ret; + for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) + synchronize_irq(ar_pci->pdev->irq + i); } -static int ath10k_pci_post_rx(struct ath10k *ar) +static void ath10k_pci_irq_enable(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_pipe *pipe_info; - const struct ce_attr *attr; - int pipe_num, ret = 0; - - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - attr = &host_ce_config_wlan[pipe_num]; - - if (attr->dest_nentries == 0) - continue; - - ret = ath10k_pci_post_rx_pipe(pipe_info, - attr->dest_nentries - 1); - if (ret) { - ath10k_warn("failed to post RX buffer for pipe %d: %d\n", - pipe_num, ret); - - for (; pipe_num >= 0; pipe_num--) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - ath10k_pci_rx_pipe_cleanup(pipe_info); - } - return ret; - } - } - - return 0; + ath10k_ce_enable_interrupts(ar); + ath10k_pci_enable_legacy_irq(ar); + /* FIXME: How to unmask all MSI interrupts? */ } static int ath10k_pci_hif_start(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret, ret_early; - - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n"); - - ath10k_pci_free_early_irq(ar); - ath10k_pci_kill_tasklet(ar); - - ret = ath10k_pci_request_irq(ar); - if (ret) { - ath10k_warn("failed to post RX buffers for all pipes: %d\n", - ret); - goto err_early_irq; - } - - ret = ath10k_pci_setup_ce_irq(ar); - if (ret) { - ath10k_warn("failed to setup CE interrupts: %d\n", ret); - goto err_stop; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); - /* Post buffers once to start things off. */ - ret = ath10k_pci_post_rx(ar); - if (ret) { - ath10k_warn("failed to post RX buffers for all pipes: %d\n", - ret); - goto err_stop; - } + ath10k_pci_irq_enable(ar); + ath10k_pci_rx_post(ar); - ar_pci->started = 1; return 0; - -err_stop: - ath10k_ce_disable_interrupts(ar); - ath10k_pci_free_irq(ar); - ath10k_pci_kill_tasklet(ar); -err_early_irq: - /* Though there should be no interrupts (device was reset) - * power_down() expects the early IRQ to be installed as per the - * driver lifecycle. 
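Aside (illustrative, not part of the patch): the ath10k_pci_rx_post() call in the new hif_start() above refills every free slot of each RX ring and, on allocation failure, arms a retry timer rather than failing the whole start-up. A toy sketch of that refill-and-retry idea, with malloc() standing in for skb allocation and a flag standing in for the timer:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

/* Toy receive ring: a slot is "filled" when it holds a buffer pointer. */
struct rx_ring {
        void *slot[RING_SIZE];
        bool need_retry;        /* set when allocation failed mid-refill */
};

static int ring_free_slots(const struct rx_ring *r)
{
        int i, n = 0;

        for (i = 0; i < RING_SIZE; i++)
                if (!r->slot[i])
                        n++;
        return n;
}

/* Fill every free slot; on allocation failure remember to retry later
 * instead of failing hard, mirroring the driver's retry timer. */
static void ring_refill(struct rx_ring *r)
{
        int i;

        r->need_retry = false;
        for (i = 0; i < RING_SIZE; i++) {
                if (r->slot[i])
                        continue;
                r->slot[i] = malloc(2048);
                if (!r->slot[i]) {
                        r->need_retry = true;
                        break;
                }
        }
}

int main(void)
{
        struct rx_ring ring = { { NULL }, false };
        int i;

        ring_refill(&ring);
        printf("free slots after refill: %d, retry needed: %d\n",
               ring_free_slots(&ring), ring.need_retry);

        for (i = 0; i < RING_SIZE; i++)
                free(ring.slot[i]);
        return 0;
}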
*/ - ret_early = ath10k_pci_request_early_irq(ar); - if (ret_early) - ath10k_warn("failed to re-enable early irq: %d\n", ret_early); - - return ret; } static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) @@ -1193,10 +1168,6 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) ar = pipe_info->hif_ce_state; ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) - return; - ce_hdl = pipe_info->ce_hdl; while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, @@ -1227,10 +1198,6 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) ar = pipe_info->hif_ce_state; ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) - return; - ce_hdl = pipe_info->ce_hdl; while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, @@ -1275,41 +1242,31 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) ath10k_ce_deinit_pipe(ar, i); } -static void ath10k_pci_hif_stop(struct ath10k *ar) +static void ath10k_pci_flush(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n"); - - if (WARN_ON(!ar_pci->started)) - return; - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) - ath10k_warn("failed to disable CE interrupts: %d\n", ret); - - ath10k_pci_free_irq(ar); ath10k_pci_kill_tasklet(ar); - - ret = ath10k_pci_request_early_irq(ar); - if (ret) - ath10k_warn("failed to re-enable early irq: %d\n", ret); - - /* At this point, asynchronous threads are stopped, the target should - * not DMA nor interrupt. We process the leftovers and then free - * everything else up. */ - ath10k_pci_buffer_cleanup(ar); +} + +static void ath10k_pci_hif_stop(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); - /* Make the sure the device won't access any structures on the host by - * resetting it. The device was fed with PCI CE ringbuffer - * configuration during init. If ringbuffers are freed and the device - * were to access them this could lead to memory corruption on the - * host. */ + /* Most likely the device has HTT Rx ring configured. The only way to + * prevent the device from accessing (and possible corrupting) host + * memory is to reset the chip now. + * + * There's also no known way of masking MSI interrupts on the device. + * For ranged MSI the CE-related interrupts can be masked. However + * regardless how many MSI interrupts are assigned the first one + * is always used for firmware indications (crashes) and cannot be + * masked. To prevent the device from asserting the interrupt reset it + * before proceeding with cleanup. 
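Aside (illustrative, not part of the patch): the reordered hif_stop() here resets the chip first, because neither its DMA nor the firmware-indication MSI can be reliably masked; only once the device is quiesced are host interrupts disabled and leftover buffers reclaimed. A sketch of that shutdown ordering with placeholder helpers that only log each step:

#include <stdio.h>

/* Placeholder device-control helpers; each just logs the step so the
 * ordering is visible when the sketch runs. */
static void device_reset(void)       { puts("1. reset chip (stops DMA and firmware IRQs)"); }
static void irqs_disable(void)       { puts("2. disable and synchronize host IRQs"); }
static void pending_work_flush(void) { puts("3. kill deferred work, reclaim buffers"); }

/* Reset first: once the chip is quiesced it can no longer scribble over
 * host memory or raise the unmaskable firmware-indication interrupt,
 * so the rest of the teardown can proceed safely. */
static void hif_stop(void)
{
        device_reset();
        irqs_disable();
        pending_work_flush();
}

int main(void)
{
        hif_stop();
        return 0;
}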
+ */ ath10k_pci_warm_reset(ar); - ar_pci->started = 0; + ath10k_pci_irq_disable(ar); + ath10k_pci_flush(ar); } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -1360,7 +1317,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, xfer.wait_for_resp = true; xfer.resp_len = 0; - ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); + ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); } ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); @@ -1418,6 +1375,7 @@ static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { + struct ath10k *ar = ce_state->ar; struct bmi_xfer *xfer; u32 ce_data; unsigned int nbytes; @@ -1429,7 +1387,7 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) return; if (!xfer->wait_for_resp) { - ath10k_warn("unexpected: BMI data received; ignoring\n"); + ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; } @@ -1456,130 +1414,18 @@ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, return -ETIMEDOUT; } -/* - * Map from service/endpoint to Copy Engine. - * This table is derived from the CE_PCI TABLE, above. - * It is passed to the Target at startup for use by firmware. - */ -static const struct service_to_pipe target_service_to_ce_map_wlan[] = { - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, /* could be moved to 3 (share with WMI) */ - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_OUT, /* out = UL = host -> target */ - 4, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - - /* (Additions here) */ - - { /* Must be last */ - 0, - 0, - 0, - }, -}; - /* * Send an interrupt to the device to wake up the Target CPU * so it has an opportunity to notice any changed state. 
*/ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) -{ - int ret; - u32 core_ctrl; - - ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS | - CORE_CTRL_ADDRESS, - &core_ctrl); - if (ret) { - ath10k_warn("failed to read core_ctrl: %d\n", ret); - return ret; - } - - /* A_INUM_FIRMWARE interrupt to Target CPU */ - core_ctrl |= CORE_CTRL_CPU_INTR_MASK; +{ + u32 addr, val; - ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | - CORE_CTRL_ADDRESS, - core_ctrl); - if (ret) { - ath10k_warn("failed to set target CPU interrupt mask: %d\n", - ret); - return ret; - } + addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + val = ath10k_pci_read32(ar, addr); + val |= CORE_CTRL_CPU_INTR_MASK; + ath10k_pci_write32(ar, addr, val); return 0; } @@ -1602,92 +1448,92 @@ static int ath10k_pci_init_config(struct ath10k *ar) host_interest_item_address(HI_ITEM(hi_interconnect_state)); /* Supply Target-side CE configuration */ - ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, - &pcie_state_targ_addr); + ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, + &pcie_state_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pcie state addr: %d\n", ret); + ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); return ret; } if (pcie_state_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pcie state addr\n"); + ath10k_err(ar, "Invalid pcie state addr\n"); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - pipe_cfg_addr), - &pipe_cfg_targ_addr); + pipe_cfg_addr)), + &pipe_cfg_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pipe cfg addr: %d\n", ret); + ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); return ret; } if (pipe_cfg_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pipe cfg addr\n"); + ath10k_err(ar, "Invalid pipe cfg addr\n"); return ret; } ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, - target_ce_config_wlan, - sizeof(target_ce_config_wlan)); + target_ce_config_wlan, + sizeof(target_ce_config_wlan)); if (ret != 0) { - ath10k_err("Failed to write pipe cfg: %d\n", ret); + ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - svc_to_pipe_map), - &svc_to_pipe_map); + svc_to_pipe_map)), + &svc_to_pipe_map); if (ret != 0) { - ath10k_err("Failed to get svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); return ret; } if (svc_to_pipe_map == 0) { ret = -EIO; - ath10k_err("Invalid svc_to_pipe map\n"); + ath10k_err(ar, "Invalid svc_to_pipe map\n"); return ret; } ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, - target_service_to_ce_map_wlan, - sizeof(target_service_to_ce_map_wlan)); + target_service_to_ce_map_wlan, + sizeof(target_service_to_ce_map_wlan)); if (ret != 0) { - ath10k_err("Failed to write svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); return ret; } - ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, - config_flags), - &pcie_config_flags); + config_flags)), + &pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to get pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); return ret; } pcie_config_flags &= 
~PCIE_CONFIG_FLAG_ENABLE_L1; - ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + - offsetof(struct pcie_state, config_flags), - &pcie_config_flags, - sizeof(pcie_config_flags)); + ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + + offsetof(struct pcie_state, + config_flags)), + pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to write pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); return ret; } /* configure early allocation */ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); - ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); + ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); if (ret != 0) { - ath10k_err("Faile to get early alloc val: %d\n", ret); + ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); return ret; } @@ -1697,26 +1543,26 @@ static int ath10k_pci_init_config(struct ath10k *ar) ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & HI_EARLY_ALLOC_IRAM_BANKS_MASK); - ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); + ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); if (ret != 0) { - ath10k_err("Failed to set early alloc val: %d\n", ret); + ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); return ret; } /* Tell Target to proceed with initialization */ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); - ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); + ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); if (ret != 0) { - ath10k_err("Failed to get option val: %d\n", ret); + ath10k_err(ar, "Failed to get option val: %d\n", ret); return ret; } flag2_value |= HI_OPTION_EARLY_CFG_DONE; - ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); + ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); if (ret != 0) { - ath10k_err("Failed to set option val: %d\n", ret); + ath10k_err(ar, "Failed to set option val: %d\n", ret); return ret; } @@ -1730,7 +1576,7 @@ static int ath10k_pci_alloc_ce(struct ath10k *ar) for (i = 0; i < CE_COUNT; i++) { ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { - ath10k_err("failed to allocate copy engine pipe %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); return ret; } @@ -1761,9 +1607,11 @@ static int ath10k_pci_ce_init(struct ath10k *ar) pipe_info->hif_ce_state = ar; attr = &host_ce_config_wlan[pipe_num]; - ret = ath10k_ce_init_pipe(ar, pipe_num, attr); + ret = ath10k_ce_init_pipe(ar, pipe_num, attr, + ath10k_pci_ce_send_done, + ath10k_pci_ce_recv_data); if (ret) { - ath10k_err("failed to initialize copy engine pipe %d: %d\n", + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", pipe_num, ret); return ret; } @@ -1777,38 +1625,25 @@ static int ath10k_pci_ce_init(struct ath10k *ar) continue; } - pipe_info->buf_sz = (size_t) (attr->src_sz_max); + pipe_info->buf_sz = (size_t)(attr->src_sz_max); } return 0; } -static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) +static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - u32 fw_indicator; - - ath10k_pci_wake(ar); - - fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - - if (fw_indicator & FW_IND_EVENT_PENDING) { - /* ACK: clear Target-side pending event */ - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - fw_indicator & ~FW_IND_EVENT_PENDING); + return ath10k_pci_read32(ar, 
FW_INDICATOR_ADDRESS) & + FW_IND_EVENT_PENDING; +} - if (ar_pci->started) { - ath10k_pci_hif_dump_area(ar); - } else { - /* - * Probable Target failure before we're prepared - * to handle it. Generally unexpected. - */ - ath10k_warn("early firmware event indicated\n"); - } - } +static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) +{ + u32 val; - ath10k_pci_sleep(ar); + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + val &= ~FW_IND_EVENT_PENDING; + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); } /* this function effectively clears target memory controller assert line */ @@ -1833,25 +1668,19 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) static int ath10k_pci_warm_reset(struct ath10k *ar) { - int ret = 0; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n"); - - ret = ath10k_do_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target: %d\n", ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); /* debug */ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", + val); val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CPU_INTR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", val); /* disable pending irqs */ @@ -1894,11 +1723,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) /* debug */ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", + val); val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CPU_INTR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", val); /* CPU warm reset */ @@ -1909,20 +1739,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", + val); msleep(100); - ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); - ath10k_do_pci_sleep(ar); - return ret; + return 0; } static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - const char *irq_mode; int ret; /* @@ -1941,80 +1769,39 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) ret = ath10k_pci_warm_reset(ar); if (ret) { - ath10k_err("failed to reset target: %d\n", ret); + ath10k_err(ar, "failed to reset target: %d\n", ret); goto err; } - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - /* Force AWAKE forever */ - ath10k_do_pci_wake(ar); - ret = ath10k_pci_ce_init(ar); if (ret) { - ath10k_err("failed to initialize CE: %d\n", ret); - goto err_ps; - } - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) { - ath10k_err("failed to disable CE interrupts: %d\n", ret); - goto err_ce; - } - - ret = ath10k_pci_init_irq(ar); - if (ret) { - ath10k_err("failed to init irqs: %d\n", ret); - goto err_ce; - } - - ret = ath10k_pci_request_early_irq(ar); - if (ret) { - ath10k_err("failed to request early irq: %d\n", ret); 
- goto err_deinit_irq; + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err; } ret = ath10k_pci_wait_for_target_init(ar); if (ret) { - ath10k_err("failed to wait for target to init: %d\n", ret); - goto err_free_early_irq; + ath10k_err(ar, "failed to wait for target to init: %d\n", ret); + goto err_ce; } ret = ath10k_pci_init_config(ar); if (ret) { - ath10k_err("failed to setup init config: %d\n", ret); - goto err_free_early_irq; + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce; } ret = ath10k_pci_wake_target_cpu(ar); if (ret) { - ath10k_err("could not wake up target CPU: %d\n", ret); - goto err_free_early_irq; + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce; } - if (ar_pci->num_msi_intrs > 1) - irq_mode = "MSI-X"; - else if (ar_pci->num_msi_intrs == 1) - irq_mode = "MSI"; - else - irq_mode = "legacy"; - - if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) - ath10k_info("pci irq %s irq_mode %d reset_mode %d\n", - irq_mode, ath10k_pci_irq_mode, - ath10k_pci_reset_mode); - return 0; -err_free_early_irq: - ath10k_pci_free_early_irq(ar); -err_deinit_irq: - ath10k_pci_deinit_irq(ar); err_ce: ath10k_pci_ce_deinit(ar); ath10k_pci_warm_reset(ar); -err_ps: - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); err: return ret; } @@ -2034,7 +1821,7 @@ static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) if (ret == 0) break; - ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n", + ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n", i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); } @@ -2045,7 +1832,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) { int ret; - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); /* * Hardware CUS232 version 2 has some issues with cold reset and the @@ -2057,17 +1844,17 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) */ ret = ath10k_pci_hif_power_up_warm(ar); if (ret) { - ath10k_warn("failed to power up target using warm reset: %d\n", + ath10k_warn(ar, "failed to power up target using warm reset: %d\n", ret); if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) return ret; - ath10k_warn("trying cold reset\n"); + ath10k_warn(ar, "trying cold reset\n"); ret = __ath10k_pci_hif_power_up(ar, true); if (ret) { - ath10k_err("failed to power up target using cold reset too (%d)\n", + ath10k_err(ar, "failed to power up target using cold reset too (%d)\n", ret); return ret; } @@ -2078,18 +1865,9 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) static void ath10k_pci_hif_power_down(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n"); - - ath10k_pci_free_early_irq(ar); - ath10k_pci_kill_tasklet(ar); - ath10k_pci_deinit_irq(ar); - ath10k_pci_ce_deinit(ar); ath10k_pci_warm_reset(ar); - - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); } #ifdef CONFIG_PM @@ -2171,7 +1949,13 @@ static void ath10k_msi_err_tasklet(unsigned long data) { struct ath10k *ar = (struct ath10k *)data; - ath10k_pci_fw_interrupt_handler(ar); + if (!ath10k_pci_has_fw_crashed(ar)) { + ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); + return; + } + + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); } /* @@ -2185,7 +1969,8 @@ static irqreturn_t 
ath10k_pci_per_engine_handler(int irq, void *arg) int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { - ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); + ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); return IRQ_HANDLED; } @@ -2232,36 +2017,17 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) return IRQ_HANDLED; } -static void ath10k_pci_early_irq_tasklet(unsigned long data) +static void ath10k_pci_tasklet(unsigned long data) { struct ath10k *ar = (struct ath10k *)data; - u32 fw_ind; - int ret; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target in early irq tasklet: %d\n", - ret); + if (ath10k_pci_has_fw_crashed(ar)) { + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); return; } - fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - if (fw_ind & FW_IND_EVENT_PENDING) { - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - fw_ind & ~FW_IND_EVENT_PENDING); - ath10k_pci_hif_dump_area(ar); - } - - ath10k_pci_sleep(ar); - ath10k_pci_enable_legacy_irq(ar); -} - -static void ath10k_pci_tasklet(unsigned long data) -{ - struct ath10k *ar = (struct ath10k *)data; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ ath10k_ce_per_engine_service_any(ar); /* Re-enable legacy irq that was disabled in the irq handler */ @@ -2278,7 +2044,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar) ath10k_pci_msi_fw_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI-X fw irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); return ret; } @@ -2288,7 +2054,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar) ath10k_pci_per_engine_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI-X ce irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", ar_pci->pdev->irq + i, ret); for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) @@ -2311,7 +2077,7 @@ static int ath10k_pci_request_irq_msi(struct ath10k *ar) ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } @@ -2328,7 +2094,7 @@ static int ath10k_pci_request_irq_legacy(struct ath10k *ar) ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request legacy irq %d: %d\n", + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } @@ -2349,7 +2115,7 @@ static int ath10k_pci_request_irq(struct ath10k *ar) return ath10k_pci_request_irq_msix(ar); } - ath10k_warn("unknown irq configuration upon request\n"); + ath10k_warn(ar, "unknown irq configuration upon request\n"); return -EINVAL; } @@ -2372,8 +2138,6 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, (unsigned long)ar); - tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet, - (unsigned long)ar); for (i = 0; i < CE_COUNT; i++) { ar_pci->pipe_info[i].ar_pci = ar_pci; @@ -2385,21 +2149,19 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) static int 
ath10k_pci_init_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X, - ar_pci->features); int ret; ath10k_pci_init_irq_tasklets(ar); - if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO && - !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) - ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode); + if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) + ath10k_info(ar, "limiting irq mode to: %d\n", + ath10k_pci_irq_mode); /* Try MSI-X */ - if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { + if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { ar_pci->num_msi_intrs = MSI_NUM_REQUEST; ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, - ar_pci->num_msi_intrs); + ar_pci->num_msi_intrs); if (ret > 0) return 0; @@ -2426,34 +2188,16 @@ static int ath10k_pci_init_irq(struct ath10k *ar) * synchronization checking. */ ar_pci->num_msi_intrs = 0; - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target: %d\n", ret); - return ret; - } - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); - ath10k_pci_sleep(ar); return 0; } -static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar) +static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) { - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target: %d\n", ret); - return ret; - } - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 0); - ath10k_pci_sleep(ar); - - return 0; } static int ath10k_pci_deinit_irq(struct ath10k *ar) @@ -2462,7 +2206,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) switch (ar_pci->num_msi_intrs) { case 0: - return ath10k_pci_deinit_irq_legacy(ar); + ath10k_pci_deinit_irq_legacy(ar); + return 0; case 1: /* fall-through */ case MSI_NUM_REQUEST: @@ -2472,7 +2217,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) pci_disable_msi(ar_pci->pdev); } - ath10k_warn("unknown irq configuration upon deinit\n"); + ath10k_warn(ar, "unknown irq configuration upon deinit\n"); return -EINVAL; } @@ -2480,23 +2225,17 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long timeout; - int ret; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target for init: %d\n", ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); do { val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", + val); /* target should never return this */ if (val == 0xffffffff) @@ -2511,55 +2250,42 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (ar_pci->num_msi_intrs == 0) /* Fix potential race by repeating CORE_BASE writes */ - ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS, - PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL); + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | + PCIE_INTR_CE_MASK_ALL); mdelay(10); } while (time_before(jiffies, timeout)); if (val == 0xffffffff) { - ath10k_err("failed to read device register, device is gone\n"); - ret = -EIO; - goto out; + ath10k_err(ar, "failed to read device register, 
device is gone\n"); + return -EIO; } if (val & FW_IND_EVENT_PENDING) { - ath10k_warn("device has crashed during init\n"); - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - val & ~FW_IND_EVENT_PENDING); - ath10k_pci_hif_dump_area(ar); - ret = -ECOMM; - goto out; + ath10k_warn(ar, "device has crashed during init\n"); + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); + return -ECOMM; } if (!(val & FW_IND_INITIALIZED)) { - ath10k_err("failed to receive initialized event from target: %08x\n", + ath10k_err(ar, "failed to receive initialized event from target: %08x\n", val); - ret = -ETIMEDOUT; - goto out; + return -ETIMEDOUT; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n"); - -out: - ath10k_pci_sleep(ar); - return ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); + return 0; } static int ath10k_pci_cold_reset(struct ath10k *ar) { - int i, ret; + int i; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n"); - - ret = ath10k_do_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target: %d\n", - ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); /* Put Target, including PCIe, into RESET. */ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); @@ -2584,169 +2310,199 @@ static int ath10k_pci_cold_reset(struct ath10k *ar) msleep(1); } - ath10k_do_pci_sleep(ar); - - ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); return 0; } -static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) -{ - int i; - - for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { - if (!test_bit(i, ar_pci->features)) - continue; - - switch (i) { - case ATH10K_PCI_FEATURE_MSI_X: - ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n"); - break; - case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: - ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n"); - break; - } - } -} - -static int ath10k_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_dev) +static int ath10k_pci_claim(struct ath10k *ar) { - void __iomem *mem; - int ret = 0; - struct ath10k *ar; - struct ath10k_pci *ar_pci; - u32 lcr_val, chip_id; - - ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n"); - - ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); - if (ar_pci == NULL) - return -ENOMEM; - - ar_pci->pdev = pdev; - ar_pci->dev = &pdev->dev; - - switch (pci_dev->device) { - case QCA988X_2_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); - break; - default: - ret = -ENODEV; - ath10k_err("Unknown device ID: %d\n", pci_dev->device); - goto err_ar_pci; - } - - if (ath10k_pci_target_ps) - set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); - - ath10k_pci_dump_features(ar_pci); - - ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); - if (!ar) { - ath10k_err("failed to create driver core\n"); - ret = -EINVAL; - goto err_ar_pci; - } - - ar_pci->ar = ar; - atomic_set(&ar_pci->keep_awake_count, 0); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + u32 lcr_val; + int ret; pci_set_drvdata(pdev, ar); ret = pci_enable_device(pdev); if (ret) { - ath10k_err("failed to enable PCI device: %d\n", ret); - goto err_ar; + ath10k_err(ar, "failed to enable pci device: %d\n", ret); + return ret; } - /* Request MMIO resources */ ret = pci_request_region(pdev, BAR_NUM, "ath"); if (ret) { - ath10k_err("failed to request MMIO region: %d\n", ret); + ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, + ret); goto err_device; } 
- /* - * Target structures have a limit of 32 bit DMA pointers. - * DMA pointers can be wider than 32 bits by default on some systems. - */ + /* Target expects 32 bit DMA. Enforce it. */ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret); + ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); goto err_region; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - ath10k_err("failed to set consistent DMA mask to 32-bit\n"); + ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", + ret); goto err_region; } - /* Set bus master bit in PCI_COMMAND to enable DMA */ pci_set_master(pdev); - /* - * Temporary FIX: disable ASPM - * Will be removed after the OTP is programmed - */ + /* Workaround: Disable ASPM */ pci_read_config_dword(pdev, 0x80, &lcr_val); pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); /* Arrange for access to Target SoC registers. */ - mem = pci_iomap(pdev, BAR_NUM, 0); - if (!mem) { - ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM); + ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); + if (!ar_pci->mem) { + ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); ret = -EIO; goto err_master; } - ar_pci->mem = mem; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + return 0; + +err_master: + pci_clear_master(pdev); + +err_region: + pci_release_region(pdev, BAR_NUM); + +err_device: + pci_disable_device(pdev); + + return ret; +} + +static void ath10k_pci_release(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + + pci_iounmap(pdev, ar_pci->mem); + pci_release_region(pdev, BAR_NUM); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +static int ath10k_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_dev) +{ + int ret = 0; + struct ath10k *ar; + struct ath10k_pci *ar_pci; + u32 chip_id; + + ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, + &ath10k_pci_hif_ops); + if (!ar) { + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n"); + + ar_pci = ath10k_pci_priv(ar); + ar_pci->pdev = pdev; + ar_pci->dev = &pdev->dev; + ar_pci->ar = ar; spin_lock_init(&ar_pci->ce_lock); + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, + (unsigned long)ar); - ret = ath10k_do_pci_wake(ar); + ret = ath10k_pci_claim(ar); if (ret) { - ath10k_err("Failed to get chip id: %d\n", ret); - goto err_iomap; + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_core_destroy; } - chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up: %d\n", ret); + goto err_release; + } - ath10k_do_pci_sleep(ar); + chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (chip_id == 0xffffffff) { + ath10k_err(ar, "failed to get chip id\n"); + goto err_sleep; + } ret = ath10k_pci_alloc_ce(ar); if (ret) { - ath10k_err("failed to allocate copy engine pipes: %d\n", ret); - goto err_iomap; + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + goto err_sleep; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + ath10k_pci_ce_deinit(ar); - ret = ath10k_core_register(ar, chip_id); + ret = ath10k_ce_disable_interrupts(ar); + if (ret) { + ath10k_err(ar, "failed to disable copy engine interrupts: %d\n", + ret); + goto err_free_ce; + } + + /* Workaround: There's no known way to mask all possible 
interrupts via + * device CSR. The only way to make sure device doesn't assert + * interrupts is to reset it. Interrupts are then disabled on host + * after handlers are registered. + */ + ath10k_pci_warm_reset(ar); + + ret = ath10k_pci_init_irq(ar); if (ret) { - ath10k_err("failed to register driver core: %d\n", ret); + ath10k_err(ar, "failed to init irqs: %d\n", ret); goto err_free_ce; } + ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", + ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, + ath10k_pci_irq_mode, ath10k_pci_reset_mode); + + ret = ath10k_pci_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_deinit_irq; + } + + /* This shouldn't race as the device has been reset above. */ + ath10k_pci_irq_disable(ar); + + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_irq; + } + return 0; +err_free_irq: + ath10k_pci_free_irq(ar); + ath10k_pci_kill_tasklet(ar); + +err_deinit_irq: + ath10k_pci_deinit_irq(ar); + err_free_ce: ath10k_pci_free_ce(ar); -err_iomap: - pci_iounmap(pdev, mem); -err_master: - pci_clear_master(pdev); -err_region: - pci_release_region(pdev, BAR_NUM); -err_device: - pci_disable_device(pdev); -err_ar: + +err_sleep: + ath10k_pci_sleep(ar); + +err_release: + ath10k_pci_release(ar); + +err_core_destroy: ath10k_core_destroy(ar); -err_ar_pci: - /* call HIF PCI free here */ - kfree(ar_pci); return ret; } @@ -2756,7 +2512,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) struct ath10k *ar = pci_get_drvdata(pdev); struct ath10k_pci *ar_pci; - ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; @@ -2767,15 +2523,14 @@ static void ath10k_pci_remove(struct pci_dev *pdev) return; ath10k_core_unregister(ar); + ath10k_pci_free_irq(ar); + ath10k_pci_kill_tasklet(ar); + ath10k_pci_deinit_irq(ar); + ath10k_pci_ce_deinit(ar); ath10k_pci_free_ce(ar); - - pci_iounmap(pdev, ar_pci->mem); - pci_release_region(pdev, BAR_NUM); - pci_clear_master(pdev); - pci_disable_device(pdev); - + ath10k_pci_sleep(ar); + ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); @@ -2793,7 +2548,8 @@ static int __init ath10k_pci_init(void) ret = pci_register_driver(&ath10k_pci_driver); if (ret) - ath10k_err("failed to register PCI driver: %d\n", ret); + printk(KERN_ERR "failed to register ath10k pci driver: %d\n", + ret); return ret; } @@ -2809,5 +2565,5 @@ module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 940129209990337a1a99f79599014abc48b6b605..cf36511c7f4dcf41db9610b5cd9487cefcfc59ad 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -23,9 +23,6 @@ #include "hw.h" #include "ce.h" -/* FW dump area */ -#define REG_DUMP_COUNT_QCA988X 60 - /* * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite */ @@ -103,12 +100,12 @@ struct pcie_state { * NOTE: Structure is shared between Host software and Target firmware! 
*/ struct ce_pipe_config { - u32 pipenum; - u32 pipedir; - u32 nentries; - u32 nbytes_max; - u32 flags; - u32 reserved; + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; }; /* @@ -130,17 +127,9 @@ struct ce_pipe_config { /* Establish a mapping between a service/direction and a pipe. */ struct service_to_pipe { - u32 service_id; - u32 pipedir; - u32 pipenum; -}; - -enum ath10k_pci_features { - ATH10K_PCI_FEATURE_MSI_X = 0, - ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1, - - /* keep last */ - ATH10K_PCI_FEATURE_COUNT + __le32 service_id; + __le32 pipedir; + __le32 pipenum; }; /* Per-pipe state. */ @@ -169,8 +158,6 @@ struct ath10k_pci { struct ath10k *ar; void __iomem *mem; - DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); - /* * Number of MSI interrupts granted, 0 --> using legacy PCI line * interrupts. @@ -179,12 +166,6 @@ struct ath10k_pci { struct tasklet_struct intr_tq; struct tasklet_struct msi_fw_err; - struct tasklet_struct early_irq_tasklet; - - int started; - - atomic_t keep_awake_count; - bool verified_awake; struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; @@ -198,27 +179,15 @@ struct ath10k_pci { /* Map CE id to ce_state */ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + struct timer_list rx_post_retry; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) { - return ar->hif.priv; -} - -static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); -} - -static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); + return (struct ath10k_pci *)ar->drv_priv; } +#define ATH10K_PCI_RX_POST_RETRY_MS 50 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ #define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ @@ -242,35 +211,17 @@ static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 -/* - * This API allows the Host to access Target registers directly - * and relatively efficiently over PCIe. - * This allows the Host to avoid extra overhead associated with - * sending a message to firmware and waiting for a response message - * from firmware, as is done on other interconnects. - * - * Yet there is some complexity with direct accesses because the - * Target's power state is not known a priori. The Host must issue - * special PCIe reads/writes in order to explicitly wake the Target - * and to verify that it is awake and will remain awake. - * - * Usage: +/* Target exposes its registers for direct access. However before host can + * access them it needs to make sure the target is awake (ath10k_pci_wake, + * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go + * to sleep unless host tells it to (ath10k_pci_sleep). * - * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space. - * These calls must be bracketed by ath10k_pci_wake and - * ath10k_pci_sleep. A single BEGIN/END pair is adequate for - * multiple READ/WRITE operations. + * If host tries to access target registers without waking it up it can + * scribble over host memory. * - * Use ath10k_pci_wake to put the Target in a state in - * which it is legal for the Host to directly access it. 
This - * may involve waking the Target from a low power state, which - * may take up to 2Ms! - * - * Use ath10k_pci_sleep to tell the Target that as far as - * this code path is concerned, it no longer needs to remain - * directly accessible. BEGIN/END is under a reference counter; - * multiple code paths may issue BEGIN/END on a single targid. + * If target is asleep waking it up may take up to even 2ms. */ + static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { @@ -296,25 +247,18 @@ static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); } -int ath10k_do_pci_wake(struct ath10k *ar); -void ath10k_do_pci_sleep(struct ath10k *ar); - -static inline int ath10k_pci_wake(struct ath10k *ar) +static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - return ath10k_do_pci_wake(ar); - - return 0; + return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } -static inline void ath10k_pci_sleep(struct ath10k *ar) +static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); + iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } #endif /* _PCI_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 1c584c4b019cb7362bd611688fe57e01b3d38b02..e1ffdd57a18c8b34d7f2efb0705f65fa2533a655 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -839,7 +839,6 @@ struct rx_ppdu_start { * Reserved: HW should fill with 0, FW should ignore. */ - #define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) #define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) #define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c new file mode 100644 index 0000000000000000000000000000000000000000..3e1454b74e007200ccc8c6ee4a3f1647cefceebd --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -0,0 +1,561 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/relay.h> +#include "core.h" +#include "debug.h" + +static void send_fft_sample(struct ath10k *ar, + const struct fft_sample_tlv *fft_sample_tlv) +{ + int length; + + if (!ar->spectral.rfs_chan_spec_scan) + return; + + length = __be16_to_cpu(fft_sample_tlv->length) + + sizeof(*fft_sample_tlv); + relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length); +} + +static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len, + u8 *data) +{ + int dc_pos; + u8 max_exp; + + dc_pos = bin_len / 2; + + /* peak index outside of bins */ + if (dc_pos < max_index || -dc_pos >= max_index) + return 0; + + for (max_exp = 0; max_exp < 8; max_exp++) { + if (data[dc_pos + max_index] == (max_magnitude >> max_exp)) + break; + } + + /* max_exp not found */ + if (data[dc_pos + max_index] != (max_magnitude >> max_exp)) + return 0; + + return max_exp; +} + +int ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf) +{ + struct fft_sample_ath10k *fft_sample; + u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS]; + u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag; + u32 reg0, reg1, nf_list1, nf_list2; + u8 chain_idx, *bins; + int dc_pos; + + fft_sample = (struct fft_sample_ath10k *)&buf; + + if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS) + return -EINVAL; + + reg0 = __le32_to_cpu(fftr->reg0); + reg1 = __le32_to_cpu(fftr->reg1); + + length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len; + fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K; + fft_sample->tlv.length = __cpu_to_be16(length); + + /* TODO: there might be a reason why the hardware reports 20/40/80 MHz, + * but the results/plots suggest that it's actually 22/44/88 MHz. + */ + switch (event->hdr.chan_width_mhz) { + case 20: + fft_sample->chan_width_mhz = 22; + break; + case 40: + fft_sample->chan_width_mhz = 44; + break; + case 80: + /* TODO: As experiments with an analogue sender and various + * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz) + * show, the particular configuration of 80 MHz/64 bins does + * not match with the other samples at all. Until the reason + * for that is found, don't report these samples. 
+ */ + if (bin_len == 64) + return -EINVAL; + fft_sample->chan_width_mhz = 88; + break; + default: + fft_sample->chan_width_mhz = event->hdr.chan_width_mhz; + } + + fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB); + fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB); + + peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); + fft_sample->max_magnitude = __cpu_to_be16(peak_mag); + fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX); + fft_sample->rssi = event->hdr.rssi_combined; + + total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB); + base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB); + fft_sample->total_gain_db = __cpu_to_be16(total_gain_db); + fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db); + + freq1 = __le16_to_cpu(event->hdr.freq1); + freq2 = __le16_to_cpu(event->hdr.freq2); + fft_sample->freq1 = __cpu_to_be16(freq1); + fft_sample->freq2 = __cpu_to_be16(freq2); + + nf_list1 = __le32_to_cpu(event->hdr.nf_list_1); + nf_list2 = __le32_to_cpu(event->hdr.nf_list_2); + chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX); + + switch (chain_idx) { + case 0: + fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu); + break; + case 1: + fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu); + break; + case 2: + fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu); + break; + case 3: + fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu); + break; + } + + bins = (u8 *)fftr; + bins += sizeof(*fftr); + + fft_sample->tsf = __cpu_to_be64(tsf); + + /* max_exp has been directly reported by previous hardware (ath9k), + * maybe its possible to get it by other means? + */ + fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag, + bin_len, bins); + + memcpy(fft_sample->data, bins, bin_len); + + /* DC value (value in the middle) is the blind spot of the spectral + * sample and invalid, interpolate it. + */ + dc_pos = bin_len / 2; + fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] + + fft_sample->data[dc_pos - 1]) / 2; + + send_fft_sample(ar, &fft_sample->tlv); + + return 0; +} + +static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + if (list_empty(&ar->arvifs)) + return NULL; + + /* if there already is a vif doing spectral, return that. */ + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->spectral_enabled) + return arvif; + + /* otherwise, return the first vif. 
*/ + return list_first_entry(&ar->arvifs, typeof(*arvif), list); +} + +static int ath10k_spectral_scan_trigger(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int res; + int vdev_id; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + vdev_id = arvif->vdev_id; + + if (ar->spectral.mode == SPECTRAL_DISABLED) + return 0; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_TRIGGER, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + return 0; +} + +static int ath10k_spectral_scan_config(struct ath10k *ar, + enum ath10k_spectral_mode mode) +{ + struct wmi_vdev_spectral_conf_arg arg; + struct ath10k_vif *arvif; + int vdev_id, count, res = 0; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + + vdev_id = arvif->vdev_id; + + arvif->spectral_enabled = (mode != SPECTRAL_DISABLED); + ar->spectral.mode = mode; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_DISABLE); + if (res < 0) { + ath10k_warn(ar, "failed to enable spectral scan: %d\n", res); + return res; + } + + if (mode == SPECTRAL_DISABLED) + return 0; + + if (mode == SPECTRAL_BACKGROUND) + count = WMI_SPECTRAL_COUNT_DEFAULT; + else + count = max_t(u8, 1, ar->spectral.config.count); + + arg.vdev_id = vdev_id; + arg.scan_count = count; + arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT; + arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT; + arg.scan_fft_size = ar->spectral.config.fft_size; + arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT; + arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT; + arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; + arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT; + arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT; + arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT; + arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT; + arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT; + arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT; + arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT; + arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT; + arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT; + arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT; + + res = ath10k_wmi_vdev_spectral_conf(ar, &arg); + if (res < 0) { + ath10k_warn(ar, "failed to configure spectral scan: %d\n", res); + return res; + } + + return 0; +} + +static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char *mode = ""; + unsigned int len; + enum ath10k_spectral_mode spectral_mode; + + mutex_lock(&ar->conf_mutex); + spectral_mode = ar->spectral.mode; + mutex_unlock(&ar->conf_mutex); + + switch (spectral_mode) { + case SPECTRAL_DISABLED: + mode = "disable"; + break; + case SPECTRAL_BACKGROUND: + mode = "background"; + break; + case SPECTRAL_MANUAL: + mode = "manual"; + break; + } + + len = strlen(mode); + return simple_read_from_buffer(user_buf, count, ppos, mode, len); +} + +static ssize_t write_file_spec_scan_ctl(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + ssize_t 
len; + int res; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + mutex_lock(&ar->conf_mutex); + + if (strncmp("trigger", buf, 7) == 0) { + if (ar->spectral.mode == SPECTRAL_MANUAL || + ar->spectral.mode == SPECTRAL_BACKGROUND) { + /* reset the configuration to adopt possibly changed + * debugfs parameters + */ + res = ath10k_spectral_scan_config(ar, + ar->spectral.mode); + if (res < 0) { + ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n", + res); + } + res = ath10k_spectral_scan_trigger(ar); + if (res < 0) { + ath10k_warn(ar, "failed to trigger spectral scan: %d\n", + res); + } + } else { + res = -EINVAL; + } + } else if (strncmp("background", buf, 9) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND); + } else if (strncmp("manual", buf, 6) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL); + } else if (strncmp("disable", buf, 7) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED); + } else { + res = -EINVAL; + } + + mutex_unlock(&ar->conf_mutex); + + if (res < 0) + return res; + + return count; +} + +static const struct file_operations fops_spec_scan_ctl = { + .read = read_file_spec_scan_ctl, + .write = write_file_spec_scan_ctl, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_count(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + unsigned int len; + u8 spectral_count; + + mutex_lock(&ar->conf_mutex); + spectral_count = ar->spectral.config.count; + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", spectral_count); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_count(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 0 || val > 255) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.count = val; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_count = { + .read = read_file_spectral_count, + .write = write_file_spectral_count, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_bins(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + unsigned int len, bins, fft_size, bin_scale; + + mutex_lock(&ar->conf_mutex); + + fft_size = ar->spectral.config.fft_size; + bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + bins = 1 << (fft_size - bin_scale); + + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", bins); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_bins(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 64 || val > 
SPECTRAL_ATH10K_MAX_NUM_BINS) + return -EINVAL; + + if (!is_power_of_2(val)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.fft_size = ilog2(val); + ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_bins = { + .read = read_file_spectral_bins, + .write = write_file_spectral_bins, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + buf_file = debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); + *is_global = 1; + return buf_file; +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +static struct rchan_callbacks rfs_spec_scan_cb = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +int ath10k_spectral_start(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) + arvif->spectral_enabled = 0; + + ar->spectral.mode = SPECTRAL_DISABLED; + ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT; + ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT; + + return 0; +} + +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + if (!arvif->spectral_enabled) + return 0; + + return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED); +} + +int ath10k_spectral_create(struct ath10k *ar) +{ + ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", + ar->debug.debugfs_phy, + 1024, 256, + &rfs_spec_scan_cb, NULL); + debugfs_create_file("spectral_scan_ctl", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spec_scan_ctl); + debugfs_create_file("spectral_count", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spectral_count); + debugfs_create_file("spectral_bins", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spectral_bins); + + return 0; +} + +void ath10k_spectral_destroy(struct ath10k *ar) +{ + if (ar->spectral.rfs_chan_spec_scan) { + relay_close(ar->spectral.rfs_chan_spec_scan); + ar->spectral.rfs_chan_spec_scan = NULL; + } +} diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h new file mode 100644 index 0000000000000000000000000000000000000000..ddc57c55727214a386813e7f0a99393930161683 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef SPECTRAL_H +#define SPECTRAL_H + +#include "../spectral_common.h" + +/** + * struct ath10k_spec_scan - parameters for Atheros spectral scan + * + * @count: number of scan results requested for manual mode + * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale) + */ +struct ath10k_spec_scan { + u8 count; + u8 fft_size; +}; + +/* enum ath10k_spectral_mode: + * + * @SPECTRAL_DISABLED: spectral mode is disabled + * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with + * something else. + * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples + * is performed manually. + */ +enum ath10k_spectral_mode { + SPECTRAL_DISABLED = 0, + SPECTRAL_BACKGROUND, + SPECTRAL_MANUAL, +}; + +#ifdef CONFIG_ATH10K_DEBUGFS + +int ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf); +int ath10k_spectral_start(struct ath10k *ar); +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif); +int ath10k_spectral_create(struct ath10k *ar); +void ath10k_spectral_destroy(struct ath10k *ar); + +#else + +static inline int +ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf) +{ + return 0; +} + +static inline int ath10k_spectral_start(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + return 0; +} + +static inline int ath10k_spectral_create(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_spectral_destroy(struct ath10k *ar) +{ +} + +#endif /* CONFIG_ATH10K_DEBUGFS */ + +#endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index be7ba1e78afe878a6e9284071a5d1dc1f6983bfc..9d0ae30f9ff18f763351f7958f590936495a413b 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -284,7 +284,6 @@ Fw Mode/SubMode Mask #define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00 #define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8 - /* hi_option_flag2 options */ #define HI_OPTION_OFFLOAD_AMSDU 0x01 #define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */ diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c new file mode 100644 index 0000000000000000000000000000000000000000..483db9cb8c963fb204db1feed911b8e6e75ba312 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -0,0 +1,382 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "testmode.h" + +#include <net/netlink.h> +#include <linux/firmware.h> + +#include "debug.h" +#include "wmi.h" +#include "hif.h" +#include "hw.h" + +#include "testmode_i.h" + +static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = { + [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = ATH10K_TM_DATA_MAX_LEN }, + [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, + [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, +}; + +/* Returns true if callee consumes the skb and the skb should be discarded. + * Returns false if skb is not used. Does not sleep. + */ +bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb) +{ + struct sk_buff *nl_skb; + bool consumed; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "testmode event wmi cmd_id %d skb %p skb->len %d\n", + cmd_id, skb, skb->len); + + ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len); + + spin_lock_bh(&ar->data_lock); + + if (!ar->testmode.utf_monitor) { + consumed = false; + goto out; + } + + /* Only testmode.c should be handling events from utf firmware, + * otherwise all sorts of problems will arise as mac80211 operations + * are not initialised. + */ + consumed = true; + + nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, + 2 * sizeof(u32) + skb->len, + GFP_ATOMIC); + if (!nl_skb) { + ath10k_warn(ar, + "failed to allocate skb for testmode wmi event\n"); + goto out; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI); + if (ret) { + ath10k_warn(ar, + "failed to put testmode wmi event cmd attribute: %d\n", + ret); + kfree_skb(nl_skb); + goto out; + } + + ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id); + if (ret) { + ath10k_warn(ar, + "failed to put testmode wmi event cmd_id: %d\n", + ret); + kfree_skb(nl_skb); + goto out; + } + + ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data); + if (ret) { + ath10k_warn(ar, + "failed to copy skb to testmode wmi event: %d\n", + ret); + kfree_skb(nl_skb); + goto out; + } + + cfg80211_testmode_event(nl_skb, GFP_ATOMIC); + +out: + spin_unlock_bh(&ar->data_lock); + + return consumed; +} + +static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) +{ + struct sk_buff *skb; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "testmode cmd get version_major %d version_minor %d\n", + ATH10K_TESTMODE_VERSION_MAJOR, + ATH10K_TESTMODE_VERSION_MINOR); + + skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy, + nla_total_size(sizeof(u32))); + if (!skb) + return -ENOMEM; + + ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR, + ATH10K_TESTMODE_VERSION_MAJOR); + if (ret) { + kfree_skb(skb); + return ret; + } + + ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR, + ATH10K_TESTMODE_VERSION_MINOR); + if (ret) { + kfree_skb(skb); + return ret; + } + + return cfg80211_testmode_reply(skb); +} + +static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) +{ + char filename[100]; + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n"); + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_UTF) { + ret = -EALREADY; + goto err; + } + + /* start utf only when the driver is not in use */ + if (ar->state != ATH10K_STATE_OFF) { + ret = -EBUSY; + goto err; + } + + if (WARN_ON(ar->testmode.utf != NULL)) { + /* utf image is already downloaded, it shouldn't be */ + ret = -EEXIST; + goto err; + } + + snprintf(filename, sizeof(filename), "%s/%s", +
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); + + /* load utf firmware image */ + ret = request_firmware(&ar->testmode.utf, filename, ar->dev); + if (ret) { + ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", + filename, ret); + goto err; + } + + spin_lock_bh(&ar->data_lock); + + ar->testmode.utf_monitor = true; + + spin_unlock_bh(&ar->data_lock); + + BUILD_BUG_ON(sizeof(ar->fw_features) != + sizeof(ar->testmode.orig_fw_features)); + + memcpy(ar->testmode.orig_fw_features, ar->fw_features, + sizeof(ar->fw_features)); + + /* utf.bin firmware image does not advertise firmware features. Do + * an ugly hack where we force the firmware features so that wmi.c + * will use the correct WMI interface. + */ + memset(ar->fw_features, 0, sizeof(ar->fw_features)); + __set_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features); + + ret = ath10k_hif_power_up(ar); + if (ret) { + ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); + ar->state = ATH10K_STATE_OFF; + goto err_fw_features; + } + + ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF); + if (ret) { + ath10k_err(ar, "failed to start core (testmode): %d\n", ret); + ar->state = ATH10K_STATE_OFF; + goto err_power_down; + } + + ar->state = ATH10K_STATE_UTF; + + ath10k_info(ar, "UTF firmware started\n"); + + mutex_unlock(&ar->conf_mutex); + + return 0; + +err_power_down: + ath10k_hif_power_down(ar); + +err_fw_features: + /* return the original firmware features */ + memcpy(ar->fw_features, ar->testmode.orig_fw_features, + sizeof(ar->fw_features)); + + release_firmware(ar->testmode.utf); + ar->testmode.utf = NULL; + +err: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + ath10k_core_stop(ar); + ath10k_hif_power_down(ar); + + spin_lock_bh(&ar->data_lock); + + ar->testmode.utf_monitor = false; + + spin_unlock_bh(&ar->data_lock); + + /* return the original firmware features */ + memcpy(ar->fw_features, ar->testmode.orig_fw_features, + sizeof(ar->fw_features)); + + release_firmware(ar->testmode.utf); + ar->testmode.utf = NULL; + + ar->state = ATH10K_STATE_OFF; +} + +static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[]) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n"); + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + __ath10k_tm_cmd_utf_stop(ar); + + ret = 0; + + ath10k_info(ar, "UTF firmware stopped\n"); + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) +{ + struct sk_buff *skb; + int ret, buf_len; + u32 cmd_id; + void *buf; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto out; + } + + if (!tb[ATH10K_TM_ATTR_DATA]) { + ret = -EINVAL; + goto out; + } + + if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) { + ret = -EINVAL; + goto out; + } + + buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); + cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]); + + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, + "testmode cmd wmi cmd_id %d buf %p buf_len %d\n", + cmd_id, buf, buf_len); + + ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len); + + skb = ath10k_wmi_alloc_skb(ar, buf_len); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + memcpy(skb->data, buf, buf_len); + + ret = ath10k_wmi_cmd_send(ar, skb, cmd_id); + if (ret) { + ath10k_warn(ar, "failed to transmit wmi command 
(testmode): %d\n", + ret); + goto out; + } + + ret = 0; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct ath10k *ar = hw->priv; + struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; + int ret; + + ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, + ath10k_tm_policy); + if (ret) + return ret; + + if (!tb[ATH10K_TM_ATTR_CMD]) + return -EINVAL; + + switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) { + case ATH10K_TM_CMD_GET_VERSION: + return ath10k_tm_cmd_get_version(ar, tb); + case ATH10K_TM_CMD_UTF_START: + return ath10k_tm_cmd_utf_start(ar, tb); + case ATH10K_TM_CMD_UTF_STOP: + return ath10k_tm_cmd_utf_stop(ar, tb); + case ATH10K_TM_CMD_WMI: + return ath10k_tm_cmd_wmi(ar, tb); + default: + return -EOPNOTSUPP; + } +} + +void ath10k_testmode_destroy(struct ath10k *ar) +{ + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_UTF) { + /* utf firmware is not running, nothing to do */ + goto out; + } + + __ath10k_tm_cmd_utf_stop(ar); + +out: + mutex_unlock(&ar->conf_mutex); +} diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h new file mode 100644 index 0000000000000000000000000000000000000000..9cdd150815db838fb1780cc560eaaa0a65d2eb7c --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" + +#ifdef CONFIG_NL80211_TESTMODE + +void ath10k_testmode_destroy(struct ath10k *ar); + +bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb); +int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else + +static inline void ath10k_testmode_destroy(struct ath10k *ar) +{ +} + +static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, + struct sk_buff *skb) +{ + return false; +} + +static inline int ath10k_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h new file mode 100644 index 0000000000000000000000000000000000000000..ba81bf66ce85aade4b60fe7ac90ac8c3b96dea40 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* "API" level of the ath10k testmode interface. Bump it after every + * incompatible interface change. + */ +#define ATH10K_TESTMODE_VERSION_MAJOR 1 + +/* Bump this after every _compatible_ interface change, for example + * addition of a new command or an attribute. + */ +#define ATH10K_TESTMODE_VERSION_MINOR 0 + +#define ATH10K_TM_DATA_MAX_LEN 5000 + +enum ath10k_tm_attr { + __ATH10K_TM_ATTR_INVALID = 0, + ATH10K_TM_ATTR_CMD = 1, + ATH10K_TM_ATTR_DATA = 2, + ATH10K_TM_ATTR_WMI_CMDID = 3, + ATH10K_TM_ATTR_VERSION_MAJOR = 4, + ATH10K_TM_ATTR_VERSION_MINOR = 5, + + /* keep last */ + __ATH10K_TM_ATTR_AFTER_LAST, + ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1, +}; + +/* All ath10k testmode interface commands specified in + * ATH10K_TM_ATTR_CMD + */ +enum ath10k_tm_cmd { + /* Returns the supported ath10k testmode interface version in + * ATH10K_TM_ATTR_VERSION. Always guaranteed to work. User space + * uses this to verify it's using the correct version of the + * testmode interface + */ + ATH10K_TM_CMD_GET_VERSION = 0, + + /* Boots the UTF firmware, the netdev interface must be down at the + * time. + */ + ATH10K_TM_CMD_UTF_START = 1, + + /* Shuts down the UTF firmware and puts the driver back into OFF + * state. + */ + ATH10K_TM_CMD_UTF_STOP = 2, + + /* The command used to transmit a WMI command to the firmware and + * the event to receive WMI events from the firmware. Without + * struct wmi_cmd_hdr header, only the WMI payload. Command id is + * provided with ATH10K_TM_ATTR_WMI_CMDID and payload in + * ATH10K_TM_ATTR_DATA. 
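+ * + * Illustrative flow (an assumption about the user-space side, not part of + * this interface definition): a test tool nests ATH10K_TM_ATTR_CMD = + * ATH10K_TM_CMD_WMI, ATH10K_TM_ATTR_WMI_CMDID and ATH10K_TM_ATTR_DATA + * inside NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE request, and WMI + * events from the UTF firmware come back as NL80211_CMD_TESTMODE events + * carrying the same three attributes.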
+ */ + ATH10K_TM_CMD_WMI = 3, +}; diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 4eb2ecbc06ef21d95339abf48dd990203dab7f8b..574b75ab260988a6525339ed44623e71e790a273 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -18,6 +18,7 @@ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #include +#include "core.h" #define _TRACE_H_ @@ -39,59 +40,79 @@ static inline void trace_ ## name(proto) {} #define ATH10K_MSG_MAX 200 DECLARE_EVENT_CLASS(ath10k_log_event, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf), + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __dynamic_array(char, msg, ATH10K_MSG_MAX) ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), ATH10K_MSG_MAX, vaf->fmt, *vaf->va) >= ATH10K_MSG_MAX); ), - TP_printk("%s", __get_str(msg)) + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_err, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_warn, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); DEFINE_EVENT(ath10k_log_event, ath10k_log_info, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) + TP_PROTO(struct ath10k *ar, struct va_format *vaf), + TP_ARGS(ar, vaf) ); TRACE_EVENT(ath10k_log_dbg, - TP_PROTO(unsigned int level, struct va_format *vaf), - TP_ARGS(level, vaf), + TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf), + TP_ARGS(ar, level, vaf), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, level) __dynamic_array(char, msg, ATH10K_MSG_MAX) ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __entry->level = level; WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), ATH10K_MSG_MAX, vaf->fmt, *vaf->va) >= ATH10K_MSG_MAX); ), - TP_printk("%s", __get_str(msg)) + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) ); TRACE_EVENT(ath10k_log_dbg_dump, - TP_PROTO(const char *msg, const char *prefix, + TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix, const void *buf, size_t buf_len), - TP_ARGS(msg, prefix, buf, buf_len), + TP_ARGS(ar, msg, prefix, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __string(msg, msg) __string(prefix, prefix) __field(size_t, buf_len) @@ -99,6 +120,8 @@ TRACE_EVENT(ath10k_log_dbg_dump, ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __assign_str(msg, msg); __assign_str(prefix, prefix); __entry->buf_len = buf_len; @@ -106,16 +129,22 @@ TRACE_EVENT(ath10k_log_dbg_dump, ), TP_printk( - "%s/%s\n", __get_str(prefix), __get_str(msg) + "%s %s %s/%s\n", + __get_str(driver), + __get_str(device), + __get_str(prefix), + __get_str(msg) ) ); TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(int id, void *buf, size_t buf_len, int ret), + TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret), - TP_ARGS(id, buf, buf_len, 
ret), + TP_ARGS(ar, id, buf, buf_len, ret), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) @@ -123,6 +152,8 @@ TRACE_EVENT(ath10k_wmi_cmd, ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __entry->id = id; __entry->buf_len = buf_len; __entry->ret = ret; @@ -130,7 +161,9 @@ TRACE_EVENT(ath10k_wmi_cmd, ), TP_printk( - "id %d len %zu ret %d", + "%s %s id %d len %zu ret %d", + __get_str(driver), + __get_str(device), __entry->id, __entry->buf_len, __entry->ret @@ -138,67 +171,85 @@ TRACE_EVENT(ath10k_wmi_cmd, ); TRACE_EVENT(ath10k_wmi_event, - TP_PROTO(int id, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len), - TP_ARGS(id, buf, buf_len), + TP_ARGS(ar, id, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __entry->id = id; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "id %d len %zu", + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), __entry->id, __entry->buf_len ) ); TRACE_EVENT(ath10k_htt_stats, - TP_PROTO(void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), - TP_ARGS(buf, buf_len), + TP_ARGS(ar, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "len %zu", + "%s %s len %zu", + __get_str(driver), + __get_str(device), __entry->buf_len ) ); TRACE_EVENT(ath10k_wmi_dbglog, - TP_PROTO(void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), - TP_ARGS(buf, buf_len), + TP_ARGS(ar, buf, buf_len), TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "len %zu", + "%s %s len %zu", + __get_str(driver), + __get_str(device), __entry->buf_len ) ); diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index f4fa22d1d5917f358ba408becb33e39e9954ef42..a0cbc21d0d4b9146ff4a4f578f29be2f88bcdc13 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -32,14 +32,14 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) * offchan_tx_skb. 
*/ spin_lock_bh(&ar->data_lock); if (ar->offchan_tx_skb != skb) { - ath10k_warn("completed old offchannel frame\n"); + ath10k_warn(ar, "completed old offchannel frame\n"); goto out; } complete(&ar->offchan_tx_completed); ar->offchan_tx_skb = NULL; /* just for sanity */ - ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); out: spin_unlock_bh(&ar->data_lock); } @@ -47,18 +47,19 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) void ath10k_txrx_tx_unref(struct ath10k_htt *htt, const struct htt_tx_done *tx_done) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct ieee80211_tx_info *info; struct ath10k_skb_cb *skb_cb; struct sk_buff *msdu; lockdep_assert_held(&htt->tx_lock); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); if (tx_done->msdu_id >= htt->max_num_pending_tx) { - ath10k_warn("warning: msdu_id %d too big, ignoring\n", + ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", tx_done->msdu_id); return; } @@ -177,12 +178,12 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, goto exit; peer->vdev_id = ev->vdev_id; - memcpy(peer->addr, ev->addr, ETH_ALEN); + ether_addr_copy(peer->addr, ev->addr); list_add(&peer->list, &ar->peers); wake_up(&ar->peer_mapping_wq); } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); set_bit(ev->peer_id, peer->peer_ids); @@ -199,12 +200,12 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, ev->peer_id); if (!peer) { - ath10k_warn("peer-unmap-event: unknown peer id %d\n", + ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n", ev->peer_id); goto exit; } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", peer->vdev_id, peer->addr, ev->peer_id); clear_bit(ev->peer_id, peer->peer_ids); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c2c87c916b5a579f8c6fb4ee2afc45050736e1a0..2c42bd504b792cce4bcf23e74c0184bcf6ba9569 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -23,6 +23,7 @@ #include "debug.h" #include "wmi.h" #include "mac.h" +#include "testmode.h" /* MAIN WMI cmd track */ static struct wmi_cmd_map wmi_cmd_map = { @@ -487,9 +488,131 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, }; +/* firmware 10.2 specific mappings */ +static struct wmi_cmd_map wmi_10_2_cmd_map = { + .init_cmdid = WMI_10_2_INIT_CMDID, + .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = 
WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + 
WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_2_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, +}; + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { int ret; + ret = wait_for_completion_timeout(&ar->wmi.service_ready, WMI_SERVICE_READY_TIMEOUT_HZ); return ret; @@ -498,23 +621,24 @@ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) { int ret; + ret = wait_for_completion_timeout(&ar->wmi.unified_ready, WMI_UNIFIED_READY_TIMEOUT_HZ); return ret; } -static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) +struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len) { struct sk_buff *skb; u32 round_len = roundup(len, 4); - skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); + skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len); if (!skb) return NULL; skb_reserve(skb, WMI_SKB_HEADROOM); if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned WMI skb\n"); + ath10k_warn(ar, "Unaligned WMI skb\n"); skb_put(skb, round_len); memset(skb->data, 0, round_len); @@ -545,7 +669,7 @@ static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, memset(skb_cb, 0, sizeof(*skb_cb)); ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); - trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret); + trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret); if (ret) goto err_pull; @@ -604,15 +728,14 @@ static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) 
wake_up(&ar->wmi.tx_credits_wq); } -static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, - u32 cmd_id) +int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) { int ret = -EOPNOTSUPP; might_sleep(); if (cmd_id == WMI_CMD_UNSUPPORTED) { - ath10k_warn("wmi command %d is not supported by firmware\n", + ath10k_warn(ar, "wmi command %d is not supported by firmware\n", cmd_id); return ret; } @@ -660,7 +783,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) len = round_up(len, 4); - wmi_skb = ath10k_wmi_alloc_skb(len); + wmi_skb = ath10k_wmi_alloc_skb(ar, len); if (!wmi_skb) return -ENOMEM; @@ -671,10 +794,10 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) cmd->hdr.tx_power = 0; cmd->hdr.buf_len = __cpu_to_le32(buf_len); - memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); + ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr)); memcpy(cmd->buf, skb->data, skb->len); - ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -690,6 +813,130 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) return ret; } +static void ath10k_wmi_event_scan_started(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_RUNNING; + + if (ar->scan.is_roc) + ieee80211_ready_on_channel(ar->hw); + + complete(&ar->scan.started); + break; + } +} + +static void ath10k_wmi_event_scan_completed(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + /* One suspected reason scan can be completed while starting is + * if firmware fails to deliver all scan events to the host, + * e.g. when transport pipe is full. This has been observed + * with spectral scan phyerr events starving wmi transport + * pipe. In such case the "scan completed" event should be (and + * is) ignored by the host as it may be just firmware's scan + * state machine recovering. 
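+ * + * In short, the expected flow is STARTING -> RUNNING on the started + * event, with RUNNING or ABORTING wound down via __ath10k_scan_finish(), + * so a completed event seen in IDLE or STARTING is only warned about and + * otherwise ignored.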
+ */ + ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + __ath10k_scan_finish(ar); + break; + } +} + +static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = NULL; + break; + } +} + +static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + + if (ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); + break; + } +} + +static const char * +ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, + enum wmi_scan_completion_reason reason) +{ + switch (type) { + case WMI_SCAN_EVENT_STARTED: + return "started"; + case WMI_SCAN_EVENT_COMPLETED: + switch (reason) { + case WMI_SCAN_REASON_COMPLETED: + return "completed"; + case WMI_SCAN_REASON_CANCELLED: + return "completed [cancelled]"; + case WMI_SCAN_REASON_PREEMPTED: + return "completed [preempted]"; + case WMI_SCAN_REASON_TIMEDOUT: + return "completed [timedout]"; + case WMI_SCAN_REASON_MAX: + break; + } + return "completed [unknown]"; + case WMI_SCAN_EVENT_BSS_CHANNEL: + return "bss channel"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + return "foreign channel"; + case WMI_SCAN_EVENT_DEQUEUED: + return "dequeued"; + case WMI_SCAN_EVENT_PREEMPTED: + return "preempted"; + case WMI_SCAN_EVENT_START_FAILED: + return "start failed"; + default: + return "unknown"; + } +} + static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) { struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; @@ -707,81 +954,32 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) scan_id = __le32_to_cpu(event->scan_id); vdev_id = __le32_to_cpu(event->vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n"); - ath10k_dbg(ATH10K_DBG_WMI, - "scan event type %d reason %d freq %d req_id %d " - "scan_id %d vdev_id %d\n", - event_type, reason, freq, req_id, scan_id, vdev_id); - spin_lock_bh(&ar->data_lock); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", + ath10k_wmi_event_scan_type_str(event_type, reason), + event_type, reason, freq, req_id, scan_id, vdev_id, + ath10k_scan_state_str(ar->scan.state), ar->scan.state); + switch (event_type) { case WMI_SCAN_EVENT_STARTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); - if (ar->scan.in_progress && ar->scan.is_roc) - ieee80211_ready_on_channel(ar->hw); - - complete(&ar->scan.started); + ath10k_wmi_event_scan_started(ar); break; case WMI_SCAN_EVENT_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); - switch (reason) { - case WMI_SCAN_REASON_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n"); - break; - 
case WMI_SCAN_REASON_CANCELLED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n"); - break; - case WMI_SCAN_REASON_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n"); - break; - case WMI_SCAN_REASON_TIMEDOUT: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n"); - break; - default: - break; - } - - ar->scan_channel = NULL; - if (!ar->scan.in_progress) { - ath10k_warn("no scan requested, ignoring\n"); - break; - } - - if (ar->scan.is_roc) { - ath10k_offchan_tx_purge(ar); - - if (!ar->scan.aborting) - ieee80211_remain_on_channel_expired(ar->hw); - } else { - ieee80211_scan_completed(ar->hw, ar->scan.aborting); - } - - del_timer(&ar->scan.timeout); - complete_all(&ar->scan.completed); - ar->scan.in_progress = false; + ath10k_wmi_event_scan_completed(ar); break; case WMI_SCAN_EVENT_BSS_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); - ar->scan_channel = NULL; + ath10k_wmi_event_scan_bss_chan(ar); break; case WMI_SCAN_EVENT_FOREIGN_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); - ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); - if (ar->scan.in_progress && ar->scan.is_roc && - ar->scan.roc_freq == freq) { - complete(&ar->scan.on_channel); - } - break; - case WMI_SCAN_EVENT_DEQUEUED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n"); - break; - case WMI_SCAN_EVENT_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n"); + ath10k_wmi_event_scan_foreign_chan(ar, freq); break; case WMI_SCAN_EVENT_START_FAILED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); + ath10k_warn(ar, "received scan start failure event\n"); break; + case WMI_SCAN_EVENT_DEQUEUED: + case WMI_SCAN_EVENT_PREEMPTED: default: break; } @@ -911,7 +1109,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) memset(status, 0, sizeof(*status)); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx status %08x\n", rx_status); if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { @@ -947,9 +1145,9 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) - ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); } else { - ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n"); + ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n"); status->band = phy_mode_to_band(phy_mode); } @@ -979,12 +1177,12 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) } } - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", status->freq, status->band, status->signal, status->rate_idx); @@ -1034,21 +1232,26 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) rx_clear_count = __le32_to_cpu(ev->rx_clear_count); cycle_count = __le32_to_cpu(ev->cycle_count); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count); spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - ath10k_warn("chan info event without a scan 
request?\n"); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received chan info event without a scan request, ignoring\n"); goto exit; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + break; } idx = freq_to_idx(ar, freq); if (idx >= ARRAY_SIZE(ar->survey)) { - ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", + ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", freq, idx); goto exit; } @@ -1079,15 +1282,15 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); } static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", skb->len); - trace_ath10k_wmi_dbglog(skb->data, skb->len); + trace_ath10k_wmi_dbglog(ar, skb->data, skb->len); return 0; } @@ -1097,7 +1300,7 @@ static void ath10k_wmi_event_update_stats(struct ath10k *ar, { struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); ath10k_debug_read_target_stats(ar, ev); } @@ -1107,7 +1310,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, { struct wmi_vdev_start_response_event *ev; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); ev = (struct wmi_vdev_start_response_event *)skb->data; @@ -1120,7 +1323,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); complete(&ar->vdev_setup_done); } @@ -1132,14 +1335,14 @@ static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, ev = (struct wmi_peer_sta_kickout_event *)skb->data; - ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", ev->peer_macaddr.addr); rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); if (!sta) { - ath10k_warn("Spurious quick kickout for STA %pM\n", + ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", ev->peer_macaddr.addr); goto exit; } @@ -1183,6 +1386,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, struct ieee80211_tim_ie *tim; u8 *ies, *ie; u8 ie_len, pvm_len; + __le32 t; + u32 v; /* if next SWBA has no tim_changed the tim_bitmap is garbage. 
* we must copy the bitmap upon change and reuse it later */ @@ -1193,8 +1398,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, sizeof(bcn_info->tim_info.tim_bitmap)); for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { - __le32 t = bcn_info->tim_info.tim_bitmap[i / 4]; - u32 v = __le32_to_cpu(t); + t = bcn_info->tim_info.tim_bitmap[i / 4]; + v = __le32_to_cpu(t); arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; } @@ -1216,7 +1421,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, (u8 *)skb_tail_pointer(bcn) - ies); if (!ie) { if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) - ath10k_warn("no tim ie found;\n"); + ath10k_warn(ar, "no tim ie found;\n"); return; } @@ -1236,12 +1441,12 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ie_len += expand_size; pvm_len += expand_size; } else { - ath10k_warn("tim expansion failed\n"); + ath10k_warn(ar, "tim expansion failed\n"); } } if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { - ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); + ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); return; } @@ -1255,7 +1460,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; } - ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", + ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", tim->dtim_count, tim->dtim_period, tim->bitmap_ctrl, pvm_len); } @@ -1310,7 +1515,6 @@ static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) u8 opp_ps_info = noa->ctwindow_oppps; bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT); - if (!noa_descriptors && !opps_enabled) return len; @@ -1333,7 +1537,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) return; - ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { new_len = ath10k_p2p_calc_noa_ie_len(noa); if (!new_len) @@ -1367,7 +1571,6 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, kfree(old_data); } - static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) { struct wmi_host_swba_event *ev; @@ -1381,7 +1584,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_host_swba_event *)skb->data; map = __le32_to_cpu(ev->vdev_map); - ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", ev->vdev_map); for (; map; map >>= 1, vdev_id++) { @@ -1391,13 +1594,13 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) i++; if (i >= WMI_MAX_AP_VDEV) { - ath10k_warn("swba has corrupted vdev map\n"); + ath10k_warn(ar, "swba has corrupted vdev map\n"); break; } bcn_info = &ev->bcn_info[i]; - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", i, __le32_to_cpu(bcn_info->tim_info.tim_len), @@ -1411,7 +1614,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) arvif = ath10k_get_arvif(ar, vdev_id); if (arvif == NULL) { - ath10k_warn("no vif for vdev_id %d found\n", vdev_id); + ath10k_warn(ar, "no vif for vdev_id %d found\n", + vdev_id); continue; } @@ -1428,7 +1632,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) bcn = 
ieee80211_beacon_get(ar->hw, arvif->vif); if (!bcn) { - ath10k_warn("could not get mac80211 beacon\n"); + ath10k_warn(ar, "could not get mac80211 beacon\n"); continue; } @@ -1440,7 +1644,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) if (arvif->beacon) { if (!arvif->beacon_sent) - ath10k_warn("SWBA overrun on vdev %d\n", + ath10k_warn(ar, "SWBA overrun on vdev %d\n", arvif->vdev_id); dma_unmap_single(arvif->ar->dev, @@ -1456,7 +1660,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ret = dma_mapping_error(arvif->ar->dev, ATH10K_SKB_CB(bcn)->paddr); if (ret) { - ath10k_warn("failed to map beacon: %d\n", ret); + ath10k_warn(ar, "failed to map beacon: %d\n", ret); dev_kfree_skb_any(bcn); goto skip; } @@ -1473,7 +1677,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); } static void ath10k_dfs_radar_report(struct ath10k *ar, @@ -1489,20 +1693,20 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, reg0 = __le32_to_cpu(rr->reg0); reg1 = __le32_to_cpu(rr->reg1); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); @@ -1529,25 +1733,25 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, pe.width = width; pe.rssi = rssi; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", pe.freq, pe.width, pe.rssi, pe.ts); ATH10K_DFS_STAT_INC(ar, pulses_detected); if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs no pulse pattern detected, yet\n"); return; } - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); ATH10K_DFS_STAT_INC(ar, radar_detected); /* Control radar events reporting in debugfs file dfs_block_radar_events */ if (ar->dfs_block_radar_events) { - ath10k_info("DFS Radar detected, but ignored as requested\n"); + ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); return; } @@ -1566,13 +1770,13 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, reg1 = __le32_to_cpu(fftr->reg1); rssi = event->hdr.rssi_combined; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", MS(reg0, 
SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), @@ -1584,7 +1788,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, /* false event detection */ if (rssi == DFS_RSSI_POSSIBLY_FALSE && peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); ATH10K_DFS_STAT_INC(ar, pulses_discarded); return -EINVAL; } @@ -1603,7 +1807,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, u8 *tlv_buf; buf_len = __le32_to_cpu(event->hdr.buf_len); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", event->hdr.phy_err_code, event->hdr.rssi_combined, __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); @@ -1616,21 +1820,22 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, while (i < buf_len) { if (i + sizeof(*tlv) > buf_len) { - ath10k_warn("too short buf for tlv header (%d)\n", i); + ath10k_warn(ar, "too short buf for tlv header (%d)\n", + i); return; } tlv = (struct phyerr_tlv *)&event->bufp[i]; tlv_len = __le16_to_cpu(tlv->len); tlv_buf = &event->bufp[i + sizeof(*tlv)]; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", tlv_len, tlv->tag, tlv->sig); switch (tlv->tag) { case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { - ath10k_warn("too short radar pulse summary (%d)\n", + ath10k_warn(ar, "too short radar pulse summary (%d)\n", i); return; } @@ -1640,7 +1845,8 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, break; case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { - ath10k_warn("too short fft report (%d)\n", i); + ath10k_warn(ar, "too short fft report (%d)\n", + i); return; } @@ -1655,11 +1861,59 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, } } -static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - u64 tsf) +static void +ath10k_wmi_event_spectral_scan(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + u64 tsf) { - ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n"); + int buf_len, tlv_len, res, i = 0; + struct phyerr_tlv *tlv; + u8 *tlv_buf; + struct phyerr_fft_report *fftr; + size_t fftr_len; + + buf_len = __le32_to_cpu(event->hdr.buf_len); + + while (i < buf_len) { + if (i + sizeof(*tlv) > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n", + i); + return; + } + + tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv_len = __le16_to_cpu(tlv->len); + tlv_buf = &event->bufp[i + sizeof(*tlv)]; + + if (i + sizeof(*tlv) + tlv_len > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", + i); + return; + } + + switch (tlv->tag) { + case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: + if (sizeof(*fftr) > tlv_len) { + ath10k_warn(ar, "failed to parse fft report at byte %d\n", + i); + return; + } + + fftr_len = tlv_len - sizeof(*fftr); + fftr = (struct phyerr_fft_report *)tlv_buf; + res = ath10k_spectral_process_fft(ar, event, + 
fftr, fftr_len, + tsf); + if (res < 0) { + ath10k_warn(ar, "failed to process fft report: %d\n", + res); + return; + } + break; + } + + i += sizeof(*tlv) + tlv_len; + } } static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) @@ -1674,7 +1928,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) /* Check if combined event available */ if (left_len < sizeof(*comb_event)) { - ath10k_warn("wmi phyerr combined event wrong len\n"); + ath10k_warn(ar, "wmi phyerr combined event wrong len\n"); return; } @@ -1688,7 +1942,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) tsf <<= 32; tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event phyerr count %d tsf64 0x%llX\n", count, tsf); @@ -1696,7 +1950,8 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) for (i = 0; i < count; i++) { /* Check if we can read event header */ if (left_len < sizeof(*event)) { - ath10k_warn("single event (%d) wrong head len\n", i); + ath10k_warn(ar, "single event (%d) wrong head len\n", + i); return; } @@ -1706,7 +1961,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) phy_err_code = event->hdr.phy_err_code; if (left_len < buf_len) { - ath10k_warn("single event (%d) wrong buf len\n", i); + ath10k_warn(ar, "single event (%d) wrong buf len\n", i); return; } @@ -1733,13 +1988,13 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); } static void ath10k_wmi_event_profile_match(struct ath10k *ar, - struct sk_buff *skb) + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); } static void ath10k_wmi_event_debug_print(struct ath10k *ar, @@ -1764,7 +2019,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, } if (i == sizeof(buf) - 1) - ath10k_warn("wmi debug print truncated: %d\n", skb->len); + ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len); /* for some reason the debug prints end with \n, remove that */ if (skb->data[i - 1] == '\n') @@ -1773,112 +2028,112 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, /* the last byte is always reserved for the null character */ buf[i] = '\0'; - ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); } static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); } static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); } static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, - struct sk_buff *skb) + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); } static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, - struct sk_buff *skb) + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, 
"WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); } static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); } static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); } static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); } static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); } static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); } static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, - struct sk_buff *skb) + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); } static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); } static void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, - struct sk_buff *skb) + struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); } static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); } static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); } static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, - u32 num_units, u32 unit_len) + u32 num_units, u32 unit_len) { dma_addr_t paddr; u32 pool_size; @@ -1894,7 +2149,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, &paddr, GFP_ATOMIC); if (!ar->wmi.mem_chunks[idx].vaddr) { - ath10k_warn("failed to allocate memory chunk\n"); + ath10k_warn(ar, "failed to allocate memory chunk\n"); return -ENOMEM; } @@ -1912,9 +2167,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) { struct 
wmi_service_ready_event *ev = (void *)skb->data; + DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; if (skb->len < sizeof(*ev)) { - ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n", skb->len, sizeof(*ev)); return; } @@ -1937,7 +2193,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } @@ -1945,8 +2201,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->ath_common.regulatory.current_rd = __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, - sizeof(ev->wmi_service_bitmap)); + wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap); + ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -1960,11 +2218,11 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, /* FIXME: it probably should be better to support this */ if (__le32_to_cpu(ev->num_mem_reqs) > 0) { - ath10k_warn("target requested %d memory chunks; ignoring\n", + ath10k_warn(ar, "target requested %d memory chunks; ignoring\n", __le32_to_cpu(ev->num_mem_reqs)); } - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->sw_version_1), @@ -1986,9 +2244,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; int ret; struct wmi_service_ready_event_10x *ev = (void *)skb->data; + DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; if (skb->len < sizeof(*ev)) { - ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + ath10k_warn(ar, "Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", skb->len, sizeof(*ev)); return; } @@ -2004,7 +2263,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } @@ -2012,8 +2271,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ar->ath_common.regulatory.current_rd = __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, - sizeof(ev->wmi_service_bitmap)); + wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap); + ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -2026,7 +2287,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { - ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", + ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", num_mem_reqs); return; } @@ -2034,7 +2295,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, if (!num_mem_reqs) goto exit; - ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", num_mem_reqs); for (i = 0; i < num_mem_reqs; ++i) { @@ -2052,7 +2313,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) num_units = TARGET_10X_NUM_VDEVS + 1; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", req_id, __le32_to_cpu(ev->mem_reqs[i].num_units), @@ -2067,7 +2328,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, } exit: - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->abi_version), @@ -2089,9 +2350,9 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) if (WARN_ON(skb->len < sizeof(*ev))) return -EINVAL; - memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); + ether_addr_copy(ar->mac_addr, ev->mac_addr.addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->abi_version), @@ -2113,7 +2374,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) return; - trace_ath10k_wmi_event(id, skb->data, skb->len); + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); switch (id) { case WMI_MGMT_RX_EVENTID: @@ -2211,7 +2472,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_ready_event_rx(ar, skb); break; default: - ath10k_warn("Unknown eventid: 
%d\n", id); + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } @@ -2222,6 +2483,7 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_10x_event_id id; + bool consumed; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); @@ -2229,7 +2491,19 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) return; - trace_ath10k_wmi_event(id, skb->data, skb->len); + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + consumed = ath10k_tm_event_wmi(ar, id, skb); + + /* Ready event must be handled normally also in UTF mode so that we + * know the UTF firmware has booted, others we are just bypass WMI + * events to testmode. + */ + if (consumed && id != WMI_10X_READY_EVENTID) { + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi testmode consumed 0x%x\n", id); + goto out; + } switch (id) { case WMI_10X_MGMT_RX_EVENTID: @@ -2317,28 +2591,156 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10X_READY_EVENTID: ath10k_wmi_ready_event_rx(ar, skb); break; + case WMI_10X_PDEV_UTF_EVENTID: + /* ignore utf events */ + break; default: - ath10k_warn("Unknown eventid: %d\n", id); + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } +out: dev_kfree_skb(skb); } +static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_2_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + return; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + switch (id) { + case WMI_10_2_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! 
*/ + return; + case WMI_10_2_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_10_2_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_2_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_2_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_10_2_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10_2_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_10_2_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_10_2_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_2_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_2_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10_2_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; + case WMI_10_2_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_10_2_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_10_2_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_10_2_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_10_2_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_10_2_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_10_2_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_10_2_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10_2_INST_RSSI_STATS_EVENTID: + ath10k_wmi_event_inst_rssi_stats(ar, skb); + break; + case WMI_10_2_VDEV_STANDBY_REQ_EVENTID: + ath10k_wmi_event_vdev_standby_req(ar, skb); + break; + case WMI_10_2_VDEV_RESUME_REQ_EVENTID: + ath10k_wmi_event_vdev_resume_req(ar, skb); + break; + case WMI_10_2_SERVICE_READY_EVENTID: + ath10k_wmi_10x_service_ready_event_rx(ar, skb); + break; + case WMI_10_2_READY_EVENTID: + ath10k_wmi_ready_event_rx(ar, skb); + break; + case WMI_10_2_RTT_KEEPALIVE_EVENTID: + case WMI_10_2_GPIO_INPUT_EVENTID: + case WMI_10_2_PEER_RATECODE_LIST_EVENTID: + case WMI_10_2_GENERIC_BUFFER_EVENTID: + case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: + case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: + case WMI_10_2_WDS_PEER_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; + } + + dev_kfree_skb(skb); +} static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ath10k_wmi_10x_process_rx(ar, skb); - else + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ath10k_wmi_10_2_process_rx(ar, skb); + else + ath10k_wmi_10x_process_rx(ar, skb); + } else { ath10k_wmi_main_process_rx(ar, skb); + } } /* WMI Initialization functions */ int ath10k_wmi_attach(struct ath10k *ar) { if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - ar->wmi.cmd = &wmi_10x_cmd_map; + if 
(test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ar->wmi.cmd = &wmi_10_2_cmd_map; + else + ar->wmi.cmd = &wmi_10x_cmd_map; + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; } else { @@ -2388,7 +2790,7 @@ int ath10k_wmi_connect(struct ath10k *ar) status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); if (status) { - ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", + ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n", status); return status; } @@ -2404,7 +2806,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, struct wmi_pdev_set_regdomain_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2415,7 +2817,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", rd, rd2g, rd5g, ctl2g, ctl5g); @@ -2431,7 +2833,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, struct wmi_pdev_set_regdomain_cmd_10x *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2443,7 +2845,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); cmd->dfs_domain = __cpu_to_le32(dfs_reg); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); @@ -2473,7 +2875,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, if (arg->passive) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2491,7 +2893,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, cmd->chan.reg_classid = arg->reg_class_id; cmd->chan.antenna_max = arg->max_antenna_gain; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi set channel mode %d freq %d\n", arg->mode, arg->freq); @@ -2504,7 +2906,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) struct wmi_pdev_suspend_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2518,7 +2920,7 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar) { struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(0); + skb = ath10k_wmi_alloc_skb(ar, 0); if (skb == NULL) return -ENOMEM; @@ -2531,11 +2933,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) struct sk_buff *skb; if (id == WMI_PDEV_PARAM_UNSUPPORTED) { - ath10k_warn("pdev param %d not supported by firmware\n", id); + ath10k_warn(ar, "pdev param %d not supported by firmware\n", + id); return -EOPNOTSUPP; } - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2543,7 +2946,7 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) cmd->param_id = __cpu_to_le32(id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", id, value); 
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); } @@ -2610,7 +3013,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) len = sizeof(*cmd) + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); - buf = ath10k_wmi_alloc_skb(len); + buf = ath10k_wmi_alloc_skb(ar, len); if (!buf) return -ENOMEM; @@ -2621,7 +3024,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) goto out; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", ar->wmi.num_mem_chunks); cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); @@ -2634,7 +3037,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) cmd->host_mem_chunks[i].req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi chunk %d len %d requested, addr 0x%llx\n", i, ar->wmi.mem_chunks[i].len, @@ -2643,7 +3046,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) out: memcpy(&cmd->resource_config, &config, sizeof(config)); - ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); } @@ -2701,7 +3104,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) len = sizeof(*cmd) + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); - buf = ath10k_wmi_alloc_skb(len); + buf = ath10k_wmi_alloc_skb(ar, len); if (!buf) return -ENOMEM; @@ -2712,7 +3115,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) goto out; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", ar->wmi.num_mem_chunks); cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); @@ -2725,7 +3128,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) cmd->host_mem_chunks[i].req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi chunk %d len %d requested, addr 0x%llx\n", i, ar->wmi.mem_chunks[i].len, @@ -2734,7 +3137,98 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) out: memcpy(&cmd->resource_config, &config, sizeof(config)); - ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); + return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); +} + +static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_2 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10x config = {}; + u32 len, val; + int i; + + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); + + config.bmiss_offload_max_vdev = + 
__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + + val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + + config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + + len = sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(ar, len); + if (!buf) + return -ENOMEM; + + cmd = (struct wmi_init_cmd_10_2 *)buf->data; + + if (ar->wmi.num_mem_chunks == 0) { + cmd->num_host_mem_chunks = 0; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ar->wmi.num_mem_chunks); + + cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + cmd->host_mem_chunks[i].ptr = + __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + cmd->host_mem_chunks[i].size = + __cpu_to_le32(ar->wmi.mem_chunks[i].len); + cmd->host_mem_chunks[i].req_id = + __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%llx\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr); + } +out: + memcpy(&cmd->resource_config.common, &config, sizeof(config)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); } @@ -2742,10 +3236,14 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) { int ret; - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ret = ath10k_wmi_10x_cmd_init(ar); - else + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ret = ath10k_wmi_10_2_cmd_init(ar); + else + ret = ath10k_wmi_10x_cmd_init(ar); + } else { ret = ath10k_wmi_main_cmd_init(ar); + } return ret; } @@ -2822,7 +3320,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, if (len < 0) return len; /* len contains error code here */ - skb = ath10k_wmi_alloc_skb(len); + skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -2865,8 +3363,8 @@ int ath10k_wmi_start_scan(struct ath10k *ar, channels->num_chan = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) - channels->channel_list[i] = - __cpu_to_le32(arg->channels[i]); + channels->channel_list[i].freq = + __cpu_to_le16(arg->channels[i]); off += sizeof(*channels); off += sizeof(__le32) * arg->n_channels; @@ -2918,7 +3416,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, return -EINVAL; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); } @@ -2960,7 +3458,7 @@ int 
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2976,7 +3474,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) cmd->scan_id = __cpu_to_le32(scan_id); cmd->scan_req_id = __cpu_to_le32(req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", arg->req_id, arg->req_type, arg->u.scan_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); @@ -2990,7 +3488,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2998,9 +3496,9 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->vdev_type = __cpu_to_le32(type); cmd->vdev_subtype = __cpu_to_le32(subtype); - memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); + ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", vdev_id, type, subtype, macaddr); @@ -3012,22 +3510,23 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_delete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); } -static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *arg, - u32 cmd_id) +static int +ath10k_wmi_vdev_start_restart(struct ath10k *ar, + const struct wmi_vdev_start_request_arg *arg, + u32 cmd_id) { struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; @@ -3052,7 +3551,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, else return -EINVAL; /* should not happen, we already check cmd_id */ - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3090,9 +3589,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, cmd->chan.reg_classid = arg->channel.reg_class_id; cmd->chan.antenna_max = arg->channel.max_antenna_gain; - ath10k_dbg(ATH10K_DBG_WMI, - "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " - "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n", + cmdname, arg->vdev_id, flags, arg->channel.freq, arg->channel.mode, cmd->chan.flags, arg->channel.max_power); @@ -3108,7 +3607,7 @@ int ath10k_wmi_vdev_start(struct ath10k *ar, } int ath10k_wmi_vdev_restart(struct ath10k *ar, - const struct wmi_vdev_start_request_arg *arg) + const struct wmi_vdev_start_request_arg *arg) { u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid; @@ -3120,14 +3619,14 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_stop_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, 
sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_stop_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); } @@ -3137,16 +3636,16 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) struct wmi_vdev_up_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_up_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->vdev_assoc_id = __cpu_to_le32(aid); - memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); + ether_addr_copy(cmd->vdev_bssid.addr, bssid); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); @@ -3158,14 +3657,14 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_down_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_down_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev down id 0x%x\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); @@ -3178,13 +3677,13 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, struct sk_buff *skb; if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev param %d not supported by firmware\n", param_id); return -EOPNOTSUPP; } - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3193,7 +3692,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev id 0x%x set param %d value %d\n", vdev_id, param_id, param_value); @@ -3211,7 +3710,7 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); if (!skb) return -ENOMEM; @@ -3225,32 +3724,88 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); if (arg->macaddr) - memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); if (arg->key_data) memcpy(cmd->key_data, arg->key_data, arg->key_len); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_install_key_cmdid); } +int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg) +{ + struct wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + u32 cmdid; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->scan_count = __cpu_to_le32(arg->scan_count); + cmd->scan_period = __cpu_to_le32(arg->scan_period); + cmd->scan_priority = 
__cpu_to_le32(arg->scan_priority); + cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); + cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); + cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); + cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); + cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); + cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); + cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); + cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); + cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode); + cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr); + cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); + cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); + cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); + cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); + cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); + + cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmdid); +} + +int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + u32 enable) +{ + struct wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + u32 cmdid; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->trigger_cmd = __cpu_to_le32(trigger); + cmd->enable_cmd = __cpu_to_le32(enable); + + cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmdid); +} + int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, const u8 peer_addr[ETH_ALEN]) { struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); @@ -3262,15 +3817,15 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, struct wmi_peer_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_delete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); @@ -3282,16 +3837,16 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, struct wmi_peer_flush_tids_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); - memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 
vdev_id, peer_addr, tid_bitmap); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); @@ -3304,7 +3859,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, struct wmi_peer_set_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3312,9 +3867,9 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_value); @@ -3327,7 +3882,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, struct wmi_sta_powersave_mode_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3335,7 +3890,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->sta_ps_mode = __cpu_to_le32(psmode); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi set powersave id 0x%x mode %d\n", vdev_id, psmode); @@ -3350,7 +3905,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, struct wmi_sta_powersave_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3359,7 +3914,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sta ps param vdev_id 0x%x param %d value %d\n", vdev_id, param_id, value); return ath10k_wmi_cmd_send(ar, skb, @@ -3375,7 +3930,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, if (!mac) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3383,9 +3938,9 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(value); - memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, mac); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", vdev_id, param_id, value, mac); @@ -3405,7 +3960,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); - skb = ath10k_wmi_alloc_skb(len); + skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return -EINVAL; @@ -3447,24 +4002,12 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); } -int ath10k_wmi_peer_assoc(struct ath10k *ar, - const struct wmi_peer_assoc_complete_arg *arg) +static void +ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) { - struct wmi_peer_assoc_complete_cmd *cmd; - struct sk_buff *skb; + struct wmi_common_peer_assoc_complete_cmd *cmd = buf; - if (arg->peer_mpdu_density > 16) - return -EINVAL; - if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) - 
return -EINVAL; - if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); cmd->peer_associd = __cpu_to_le32(arg->peer_aid); @@ -3479,7 +4022,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); - memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN); + ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); cmd->peer_legacy_rates.num_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates); @@ -3499,8 +4042,80 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); cmd->peer_vht_rates.tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); +} + +static void +ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_main_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info)); +} + +static void +ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + ath10k_wmi_peer_assoc_fill(ar, buf, arg); +} + +static void +ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf; + int max_mcs, max_nss; + u32 info0; + + /* TODO: Is using max values okay with firmware? */ + max_mcs = 0xf; + max_nss = 0xf; + + info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) | + SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS); + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + cmd->info0 = __cpu_to_le32(info0); +} + +int ath10k_wmi_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct sk_buff *skb; + int len; + + if (arg->peer_mpdu_density > 16) + return -EINVAL; + if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); + else + len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); + } else { + len = sizeof(struct wmi_main_peer_assoc_complete_cmd); + } + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); + else + ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); + } else { + ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); + } - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer assoc vdev %d addr %pM (%s)\n", arg->vdev_id, arg->addr, arg->peer_reassoc ?
"reassociate" : "new"); @@ -3518,7 +4133,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) int ret; u16 fc; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3532,6 +4147,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) cmd->msdu_id = 0; cmd->frame_control = __cpu_to_le32(fc); cmd->flags = 0; + cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); @@ -3560,12 +4176,12 @@ static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, } int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, - const struct wmi_pdev_set_wmm_params_arg *arg) + const struct wmi_pdev_set_wmm_params_arg *arg) { struct wmi_pdev_set_wmm_params *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3575,7 +4191,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); - ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_wmm_params_cmdid); } @@ -3585,14 +4201,14 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_request_stats_cmd *)skb->data; cmd->stats_id = __cpu_to_le32(stats_id); - ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); } @@ -3602,7 +4218,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, struct wmi_force_fw_hang_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3610,7 +4226,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, cmd->type = __cpu_to_le32(type); cmd->delay_ms = __cpu_to_le32(delay_ms); - ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", type, delay_ms); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); } @@ -3621,7 +4237,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) struct sk_buff *skb; u32 cfg; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3642,7 +4258,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) cmd->config_enable = __cpu_to_le32(cfg); cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi dbglog cfg modules %08x %08x config %08x %08x\n", __le32_to_cpu(cmd->module_enable), __le32_to_cpu(cmd->module_valid), diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index e93df2c104136a10f03052908ef45ca38f514e6c..86f5ebccfe79c26a248935de47c6cc13f1b2cbd8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -73,119 +73,280 @@ struct wmi_cmd_hdr { #define HTC_PROTOCOL_VERSION 0x0002 
#define WMI_PROTOCOL_VERSION 0x0002 -enum wmi_service_id { - WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */ - WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */ - WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */ - WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */ - WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */ - WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */ - WMI_SERVICE_AP_UAPSD, /* uapsd on AP */ - WMI_SERVICE_AP_DFS, /* DFS on AP */ - WMI_SERVICE_11AC, /* supports 11ac */ - WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/ - WMI_SERVICE_PHYERR, /* PHY error */ - WMI_SERVICE_BCN_FILTER, /* Beacon filter support */ - WMI_SERVICE_RTT, /* RTT (round trip time) support */ - WMI_SERVICE_RATECTRL, /* Rate-control */ - WMI_SERVICE_WOW, /* WOW Support */ - WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */ - WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */ - WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */ - WMI_SERVICE_NLO, /* Network list offload service */ - WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */ - WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */ - WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */ - WMI_SERVICE_CHATTER, /* Chatter service */ - WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */ - WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */ - WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */ - WMI_SERVICE_GPIO, /* GPIO service */ - WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */ - WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */ - WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */ - WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */ - WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */ - - WMI_SERVICE_LAST, - WMI_MAX_SERVICE = 64 /* max service */ +enum wmi_service { + WMI_SERVICE_BEACON_OFFLOAD = 0, + WMI_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_DFS, + WMI_SERVICE_11AC, + WMI_SERVICE_BLOCKACK, + WMI_SERVICE_PHYERR, + WMI_SERVICE_BCN_FILTER, + WMI_SERVICE_RTT, + WMI_SERVICE_RATECTRL, + WMI_SERVICE_WOW, + WMI_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_IRAM_TIDS, + WMI_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_NLO, + WMI_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_SCAN_SCH, + WMI_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CHATTER, + WMI_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_GPIO, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_TX_ENCAP, + WMI_SERVICE_BURST, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, + + /* keep last */ + WMI_SERVICE_MAX, +}; + +enum wmi_10x_service { + WMI_10X_SERVICE_BEACON_OFFLOAD = 0, + WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_10X_SERVICE_STA_PWRSAVE, + WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10X_SERVICE_AP_UAPSD, + WMI_10X_SERVICE_AP_DFS, + WMI_10X_SERVICE_11AC, + WMI_10X_SERVICE_BLOCKACK, + WMI_10X_SERVICE_PHYERR, + WMI_10X_SERVICE_BCN_FILTER, + WMI_10X_SERVICE_RTT, + WMI_10X_SERVICE_RATECTRL, + WMI_10X_SERVICE_WOW, + WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_10X_SERVICE_IRAM_TIDS, + WMI_10X_SERVICE_BURST, + + /* introduced in 10.2 */ + WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10X_SERVICE_FORCE_FW_HANG, + 
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, +}; + +enum wmi_main_service { + WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0, + WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_MAIN_SERVICE_STA_PWRSAVE, + WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_MAIN_SERVICE_AP_UAPSD, + WMI_MAIN_SERVICE_AP_DFS, + WMI_MAIN_SERVICE_11AC, + WMI_MAIN_SERVICE_BLOCKACK, + WMI_MAIN_SERVICE_PHYERR, + WMI_MAIN_SERVICE_BCN_FILTER, + WMI_MAIN_SERVICE_RTT, + WMI_MAIN_SERVICE_RATECTRL, + WMI_MAIN_SERVICE_WOW, + WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_MAIN_SERVICE_NLO, + WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_MAIN_SERVICE_SCAN_SCH, + WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_MAIN_SERVICE_CHATTER, + WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_MAIN_SERVICE_GPIO, + WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_MAIN_SERVICE_TX_ENCAP, }; static inline char *wmi_service_name(int service_id) { +#define SVCSTR(x) case x: return #x + switch (service_id) { - case WMI_SERVICE_BEACON_OFFLOAD: - return "BEACON_OFFLOAD"; - case WMI_SERVICE_SCAN_OFFLOAD: - return "SCAN_OFFLOAD"; - case WMI_SERVICE_ROAM_OFFLOAD: - return "ROAM_OFFLOAD"; - case WMI_SERVICE_BCN_MISS_OFFLOAD: - return "BCN_MISS_OFFLOAD"; - case WMI_SERVICE_STA_PWRSAVE: - return "STA_PWRSAVE"; - case WMI_SERVICE_STA_ADVANCED_PWRSAVE: - return "STA_ADVANCED_PWRSAVE"; - case WMI_SERVICE_AP_UAPSD: - return "AP_UAPSD"; - case WMI_SERVICE_AP_DFS: - return "AP_DFS"; - case WMI_SERVICE_11AC: - return "11AC"; - case WMI_SERVICE_BLOCKACK: - return "BLOCKACK"; - case WMI_SERVICE_PHYERR: - return "PHYERR"; - case WMI_SERVICE_BCN_FILTER: - return "BCN_FILTER"; - case WMI_SERVICE_RTT: - return "RTT"; - case WMI_SERVICE_RATECTRL: - return "RATECTRL"; - case WMI_SERVICE_WOW: - return "WOW"; - case WMI_SERVICE_RATECTRL_CACHE: - return "RATECTRL CACHE"; - case WMI_SERVICE_IRAM_TIDS: - return "IRAM TIDS"; - case WMI_SERVICE_ARPNS_OFFLOAD: - return "ARPNS_OFFLOAD"; - case WMI_SERVICE_NLO: - return "NLO"; - case WMI_SERVICE_GTK_OFFLOAD: - return "GTK_OFFLOAD"; - case WMI_SERVICE_SCAN_SCH: - return "SCAN_SCH"; - case WMI_SERVICE_CSA_OFFLOAD: - return "CSA_OFFLOAD"; - case WMI_SERVICE_CHATTER: - return "CHATTER"; - case WMI_SERVICE_COEX_FREQAVOID: - return "COEX_FREQAVOID"; - case WMI_SERVICE_PACKET_POWER_SAVE: - return "PACKET_POWER_SAVE"; - case WMI_SERVICE_FORCE_FW_HANG: - return "FORCE FW HANG"; - case WMI_SERVICE_GPIO: - return "GPIO"; - case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM: - return "MODULATED DTIM"; - case WMI_STA_UAPSD_BASIC_AUTO_TRIG: - return "BASIC UAPSD"; - case WMI_STA_UAPSD_VAR_AUTO_TRIG: - return "VAR UAPSD"; - case WMI_SERVICE_STA_KEEP_ALIVE: - return "STA KEEP ALIVE"; - case WMI_SERVICE_TX_ENCAP: - return "TX ENCAP"; + SVCSTR(WMI_SERVICE_BEACON_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_OFFLOAD); + SVCSTR(WMI_SERVICE_ROAM_OFFLOAD); + SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCSTR(WMI_SERVICE_STA_PWRSAVE); + SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCSTR(WMI_SERVICE_AP_UAPSD); + SVCSTR(WMI_SERVICE_AP_DFS); + SVCSTR(WMI_SERVICE_11AC); + SVCSTR(WMI_SERVICE_BLOCKACK); + SVCSTR(WMI_SERVICE_PHYERR); + SVCSTR(WMI_SERVICE_BCN_FILTER); + SVCSTR(WMI_SERVICE_RTT); + SVCSTR(WMI_SERVICE_RATECTRL); + SVCSTR(WMI_SERVICE_WOW); + SVCSTR(WMI_SERVICE_RATECTRL_CACHE); + SVCSTR(WMI_SERVICE_IRAM_TIDS); 
+ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD); + SVCSTR(WMI_SERVICE_NLO); + SVCSTR(WMI_SERVICE_GTK_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_SCH); + SVCSTR(WMI_SERVICE_CSA_OFFLOAD); + SVCSTR(WMI_SERVICE_CHATTER); + SVCSTR(WMI_SERVICE_COEX_FREQAVOID); + SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE); + SVCSTR(WMI_SERVICE_FORCE_FW_HANG); + SVCSTR(WMI_SERVICE_GPIO); + SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE); + SVCSTR(WMI_SERVICE_TX_ENCAP); + SVCSTR(WMI_SERVICE_BURST); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); default: - return "UNKNOWN SERVICE\n"; + return NULL; } + +#undef SVCSTR +} + +#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ + (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ + BIT((svc_id)%(sizeof(u32)))) + +#define SVCMAP(x, y) \ + do { \ + if (WMI_SERVICE_IS_ENABLED((in), (x))) \ + __set_bit(y, out); \ + } while (0) + +static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out) +{ + SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE); + SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCMAP(WMI_10X_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD); + SVCMAP(WMI_10X_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS); + SVCMAP(WMI_10X_SERVICE_11AC, + WMI_SERVICE_11AC); + SVCMAP(WMI_10X_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK); + SVCMAP(WMI_10X_SERVICE_PHYERR, + WMI_SERVICE_PHYERR); + SVCMAP(WMI_10X_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER); + SVCMAP(WMI_10X_SERVICE_RTT, + WMI_SERVICE_RTT); + SVCMAP(WMI_10X_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL); + SVCMAP(WMI_10X_SERVICE_WOW, + WMI_SERVICE_WOW); + SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE); + SVCMAP(WMI_10X_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS); + SVCMAP(WMI_10X_SERVICE_BURST, + WMI_SERVICE_BURST); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); } +static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out) +{ + SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE); + SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD); + SVCMAP(WMI_MAIN_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS); + SVCMAP(WMI_MAIN_SERVICE_11AC, + WMI_SERVICE_11AC); + SVCMAP(WMI_MAIN_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK); + SVCMAP(WMI_MAIN_SERVICE_PHYERR, + WMI_SERVICE_PHYERR); + SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER); + SVCMAP(WMI_MAIN_SERVICE_RTT, + WMI_SERVICE_RTT); + SVCMAP(WMI_MAIN_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL); + SVCMAP(WMI_MAIN_SERVICE_WOW, + WMI_SERVICE_WOW); + 
SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE); + SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS); + SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_ARPNS_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_NLO, + WMI_SERVICE_NLO); + SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH); + SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_CHATTER, + WMI_SERVICE_CHATTER); + SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID); + SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE); + SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG); + SVCMAP(WMI_MAIN_SERVICE_GPIO, + WMI_SERVICE_GPIO); + SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE); + SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP); +} -#define WMI_SERVICE_BM_SIZE \ - ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32)) +#undef SVCMAP /* 2 word representation of MAC addr */ struct wmi_mac_addr { @@ -803,6 +964,159 @@ enum wmi_10x_event_id { WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1, }; +enum wmi_10_2_cmd_id { + WMI_10_2_START_CMDID = 0x9000, + WMI_10_2_END_CMDID = 0x9FFF, + WMI_10_2_INIT_CMDID, + WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID, + WMI_10_2_STOP_SCAN_CMDID, + WMI_10_2_SCAN_CHAN_LIST_CMDID, + WMI_10_2_ECHO_CMDID, + WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_2_PDEV_SET_CHANNEL_CMDID, + WMI_10_2_PDEV_SET_PARAM_CMDID, + WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_2_VDEV_CREATE_CMDID, + WMI_10_2_VDEV_DELETE_CMDID, + WMI_10_2_VDEV_START_REQUEST_CMDID, + WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + WMI_10_2_VDEV_UP_CMDID, + WMI_10_2_VDEV_STOP_CMDID, + WMI_10_2_VDEV_DOWN_CMDID, + WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_2_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_2_VDEV_SET_PARAM_CMDID, + WMI_10_2_VDEV_INSTALL_KEY_CMDID, + WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_2_PEER_CREATE_CMDID, + WMI_10_2_PEER_DELETE_CMDID, + WMI_10_2_PEER_FLUSH_TIDS_CMDID, + WMI_10_2_PEER_SET_PARAM_CMDID, + WMI_10_2_PEER_ASSOC_CMDID, + WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_MCAST_GROUP_CMDID, + WMI_10_2_BCN_TX_CMDID, + WMI_10_2_BCN_PRB_TMPL_CMDID, + WMI_10_2_BCN_FILTER_RX_CMDID, + WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + WMI_10_2_MGMT_TX_CMDID, + WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + WMI_10_2_ADDBA_SEND_CMDID, + WMI_10_2_ADDBA_STATUS_CMDID, + WMI_10_2_DELBA_SEND_CMDID, + WMI_10_2_ADDBA_SET_RESP_CMDID, + WMI_10_2_SEND_SINGLEAMSDU_CMDID, + WMI_10_2_STA_POWERSAVE_MODE_CMDID, + WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + WMI_10_2_STA_MIMO_PS_MODE_CMDID, + WMI_10_2_DBGLOG_CFG_CMDID, + WMI_10_2_PDEV_DFS_ENABLE_CMDID, + WMI_10_2_PDEV_DFS_DISABLE_CMDID, + WMI_10_2_PDEV_QVIT_CMDID, + WMI_10_2_ROAM_SCAN_MODE, + WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_2_ROAM_SCAN_PERIOD, + 
WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_2_ROAM_AP_PROFILE, + WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_2_OFL_SCAN_PERIOD, + WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + WMI_10_2_P2P_GO_SET_BEACON_IE, + WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_2_AP_PS_PEER_PARAM_CMDID, + WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_2_PDEV_SUSPEND_CMDID, + WMI_10_2_PDEV_RESUME_CMDID, + WMI_10_2_ADD_BCN_FILTER_CMDID, + WMI_10_2_RMV_BCN_FILTER_CMDID, + WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_2_WOW_ENABLE_CMDID, + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_2_RTT_MEASREQ_CMDID, + WMI_10_2_RTT_TSF_CMDID, + WMI_10_2_RTT_KEEPALIVE_CMDID, + WMI_10_2_PDEV_SEND_BCN_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_2_REQUEST_STATS_CMDID, + WMI_10_2_GPIO_CONFIG_CMDID, + WMI_10_2_GPIO_OUTPUT_CMDID, + WMI_10_2_VDEV_RATEMASK_CMDID, + WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_2_FORCE_FW_HANG_CMDID, + WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_10_2_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, +}; + +enum wmi_10_2_event_id { + WMI_10_2_SERVICE_READY_EVENTID = 0x8000, + WMI_10_2_READY_EVENTID, + WMI_10_2_DEBUG_MESG_EVENTID, + WMI_10_2_START_EVENTID = 0x9000, + WMI_10_2_END_EVENTID = 0x9FFF, + WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID, + WMI_10_2_ECHO_EVENTID, + WMI_10_2_UPDATE_STATS_EVENTID, + WMI_10_2_INST_RSSI_STATS_EVENTID, + WMI_10_2_VDEV_START_RESP_EVENTID, + WMI_10_2_VDEV_STANDBY_REQ_EVENTID, + WMI_10_2_VDEV_RESUME_REQ_EVENTID, + WMI_10_2_VDEV_STOPPED_EVENTID, + WMI_10_2_PEER_STA_KICKOUT_EVENTID, + WMI_10_2_HOST_SWBA_EVENTID, + WMI_10_2_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_2_MGMT_RX_EVENTID, + WMI_10_2_CHAN_INFO_EVENTID, + WMI_10_2_PHYERR_EVENTID, + WMI_10_2_ROAM_EVENTID, + WMI_10_2_PROFILE_MATCH, + WMI_10_2_DEBUG_PRINT_EVENTID, + WMI_10_2_PDEV_QVIT_EVENTID, + WMI_10_2_WLAN_PROFILE_DATA_EVENTID, + WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_RTT_ERROR_REPORT_EVENTID, + WMI_10_2_RTT_KEEPALIVE_EVENTID, + WMI_10_2_WOW_WAKEUP_HOST_EVENTID, + WMI_10_2_DCS_INTERFERENCE_EVENTID, + WMI_10_2_PDEV_TPC_CONFIG_EVENTID, + WMI_10_2_GPIO_INPUT_EVENTID, + WMI_10_2_PEER_RATECODE_LIST_EVENTID, + WMI_10_2_GENERIC_BUFFER_EVENTID, + WMI_10_2_MCAST_BUF_RELEASE_EVENTID, + WMI_10_2_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_2_WDS_PEER_EVENTID, + WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -955,7 +1269,6 @@ enum wmi_channel_change_cause { WMI_HT_CAP_RX_STBC | \ WMI_HT_CAP_LDPC) - /* * WMI_VHT_CAP_* these maps to ieee 802.11ac vht capability information * field. The fields not defined here are not supported, or reserved. 
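As an aside on the service-bitmap handling earlier in this header: the SVCMAP() helper copies each firmware-ABI-specific service bit (WMI_10X_SERVICE_* or WMI_MAIN_SERVICE_*) into the firmware-independent wmi_service bitmap, so the rest of the driver only ever tests the unified bit positions. A minimal standalone sketch of that pattern, using made-up service IDs rather than the real ath10k definitions:

#include <stdio.h>

/* Hypothetical firmware-specific bit positions (illustrative only). */
enum fw_service  { FW_SERVICE_FOO = 0, FW_SERVICE_BAR = 1 };
/* Hypothetical unified bit positions used by the rest of the driver. */
enum uni_service { UNI_SERVICE_FOO = 10, UNI_SERVICE_BAR = 11 };

#define FW_SERVICE_IS_ENABLED(bmap, id)  (((bmap) >> (id)) & 1u)

/* Mirror of the SVCMAP() idea: copy one firmware bit into the unified map. */
#define SVCMAP_SKETCH(in, out, fw_id, uni_id)             \
	do {                                              \
		if (FW_SERVICE_IS_ENABLED((in), (fw_id))) \
			(out) |= 1u << (uni_id);          \
	} while (0)

int main(void)
{
	unsigned int fw_bitmap = 1u << FW_SERVICE_BAR; /* firmware advertises BAR only */
	unsigned int unified = 0;

	SVCMAP_SKETCH(fw_bitmap, unified, FW_SERVICE_FOO, UNI_SERVICE_FOO);
	SVCMAP_SKETCH(fw_bitmap, unified, FW_SERVICE_BAR, UNI_SERVICE_BAR);

	printf("FOO: %u  BAR: %u\n",
	       (unified >> UNI_SERVICE_FOO) & 1u,  /* prints 0 */
	       (unified >> UNI_SERVICE_BAR) & 1u); /* prints 1 */
	return 0;
}

The real macros differ in detail (the firmware bitmap arrives as an array of __le32 words and the unified map is an unsigned long bitmap filled with __set_bit()), but the word/bit translation step is the same.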
@@ -1076,10 +1389,6 @@ struct wlan_host_mem_req { __le32 num_units; } __packed; -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ - ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - (1 << ((svc_id)%(sizeof(u32))))) != 0) - /* * The following struct holds optional payload for * wmi_service_ready_event,e.g., 11ac pass some of the @@ -1093,7 +1402,7 @@ struct wmi_service_ready_event { __le32 phy_capability; /* Maximum number of frag table entries that SW will populate less 1 */ __le32 max_frag_entry; - __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + __le32 wmi_service_bitmap[16]; __le32 num_rf_chains; /* * The following field is only valid for service type @@ -1132,7 +1441,7 @@ struct wmi_service_ready_event_10x { /* Maximum number of frag table entries that SW will populate less 1 */ __le32 max_frag_entry; - __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + __le32 wmi_service_bitmap[16]; __le32 num_rf_chains; /* @@ -1161,7 +1470,6 @@ struct wmi_service_ready_event_10x { struct wlan_host_mem_req mem_reqs[1]; } __packed; - #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) @@ -1551,6 +1859,16 @@ struct wmi_resource_config_10x { __le32 max_frag_entries; } __packed; +struct wmi_resource_config_10_2 { + struct wmi_resource_config_10x common; + __le32 max_peer_ext_stats; + __le32 smart_ant_cap; /* 0-disable, 1-enable */ + __le32 bk_min_free; + __le32 be_min_free; + __le32 vi_min_free; + __le32 vo_min_free; + __le32 rx_batchmode; /* 0-disable, 1-enable */ +} __packed; #define NUM_UNITS_IS_NUM_VDEVS 0x1 #define NUM_UNITS_IS_NUM_PEERS 0x2 @@ -1588,11 +1906,28 @@ struct wmi_init_cmd_10x { struct host_memory_chunk host_mem_chunks[1]; } __packed; +struct wmi_init_cmd_10_2 { + struct wmi_resource_config_10_2 resource_config; + __le32 num_host_mem_chunks; + + /* + * variable number of host memory chunks. 
+ * This should be the last element in the structure + */ + struct host_memory_chunk host_mem_chunks[1]; +} __packed; + +struct wmi_chan_list_entry { + __le16 freq; + u8 phy_mode; /* valid for 10.2 only */ + u8 reserved; +} __packed; + /* TLV for channel list */ struct wmi_chan_list { __le32 tag; /* WMI_CHAN_LIST_TAG */ __le32 num_chan; - __le32 channel_list[0]; + struct wmi_chan_list_entry channel_list[0]; } __packed; struct wmi_bssid_list { @@ -1788,7 +2123,6 @@ struct wmi_start_scan_cmd_10x { */ } __packed; - struct wmi_ssid_arg { int len; const u8 *ssid; @@ -1821,7 +2155,7 @@ struct wmi_start_scan_arg { u32 n_bssids; u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; + u16 channels[64]; struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; }; @@ -1849,7 +2183,6 @@ struct wmi_start_scan_arg { /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 - enum wmi_stop_scan_type { WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */ @@ -2034,7 +2367,6 @@ struct wmi_single_phyerr_rx_hdr { __le32 nf_list_1; __le32 nf_list_2; - /* Length of the frame */ __le32 buf_len; } __packed; @@ -2067,6 +2399,7 @@ struct wmi_comb_phyerr_rx_event { #define PHYERR_TLV_SIG 0xBB #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 +#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9 struct phyerr_radar_report { __le32 reg0; /* RADAR_REPORT_REG0_* */ @@ -2135,7 +2468,6 @@ struct phyerr_fft_report { #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0 - struct phyerr_tlv { __le16 len; u8 tag; @@ -2166,7 +2498,6 @@ struct wmi_echo_cmd { __le32 value; } __packed; - struct wmi_pdev_set_regdomain_cmd { __le32 reg_domain; __le32 reg_domain_2G; @@ -2215,7 +2546,6 @@ struct wmi_pdev_set_quiet_cmd { __le32 enabled; } __packed; - /* * 802.11g protection mode. 
*/ @@ -2515,6 +2845,19 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_BURST_DUR, /* Set Bursting Enable*/ WMI_10X_PDEV_PARAM_BURST_ENABLE, + + /* following are available as of firmware 10.2 */ + WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE, + WMI_10X_PDEV_PARAM_IGMPMLD_TID, + WMI_10X_PDEV_PARAM_ANTENNA_GAIN, + WMI_10X_PDEV_PARAM_RX_DECAP_MODE, + WMI_10X_PDEV_PARAM_RX_FILTER, + WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID, + WMI_10X_PDEV_PARAM_PROXY_STA_MODE, + WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE, + WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, }; struct wmi_pdev_set_param_cmd { @@ -3387,6 +3730,14 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + + /* following are available as of firmware 10.2 */ + WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10X_VDEV_PARAM_CABQ_MAXDUR, + WMI_10X_VDEV_PARAM_MFPTEST_SET, + WMI_10X_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10X_VDEV_PARAM_VHT_SGIMASK, + WMI_10X_VDEV_PARAM_VHT80_RATEMASK, }; /* slot time long */ @@ -3444,6 +3795,98 @@ struct wmi_vdev_simple_event { /* unsupported VDEV combination */ #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2 +/* TODO: please add more comments if you have in-depth information */ +struct wmi_vdev_spectral_conf_cmd { + __le32 vdev_id; + + /* number of fft samples to send (0 for infinite) */ + __le32 scan_count; + __le32 scan_period; + __le32 scan_priority; + + /* number of bins in the FFT: 2^(fft_size - bin_scale) */ + __le32 scan_fft_size; + __le32 scan_gc_ena; + __le32 scan_restart_ena; + __le32 scan_noise_floor_ref; + __le32 scan_init_delay; + __le32 scan_nb_tone_thr; + __le32 scan_str_bin_thr; + __le32 scan_wb_rpt_mode; + __le32 scan_rssi_rpt_mode; + __le32 scan_rssi_thr; + __le32 scan_pwr_format; + + /* rpt_mode: Format of FFT report to software for spectral scan + * triggered FFTs: + * 0: No FFT report (only spectral scan summary report) + * 1: 2-dword summary of metrics for each completed FFT + spectral + * scan summary report + * 2: 2-dword summary of metrics for each completed FFT + + * 1x- oversampled bins(in-band) per FFT + spectral scan summary + * report + * 3: 2-dword summary of metrics for each completed FFT + + * 2x- oversampled bins (all) per FFT + spectral scan summary + */ + __le32 scan_rpt_mode; + __le32 scan_bin_scale; + __le32 scan_dbm_adj; + __le32 scan_chn_mask; +} __packed; + +struct wmi_vdev_spectral_conf_arg { + u32 vdev_id; + u32 scan_count; + u32 scan_period; + u32 scan_priority; + u32 scan_fft_size; + u32 scan_gc_ena; + u32 scan_restart_ena; + u32 scan_noise_floor_ref; + u32 scan_init_delay; + u32 scan_nb_tone_thr; + u32 scan_str_bin_thr; + u32 scan_wb_rpt_mode; + u32 scan_rssi_rpt_mode; + u32 scan_rssi_thr; + u32 scan_pwr_format; + u32 scan_rpt_mode; + u32 scan_bin_scale; + u32 scan_dbm_adj; + u32 scan_chn_mask; +}; + +#define WMI_SPECTRAL_ENABLE_DEFAULT 0 +#define WMI_SPECTRAL_COUNT_DEFAULT 0 +#define WMI_SPECTRAL_PERIOD_DEFAULT 35 +#define WMI_SPECTRAL_PRIORITY_DEFAULT 1 +#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7 +#define WMI_SPECTRAL_GC_ENA_DEFAULT 1 +#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0 +#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96 +#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80 +#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12 +#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8 +#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0 +#define 
WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0 +#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2 +#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1 +#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1 +#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1 + +struct wmi_vdev_spectral_enable_cmd { + __le32 vdev_id; + __le32 trigger_cmd; + __le32 enable_cmd; +} __packed; + +#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1 +#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2 +#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1 +#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2 + /* Beacon processing related command and event structures */ struct wmi_bcn_tx_hdr { __le32 vdev_id; @@ -3470,6 +3913,11 @@ enum wmi_bcn_tx_ref_flags { WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2, }; +/* TODO: It is unclear why "no antenna" works while any other seemingly valid + * chainmask yields no beacons on the air at all. + */ +#define WMI_BCN_TX_REF_DEF_ANTENNA 0 + struct wmi_bcn_tx_ref_cmd { __le32 vdev_id; __le32 data_len; @@ -3481,6 +3929,8 @@ struct wmi_bcn_tx_ref_cmd { __le32 frame_control; /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */ __le32 flags; + /* introduced in 10.2 */ + __le32 antenna_mask; } __packed; /* Beacon filter */ @@ -3833,7 +4283,6 @@ struct wmi_tbtt_offset_event { __le32 tbttoffset_list[WMI_MAX_AP_VDEV]; } __packed; - struct wmi_peer_create_cmd { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; @@ -4053,7 +4502,7 @@ struct wmi_peer_set_q_empty_callback_cmd { /* Maximum listen interval supported by hw in units of beacon interval */ #define ATH10K_MAX_HW_LISTEN_INTERVAL 5 -struct wmi_peer_assoc_complete_cmd { +struct wmi_common_peer_assoc_complete_cmd { struct wmi_mac_addr peer_macaddr; __le32 vdev_id; __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */ @@ -4071,11 +4520,30 @@ struct wmi_peer_assoc_complete_cmd { __le32 peer_vht_caps; __le32 peer_phymode; struct wmi_vht_rate_set peer_vht_rates; +}; + +struct wmi_main_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + /* HT Operation Element of the peer. Five bytes packed in 2 * INT32 array and filled from lsb to msb. 
*/ __le32 peer_ht_info[2]; } __packed; +struct wmi_10_1_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; +} __packed; + +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0 +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4 +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0 + +struct wmi_10_2_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */ +} __packed; + struct wmi_peer_assoc_complete_arg { u8 addr[ETH_ALEN]; u32 vdev_id; @@ -4260,6 +4728,10 @@ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); int ath10k_wmi_connect(struct ath10k *ar); + +struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); +int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); + int ath10k_wmi_pdev_set_channel(struct ath10k *ar, const struct wmi_channel_arg *); int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); @@ -4290,12 +4762,16 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id, u32 param_value); int ath10k_wmi_vdev_install_key(struct ath10k *ar, const struct wmi_vdev_install_key_arg *arg); +int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg); +int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + u32 enable); int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]); + const u8 peer_addr[ETH_ALEN]); int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN]); + const u8 peer_addr[ETH_ALEN]); int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, - const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); + const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr, enum wmi_peer_param param_id, u32 param_value); @@ -4312,7 +4788,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, const struct wmi_scan_chan_list_arg *arg); int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif); int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, - const struct wmi_pdev_set_wmm_params_arg *arg); + const struct wmi_pdev_set_wmm_params_arg *arg); int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); int ath10k_wmi_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms); diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index c9f81a388f1572cb6a9f233809731f61c4ffe209..93caf8e689016f57db1c2e47ae0369de813ecbd7 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -1,13 +1,12 @@ config ATH5K tristate "Atheros 5xxx wireless cards support" - depends on (PCI || ATHEROS_AR231X) && MAC80211 + depends on PCI && MAC80211 select ATH_COMMON select MAC80211_LEDS select LEDS_CLASS select NEW_LEDS select AVERAGE - select ATH5K_AHB if (ATHEROS_AR231X && !PCI) - select ATH5K_PCI if (!ATHEROS_AR231X && PCI) + select ATH5K_PCI ---help--- This module adds support for wireless adapters based on Atheros 5xxx chipset. @@ -52,16 +51,9 @@ config ATH5K_TRACER If unsure, say N. -config ATH5K_AHB - bool "Atheros 5xxx AHB bus support" - depends on (ATHEROS_AR231X && !PCI) - ---help--- - This adds support for WiSoC type chipsets of the 5xxx Atheros - family. 
- config ATH5K_PCI bool "Atheros 5xxx PCI bus support" - depends on (!ATHEROS_AR231X && PCI) + depends on PCI ---help--- This adds support for PCI type chipsets of the 5xxx Atheros family. diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile index 1b3a34f7f224d03c3bf0fbe53f9b85ae6f680252..51e2d8668041827492bd4df14ed6c860c9983181 100644 --- a/drivers/net/wireless/ath/ath5k/Makefile +++ b/drivers/net/wireless/ath/ath5k/Makefile @@ -17,6 +17,5 @@ ath5k-y += ani.o ath5k-y += sysfs.o ath5k-y += mac80211-ops.o ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o -ath5k-$(CONFIG_ATH5K_AHB) += ahb.o ath5k-$(CONFIG_ATH5K_PCI) += pci.o obj-$(CONFIG_ATH5K) += ath5k.o diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c deleted file mode 100644 index 79bffe165caba6e1f963b9fa81deed4844cb42d1..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (c) 2008-2009 Atheros Communications Inc. - * Copyright (c) 2009 Gabor Juhos - * Copyright (c) 2009 Imre Kaloz - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include "ath5k.h" -#include "debug.h" -#include "base.h" -#include "reg.h" - -/* return bus cachesize in 4B word units */ -static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) -{ - *csz = L1_CACHE_BYTES >> 2; -} - -static bool -ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) -{ - struct ath5k_hw *ah = common->priv; - struct platform_device *pdev = to_platform_device(ah->dev); - struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); - u16 *eeprom, *eeprom_end; - - eeprom = (u16 *) bcfg->radio; - eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ; - - eeprom += off; - if (eeprom > eeprom_end) - return false; - - *data = *eeprom; - return true; -} - -int ath5k_hw_read_srev(struct ath5k_hw *ah) -{ - struct platform_device *pdev = to_platform_device(ah->dev); - struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); - ah->ah_mac_srev = bcfg->devid; - return 0; -} - -static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) -{ - struct platform_device *pdev = to_platform_device(ah->dev); - struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); - u8 *cfg_mac; - - if (to_platform_device(ah->dev)->id == 0) - cfg_mac = bcfg->config->wlan0_mac; - else - cfg_mac = bcfg->config->wlan1_mac; - - memcpy(mac, cfg_mac, ETH_ALEN); - return 0; -} - -static const struct ath_bus_ops ath_ahb_bus_ops = { - .ath_bus_type = ATH_AHB, - .read_cachesize = ath5k_ahb_read_cachesize, - .eeprom_read = ath5k_ahb_eeprom_read, - .eeprom_read_mac = ath5k_ahb_eeprom_read_mac, -}; - -/*Initialization*/ -static int ath_ahb_probe(struct platform_device *pdev) -{ - struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); - struct ath5k_hw *ah; - struct ieee80211_hw *hw; - struct resource *res; - void __iomem *mem; - int irq; - int ret = 0; - u32 reg; - - if (!dev_get_platdata(&pdev->dev)) { - dev_err(&pdev->dev, "no platform data specified\n"); - ret = -EINVAL; - goto err_out; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "no memory resource found\n"); - ret = -ENXIO; - goto err_out; - } - - mem = ioremap_nocache(res->start, resource_size(res)); - if (mem == NULL) { - dev_err(&pdev->dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err_out; - } - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { - dev_err(&pdev->dev, "no IRQ resource found\n"); - ret = -ENXIO; - goto err_iounmap; - } - - irq = res->start; - - hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops); - if (hw == NULL) { - dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); - ret = -ENOMEM; - goto err_iounmap; - } - - ah = hw->priv; - ah->hw = hw; - ah->dev = &pdev->dev; - ah->iobase = mem; - ah->irq = irq; - ah->devid = bcfg->devid; - - if (bcfg->devid >= AR5K_SREV_AR2315_R6) { - /* Enable WMAC AHB arbitration */ - reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); - reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN; - iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); - - /* Enable global WMAC swapping */ - reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP); - reg |= AR5K_AR2315_BYTESWAP_WMAC; - iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP); - } else { - /* Enable WMAC DMA access (assuming 5312 or 231x*/ - /* TODO: check other platforms */ - reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); - if (to_platform_device(ah->dev)->id == 0) - reg |= AR5K_AR5312_ENABLE_WLAN0; - else - reg |= 
AR5K_AR5312_ENABLE_WLAN1; - iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); - - /* - * On a dual-band AR5312, the multiband radio is only - * used as pass-through. Disable 2 GHz support in the - * driver for it - */ - if (to_platform_device(ah->dev)->id == 0 && - (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == - (BD_WLAN1 | BD_WLAN0)) - ah->ah_capabilities.cap_needs_2GHz_ovr = true; - else - ah->ah_capabilities.cap_needs_2GHz_ovr = false; - } - - ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); - if (ret != 0) { - dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret); - ret = -ENODEV; - goto err_free_hw; - } - - platform_set_drvdata(pdev, hw); - - return 0; - - err_free_hw: - ieee80211_free_hw(hw); - err_iounmap: - iounmap(mem); - err_out: - return ret; -} - -static int ath_ahb_remove(struct platform_device *pdev) -{ - struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev); - struct ieee80211_hw *hw = platform_get_drvdata(pdev); - struct ath5k_hw *ah; - u32 reg; - - if (!hw) - return 0; - - ah = hw->priv; - - if (bcfg->devid >= AR5K_SREV_AR2315_R6) { - /* Disable WMAC AHB arbitration */ - reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL); - reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN; - iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL); - } else { - /*Stop DMA access */ - reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE); - if (to_platform_device(ah->dev)->id == 0) - reg &= ~AR5K_AR5312_ENABLE_WLAN0; - else - reg &= ~AR5K_AR5312_ENABLE_WLAN1; - iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE); - } - - ath5k_deinit_ah(ah); - iounmap(ah->iobase); - ieee80211_free_hw(hw); - - return 0; -} - -static struct platform_driver ath_ahb_driver = { - .probe = ath_ahb_probe, - .remove = ath_ahb_remove, - .driver = { - .name = "ar231x-wmac", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(ath_ahb_driver); diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 85316bb3f8c66a51baffad61a5b306337f4ec04f..ed24682202163438338e93fd74743287810d7ba6 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1647,32 +1647,6 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah) return &(ath5k_hw_common(ah)->regulatory); } -#ifdef CONFIG_ATHEROS_AR231X -#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000) - -static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg) -{ - /* On AR2315 and AR2317 the PCI clock domain registers - * are outside of the WMAC register space */ - if (unlikely((reg >= 0x4000) && (reg < 0x5000) && - (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6))) - return AR5K_AR2315_PCI_BASE + reg; - - return ah->iobase + reg; -} - -static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) -{ - return ioread32(ath5k_ahb_reg(ah, reg)); -} - -static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) -{ - iowrite32(val, ath5k_ahb_reg(ah, reg)); -} - -#else - static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) { return ioread32(ah->iobase + reg); @@ -1683,8 +1657,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) iowrite32(val, ah->iobase + reg); } -#endif - static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah) { return ath5k_hw_common(ah)->bus_ops->ath_bus_type; diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 7106547a14ddc6819751b48041540ab3846e7f68..66b6366158b90a92054dbb9ce5cd97a665abc65c 100644 --- 
a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -351,8 +351,7 @@ void ath5k_hw_deinit(struct ath5k_hw *ah) { __set_bit(ATH_STAT_INVALID, ah->status); - if (ah->ah_rf_banks != NULL) - kfree(ah->ah_rf_banks); + kfree(ah->ah_rf_banks); ath5k_eeprom_detach(ah); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 8ad2550bce7ff4820511a606a30d10f81e09b91d..a4a09bb8f2f3cb91c9ffd96336b6eab25ce63e41 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -99,15 +99,6 @@ static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, /* Known SREVs */ static const struct ath5k_srev_name srev_names[] = { -#ifdef CONFIG_ATHEROS_AR231X - { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 }, - { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 }, - { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 }, - { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 }, - { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 }, - { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 }, - { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 }, -#else { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, @@ -126,7 +117,6 @@ static const struct ath5k_srev_name srev_names[] = { { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, -#endif { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, @@ -142,10 +132,6 @@ static const struct ath5k_srev_name srev_names[] = { { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, -#ifdef CONFIG_ATHEROS_AR231X - { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, - { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, -#endif { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, }; @@ -1423,7 +1409,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, break; } - if (rxs->rate_idx >= 0 && rs->rs_rate == + if (rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index b8d031ae63c2f8019a0c997295e52f9b2b01b46c..c70782e8f07bd704b2ff495339eb5ab6b65864bc 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -62,9 +62,11 @@ #include #include +#include #include #include +#include #include "debug.h" #include "ath5k.h" #include "reg.h" @@ -894,6 +896,100 @@ static const struct file_operations fops_queue = { .llseek = default_llseek, }; +/* debugfs: eeprom */ + +struct eeprom_private { + u16 *buf; + int len; +}; + +static int open_file_eeprom(struct inode *inode, struct file *file) +{ + struct eeprom_private *ep; + struct ath5k_hw *ah = inode->i_private; + bool res; + int i, ret; + u32 eesize; + u16 val, *buf; + + /* Get eeprom size */ + + res = ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_UPPER, &val); + if (!res) + return -EACCES; + + if (val == 0) { + eesize = AR5K_EEPROM_INFO_MAX + AR5K_EEPROM_INFO_BASE; + } else { + eesize = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << + AR5K_EEPROM_SIZE_ENDLOC_SHIFT; + ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_LOWER, &val); + eesize = eesize | val; + } + + if (eesize > 4096) + return 
-EINVAL; + + /* Create buffer and read in eeprom */ + + buf = vmalloc(eesize); + if (!buf) { + ret = -ENOMEM; + goto err; + } + + for (i = 0; i < eesize; ++i) { + AR5K_EEPROM_READ(i, val); + buf[i] = val; + } + + /* Create private struct and assign to file */ + + ep = kmalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) { + ret = -ENOMEM; + goto freebuf; + } + + ep->buf = buf; + ep->len = i; + + file->private_data = (void *)ep; + + return 0; + +freebuf: + vfree(buf); +err: + return ret; + +} + +static ssize_t read_file_eeprom(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct eeprom_private *ep = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, ep->buf, ep->len); +} + +static int release_file_eeprom(struct inode *inode, struct file *file) +{ + struct eeprom_private *ep = file->private_data; + + vfree(ep->buf); + kfree(ep); + + return 0; +} + +static const struct file_operations fops_eeprom = { + .open = open_file_eeprom, + .read = read_file_eeprom, + .release = release_file_eeprom, + .owner = THIS_MODULE, +}; + void ath5k_debug_init_device(struct ath5k_hw *ah) @@ -921,6 +1017,8 @@ ath5k_debug_init_device(struct ath5k_hw *ah) debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc); + debugfs_create_file("eeprom", S_IRUSR, phydir, ah, &fops_eeprom); + debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah, &fops_frameerrors); diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 48a6a69b57bcb6bcce63f23032e7ee629c124d90..0beb7e7d60755643975d796aec31e0c4c8ee8e2c 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -130,6 +130,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led, led->ah = ah; strncpy(led->name, name, sizeof(led->name)); + led->name[sizeof(led->name)-1] = 0; led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = ath5k_led_brightness_set; @@ -162,20 +163,14 @@ int ath5k_init_leds(struct ath5k_hw *ah) { int ret = 0; struct ieee80211_hw *hw = ah->hw; -#ifndef CONFIG_ATHEROS_AR231X struct pci_dev *pdev = ah->pdev; -#endif char name[ATH5K_LED_MAX_NAME_LEN + 1]; const struct pci_device_id *match; if (!ah->pdev) return 0; -#ifdef CONFIG_ATHEROS_AR231X - match = NULL; -#else match = pci_match_id(&ath5k_led_devices[0], pdev); -#endif if (match) { __set_bit(ATH_STAT_LEDSOFT, ah->status); ah->led_pin = ATH_PIN(match->driver_data); diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index b65c38fdaa4b23723ac8f59add51e46acc7d16f1..ab2709a437686ecec7d6a858756c340184ad10b6 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -704,7 +704,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) * reset. 
*/ static void -ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) +ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct ath5k_hw *ah = hw->priv; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index e535807c3d8912863da3604dc9e698ee664f0a48..ba60e37213eb1d289fa983e8eacbce48c1616b0a 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -717,6 +717,7 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, memcpy(ie + 2, vif->ssid, vif->ssid_len); memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len); bss = cfg80211_inform_bss(ar->wiphy, chan, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, cap_val, 100, ie, 2 + vif->ssid_len + beacon_ie_len, 0, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index fffd52355123f31e7bae15561fd867297d73f924..6e473fa4b13cae0df30a33fd9162729afaf76f6b 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1049,7 +1049,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ar->hw.reserved_ram_size = le32_to_cpup(val); ath6kl_dbg(ATH6KL_DBG_BOOT, - "found reserved ram size ie 0x%d\n", + "found reserved ram size ie %d\n", ar->hw.reserved_ram_size); break; case ATH6KL_FW_IE_CAPABILITIES: diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 21516bc657857b8a44405c5c22944a275271e5f2..933aef025698d90326f91b8d9746a417786769e1 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -225,7 +225,7 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) ret = ath6kl_hif_diag_write32(ar, address, value); if (ret) { - ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n", + ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n", address, value); return ret; } diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 339d89f14d32b1991c3d3d646f460b90f41411bb..eab0ab976af29ebb0b355f82b01caeb39bd07152 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1400,6 +1400,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, {}, }; diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index c44325856b81fc4cee191ec9aceb2102f2f0b23b..a6a5e40b3e981453b72233e930e712c8412bf60e 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1229,26 +1229,7 @@ static struct usb_driver ath6kl_usb_driver = { .disable_hub_initiated_lpm = 1, }; -static int ath6kl_usb_init(void) -{ - int ret; - - ret = usb_register(&ath6kl_usb_driver); - if (ret) { - ath6kl_err("usb registration failed: %d\n", ret); - return ret; - } - - return 0; -} - -static void ath6kl_usb_exit(void) -{ - usb_deregister(&ath6kl_usb_driver); -} - -module_init(ath6kl_usb_init); -module_exit(ath6kl_usb_exit); +module_usb_driver(ath6kl_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices"); diff --git 
a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 94df345d08c24210fefe4b707079f85fe75e386f..b921005ad7eef7bb9ab25dedadf6c50f7ad85d26 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -619,8 +619,7 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, dlen, freq, vif->probe_req_report); if (vif->probe_req_report || vif->nw_type == AP_NETWORK) - cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); return 0; } @@ -659,7 +658,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); return 0; } @@ -1093,7 +1092,6 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, u8 *buf; struct ieee80211_channel *channel; struct ath6kl *ar = wmi->parent_dev; - struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; if (len <= sizeof(struct wmi_bss_info_hdr2)) @@ -1139,39 +1137,15 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, } } - /* - * In theory, use of cfg80211_inform_bss() would be more natural here - * since we do not have the full frame. However, at least for now, - * cfg80211 can only distinguish Beacon and Probe Response frames from - * each other when using cfg80211_inform_bss_frame(), so let's build a - * fake IEEE 802.11 header to be able to take benefit of this. - */ - mgmt = kmalloc(24 + len, GFP_ATOMIC); - if (mgmt == NULL) - return -EINVAL; - - if (bih->frame_type == BEACON_FTYPE) { - mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_BEACON); - memset(mgmt->da, 0xff, ETH_ALEN); - } else { - struct net_device *dev = vif->ndev; - - mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_PROBE_RESP); - memcpy(mgmt->da, dev->dev_addr, ETH_ALEN); - } - mgmt->duration = cpu_to_le16(0); - memcpy(mgmt->sa, bih->bssid, ETH_ALEN); - memcpy(mgmt->bssid, bih->bssid, ETH_ALEN); - mgmt->seq_ctrl = cpu_to_le16(0); - - memcpy(&mgmt->u.beacon, buf, len); - - bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt, - 24 + len, (bih->snr - 95) * 100, - GFP_ATOMIC); - kfree(mgmt); + bss = cfg80211_inform_bss(ar->wiphy, channel, + bih->frame_type == BEACON_FTYPE ? + CFG80211_BSS_FTYPE_BEACON : + CFG80211_BSS_FTYPE_PRESP, + bih->bssid, get_unaligned_le64((__le64 *)buf), + get_unaligned_le16(((__le16 *)buf) + 5), + get_unaligned_le16(((__le16 *)buf) + 4), + buf + 8 + 2 + 2, len - 8 - 2 - 2, + (bih->snr - 95) * 100, GFP_ATOMIC); if (bss == NULL) return -ENOMEM; cfg80211_put_bss(ar->wiphy, bss); diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 8fcc029a76a60690ba542e49ddb94bb855bda3ba..896e63281b3b8464b9bbdbd5bab6d9d1f09b4302 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -92,6 +92,15 @@ config ATH9K_DFS_CERTIFIED developed. At this point enabling this option won't do anything except increase code size. 
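For reference on the ath6kl_wmi_bssinfo_event_rx() conversion above: the fixed offsets handed to cfg80211_inform_bss() follow the standard beacon/probe-response body layout, which begins with an 8-byte timestamp, a 2-byte beacon interval and a 2-byte capability field before the information elements. A host-only sketch of that parsing (illustrative, not driver code; endian conversion omitted):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Beacon/probe response body: timestamp(8) | beacon interval(2) | capability(2) | IEs */
static void parse_mgmt_body_sketch(const uint8_t *buf, size_t len)
{
	uint64_t tsf;
	uint16_t bcn_int, cap;

	if (len < 8 + 2 + 2)
		return;

	memcpy(&tsf, buf, sizeof(tsf));             /* offset 0  */
	memcpy(&bcn_int, buf + 8, sizeof(bcn_int)); /* offset 8  */
	memcpy(&cap, buf + 10, sizeof(cap));        /* offset 10 */

	/* The IEs start at buf + 8 + 2 + 2, matching the arithmetic in the patch. */
	printf("tsf=%llu interval=%u cap=0x%04x ie_len=%zu\n",
	       (unsigned long long)tsf, bcn_int, cap, len - (8 + 2 + 2));
}

int main(void)
{
	uint8_t body[16] = { 0 }; /* dummy body: 12 fixed bytes plus 4 IE bytes */

	parse_mgmt_body_sketch(body, sizeof(body));
	return 0;
}

In the driver itself the same fields are read with get_unaligned_le64()/get_unaligned_le16(), which additionally handle unaligned access and the little-endian wire format.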
+config ATH9K_DYNACK + bool "Atheros ath9k ACK timeout estimation algorithm (EXPERIMENTAL)" + depends on ATH9K + default n + ---help--- + This option enables ath9k dynamic ACK timeout estimation algorithm + based on ACK frame RX timestamp, TX frame timestamp and frame + duration + config ATH9K_TX99 bool "Atheros ath9k TX99 testing support" depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS @@ -130,6 +139,15 @@ config ATH9K_RFKILL seconds. Turn off to save power, but enable it if you have a platform that can toggle the RF-Kill GPIO. +config ATH9K_CHANNEL_CONTEXT + bool "Channel Context support" + depends on ATH9K + default n + ---help--- + This option enables channel context support in ath9k, which is needed + for multi-channel concurrency. Enable this if P2P PowerSave support + is required. + config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index 6b4020a5798477e3ab21b22ce2219aea3cf6553c..73704c1be736f188dd5cb9f9c746a51dc4d35a94 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -49,6 +49,9 @@ ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \ ar9003_mci.o + +ath9k_hw-$(CONFIG_ATH9K_DYNACK) += dynack.o + obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 00fb8badbacc939bbb3ed1bbeae1b3d63edc4e23..b72d0be716dbdc91c56f822f1ec3d352f82e4e80 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -1004,9 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, case ATH9K_ANI_FIRSTEP_LEVEL:{ u32 level = param; - value = level; + value = level * 2; REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, AR_PHY_FIND_SIG_FIRSTEP, value); + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW, + AR_PHY_FIND_SIG_FIRSTEP_LOW, value); if (level != aniState->firstepLevel) { ath_dbg(common, ANI, @@ -1040,9 +1042,8 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_PHY_TIMING5, AR_PHY_TIMING5_CYCPWR_THR1, value); - if (IS_CHAN_HT40(ah->curchan)) - REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, - AR_PHY_EXT_TIMING5_CYCPWR_THR1, value); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, + AR_PHY_EXT_TIMING5_CYCPWR_THR1, value - 1); if (level != aniState->spurImmunityLevel) { ath_dbg(common, ANI, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 59af9f9712da852e0ebf4e05747b35da7373dbeb..2a93519f4bdf43479a423a7f9ad5e8773f4b9ab9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -384,6 +384,24 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, return 0; } +static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + switch (index) { + case 0: + return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0); + case 1: + return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1); + case 2: + return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2); + case 3: + return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3); + default: + return -1; + } +} + void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, u32 size, u32 flags) { @@ -406,4 +424,5 @@ void ar9002_hw_attach_mac_ops(struct ath_hw *ah) ops->get_isr = 
ar9002_hw_get_isr; ops->set_txdesc = ar9002_set_txdesc; ops->proc_txdesc = ar9002_hw_proc_txdesc; + ops->get_duration = ar9002_hw_get_duration; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 71e38e85aa99801546e8e64332bbce3458edd604..057b1657c4287f1418058ac3c4f8ec6a305619f9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -431,6 +431,24 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds, return 0; } +static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index) +{ + const struct ar9003_txc *adc = ds; + + switch (index) { + case 0: + return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0); + case 1: + return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1); + case 2: + return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2); + case 3: + return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3); + default: + return 0; + } +} + void ar9003_hw_attach_mac_ops(struct ath_hw *hw) { struct ath_hw_ops *ops = ath9k_hw_ops(hw); @@ -440,6 +458,7 @@ void ar9003_hw_attach_mac_ops(struct ath_hw *hw) ops->get_isr = ar9003_hw_get_isr; ops->set_txdesc = ar9003_set_txdesc; ops->proc_txdesc = ar9003_hw_proc_txdesc; + ops->get_duration = ar9003_hw_get_duration; } void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 542a8d51d3b033bd21521a6b34972107eb44ff04..697c4ae90af006f9c7f962bd21ea938af1892455 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -517,6 +517,23 @@ static void ar9003_hw_spur_mitigate(struct ath_hw *ah, ar9003_hw_spur_mitigate_ofdm(ah, chan); } +static u32 ar9003_hw_compute_pll_control_soc(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 pll; + + pll = SM(0x5, AR_RTC_9300_SOC_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9300_SOC_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9300_SOC_PLL_CLKSEL); + + pll |= SM(0x2c, AR_RTC_9300_SOC_PLL_DIV_INT); + + return pll; +} + static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -1781,7 +1798,12 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah) priv_ops->rf_set_freq = ar9003_hw_set_channel; priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate; - priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; + + if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) + priv_ops->compute_pll_control = ar9003_hw_compute_pll_control_soc; + else + priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; + priv_ops->set_channel_regs = ar9003_hw_set_channel_regs; priv_ops->init_bb = ar9003_hw_init_bb; priv_ops->process_ini = ar9003_hw_process_ini; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 7fc13a8da6757347e4daff1d1ede61da93648d3d..bfa0b1518da1b5b7d95ce69170f5f7934ee375a1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -31,6 +31,7 @@ #include "spectral.h" struct ath_node; +struct ath_vif; extern struct ieee80211_ops ath9k_ops; extern int ath9k_modparam_nohwcrypt; @@ -273,6 +274,9 @@ struct ath_node { struct ath_rx_rate_stats rx_rate_stats; #endif u8 key_idx[4]; + + u32 ackto; + struct list_head list; }; struct ath_tx_control { @@ -313,7 +317,6 @@ struct ath_rx { bool discard_next; u32 *rxlink; u32 num_pkts; - unsigned int 
rxfilter; struct list_head rxbuf; struct ath_descdma rxdma; struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; @@ -324,6 +327,10 @@ struct ath_rx { u32 ampdu_ref; }; +/*******************/ +/* Channel Context */ +/*******************/ + struct ath_chanctx { struct cfg80211_chan_def chandef; struct list_head vifs; @@ -345,6 +352,10 @@ struct ath_chanctx { bool active; bool assigned; bool switch_after_beacon; + + short nvifs; + short nvifs_assigned; + unsigned int rxfilter; }; enum ath_chanctx_event { @@ -354,7 +365,9 @@ enum ath_chanctx_event { ATH_CHANCTX_EVENT_BEACON_RECEIVED, ATH_CHANCTX_EVENT_ASSOC, ATH_CHANCTX_EVENT_SWITCH, + ATH_CHANCTX_EVENT_ASSIGN, ATH_CHANCTX_EVENT_UNASSIGN, + ATH_CHANCTX_EVENT_CHANGE, ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL, }; @@ -369,6 +382,9 @@ enum ath_chanctx_state { struct ath_chanctx_sched { bool beacon_pending; bool offchannel_pending; + bool wait_switch; + bool force_noa_update; + bool extend_absence; enum ath_chanctx_state state; u8 beacon_miss; @@ -403,38 +419,130 @@ struct ath_offchannel { int roc_duration; int duration; }; + +#define case_rtn_string(val) case val: return #val + #define ath_for_each_chanctx(_sc, _ctx) \ for (ctx = &sc->chanctx[0]; \ ctx <= &sc->chanctx[ARRAY_SIZE(sc->chanctx) - 1]; \ ctx++) -void ath9k_fill_chanctx_ops(void); -void ath9k_chanctx_force_active(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); +void ath_chanctx_init(struct ath_softc *sc); +void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef); + +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + static inline struct ath_chanctx * ath_chanctx_get(struct ieee80211_chanctx_conf *ctx) { struct ath_chanctx **ptr = (void *) ctx->drv_priv; return *ptr; } -void ath_chanctx_init(struct ath_softc *sc); -void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef); -void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef); + +bool ath9k_is_chanctx_enabled(void); +void ath9k_fill_chanctx_ops(void); +void ath9k_init_channel_context(struct ath_softc *sc); +void ath9k_offchannel_init(struct ath_softc *sc); +void ath9k_deinit_channel_context(struct ath_softc *sc); +int ath9k_init_p2p(struct ath_softc *sc); +void ath9k_deinit_p2p(struct ath_softc *sc); +void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif); +void ath9k_p2p_beacon_sync(struct ath_softc *sc); +void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif); +void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb); +void ath9k_p2p_ps_timer(void *priv); +void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx); +void ath9k_chanctx_stop_queues(struct ath_softc *sc, struct ath_chanctx *ctx); void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx); -void ath_offchannel_timer(unsigned long data); -void ath_offchannel_channel_change(struct ath_softc *sc); -void ath_chanctx_offchan_switch(struct ath_softc *sc, - struct ieee80211_channel *chan); -struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, - bool active); + +void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, + enum ath_chanctx_event ev); +void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev); void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, enum ath_chanctx_event ev); -void ath_chanctx_timer(unsigned long data); +void 
ath_chanctx_set_next(struct ath_softc *sc, bool force); +void ath_offchannel_next(struct ath_softc *sc); +void ath_scan_complete(struct ath_softc *sc, bool abort); +void ath_roc_complete(struct ath_softc *sc, bool abort); + +#else + +static inline bool ath9k_is_chanctx_enabled(void) +{ + return false; +} +static inline void ath9k_fill_chanctx_ops(void) +{ +} +static inline void ath9k_init_channel_context(struct ath_softc *sc) +{ +} +static inline void ath9k_offchannel_init(struct ath_softc *sc) +{ +} +static inline void ath9k_deinit_channel_context(struct ath_softc *sc) +{ +} +static inline void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ +} +static inline void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ +} +static inline void ath_chanctx_event(struct ath_softc *sc, + struct ieee80211_vif *vif, + enum ath_chanctx_event ev) +{ +} +static inline int ath9k_init_p2p(struct ath_softc *sc) +{ + return 0; +} +static inline void ath9k_deinit_p2p(struct ath_softc *sc) +{ +} +static inline void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ +} +static inline void ath9k_p2p_beacon_sync(struct ath_softc *sc) +{ +} +static inline void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ +} +static inline void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb) +{ +} +static inline void ath9k_p2p_ps_timer(struct ath_softc *sc) +{ +} +static inline void ath9k_chanctx_wake_queues(struct ath_softc *sc, + struct ath_chanctx *ctx) +{ +} +static inline void ath9k_chanctx_stop_queues(struct ath_softc *sc, + struct ath_chanctx *ctx) +{ +} +static inline void ath_chanctx_check_active(struct ath_softc *sc, + struct ath_chanctx *ctx) +{ +} + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan); -int ath_startrecv(struct ath_softc *sc); +void ath_startrecv(struct ath_softc *sc); bool ath_stoprecv(struct ath_softc *sc); u32 ath_calcrxfilter(struct ath_softc *sc); int ath_rx_init(struct ath_softc *sc, int nbufs); @@ -479,9 +587,16 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, /* VIFs */ /********/ +#define P2P_DEFAULT_CTWIN 10 + struct ath_vif { struct list_head list; + /* BSS info */ + u8 bssid[ETH_ALEN]; + u16 aid; + bool assoc; + struct ieee80211_vif *vif; struct ath_node mcast_node; int av_bslot; @@ -497,8 +612,10 @@ struct ath_vif { u32 offchannel_start; u32 offchannel_duration; - u32 periodic_noa_start; - u32 periodic_noa_duration; + /* These are used for both periodic and one-shot */ + u32 noa_start; + u32 noa_duration; + bool periodic_noa; }; struct ath9k_vif_iter_data { @@ -583,7 +700,6 @@ void ath9k_csa_update(struct ath_softc *sc); #define ATH_PAPRD_TIMEOUT 100 /* msecs */ #define ATH_PLL_WORK_INTERVAL 100 -void ath_chanctx_work(struct work_struct *work); void ath_tx_complete_poll_work(struct work_struct *work); void ath_reset_work(struct work_struct *work); bool ath_hw_check(struct ath_softc *sc); @@ -597,8 +713,6 @@ int ath_update_survey_stats(struct ath_softc *sc); void ath_update_survey_nf(struct ath_softc *sc, int channel); void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type); void ath_ps_full_sleep(unsigned long data); -void ath9k_p2p_ps_timer(void *priv); -void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif); void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop); /**********/ @@ -849,12 +963,17 @@ struct 
ath_softc { struct mutex mutex; struct work_struct paprd_work; struct work_struct hw_reset_work; - struct work_struct chanctx_work; struct completion paprd_complete; wait_queue_head_t tx_wait; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + struct work_struct chanctx_work; struct ath_gen_timer *p2p_ps_timer; struct ath_vif *p2p_ps_vif; + struct ath_chanctx_sched sched; + struct ath_offchannel offchannel; + struct ath_chanctx *next_chan; +#endif unsigned long driver_data; @@ -865,7 +984,6 @@ struct ath_softc { bool ps_enabled; bool ps_idle; short nbcnvifs; - short nvifs; unsigned long ps_usecount; struct ath_rx rx; @@ -875,10 +993,7 @@ struct ath_softc { struct cfg80211_chan_def cur_chandef; struct ath_chanctx chanctx[ATH9K_NUM_CHANCTX]; struct ath_chanctx *cur_chan; - struct ath_chanctx *next_chan; spinlock_t chan_lock; - struct ath_offchannel offchannel; - struct ath_chanctx_sched sched; #ifdef CONFIG_MAC80211_LEDS bool led_registered; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index eaf8f058c15154941c1abfe0cc34585a5a217733..a6af855ef6ed9adb41e5a2c5d4ca90d197036c46 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -108,55 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); } -static void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, - struct sk_buff *skb) -{ - static const u8 noa_ie_hdr[] = { - WLAN_EID_VENDOR_SPECIFIC, /* type */ - 0, /* length */ - 0x50, 0x6f, 0x9a, /* WFA OUI */ - 0x09, /* P2P subtype */ - 0x0c, /* Notice of Absence */ - 0x00, /* LSB of little-endian len */ - 0x00, /* MSB of little-endian len */ - }; - - struct ieee80211_p2p_noa_attr *noa; - int noa_len, noa_desc, i = 0; - u8 *hdr; - - if (!avp->offchannel_duration && !avp->periodic_noa_duration) - return; - - noa_desc = !!avp->offchannel_duration + !!avp->periodic_noa_duration; - noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc; - - hdr = skb_put(skb, sizeof(noa_ie_hdr)); - memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr)); - hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2; - hdr[7] = noa_len; - - noa = (void *) skb_put(skb, noa_len); - memset(noa, 0, noa_len); - - noa->index = avp->noa_index; - if (avp->periodic_noa_duration) { - u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); - - noa->desc[i].count = 255; - noa->desc[i].start_time = cpu_to_le32(avp->periodic_noa_start); - noa->desc[i].duration = cpu_to_le32(avp->periodic_noa_duration); - noa->desc[i].interval = cpu_to_le32(interval); - i++; - } - - if (avp->offchannel_duration) { - noa->desc[i].count = 1; - noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start); - noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration); - } -} - static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -232,7 +183,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, spin_unlock_bh(&cabq->axq_lock); if (skb && cabq_depth) { - if (sc->nvifs > 1) { + if (sc->cur_chan->nvifs > 1) { ath_dbg(common, BEACON, "Flushing previous cabq traffic\n"); ath_draintxq(sc, cabq); @@ -427,9 +378,10 @@ void ath9k_beacon_tasklet(unsigned long data) /* EDMA devices check that in the tx completion function. 
*/ if (!edma) { - if (sc->sched.beacon_pending) - ath_chanctx_event(sc, NULL, + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_beacon_sent_ev(sc, ATH_CHANCTX_EVENT_BEACON_SENT); + } if (ath9k_csa_is_finished(sc, vif)) return; @@ -438,7 +390,10 @@ void ath9k_beacon_tasklet(unsigned long data) if (!vif || !vif->bss_conf.enable_beacon) return; - ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE); + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE); + } + bf = ath9k_beacon_generate(sc->hw, vif); if (sc->beacon.bmisscnt != 0) { @@ -559,6 +514,18 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (void *)vif->drv_priv; + + if (ath9k_is_chanctx_enabled()) { + /* + * If the VIF is not present in the current channel context, + * then we can't do the usual opmode checks. Allow the + * beacon config for the VIF to be updated in this case and + * return immediately. + */ + if (sc->cur_chan != avp->chanctx) + return true; + } if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { if ((vif->type != NL80211_IFTYPE_AP) || diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index ba214ebdcd167163cb38e50fb225b749d46005bf..945c89826b14cc70f1dd5f36b4bcd2b9efc21b10 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -83,8 +83,6 @@ static int ath_set_channel(struct ath_softc *sc) if (hw->conf.radar_enabled) { u32 rxfilter; - /* set HW specific DFS configuration */ - ath9k_hw_set_radar_params(ah); rxfilter = ath9k_hw_getrxfilter(ah); rxfilter |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR; @@ -101,51 +99,100 @@ static int ath_set_channel(struct ath_softc *sc) return 0; } -static bool -ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, - bool powersave) +void ath_chanctx_init(struct ath_softc *sc) { - struct ieee80211_vif *vif = avp->vif; - struct ieee80211_sta *sta = NULL; - struct ieee80211_hdr_3addr *nullfunc; - struct ath_tx_control txctl; - struct sk_buff *skb; - int band = sc->cur_chan->chandef.chan->band; + struct ath_chanctx *ctx; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + int i, j; - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (!vif->bss_conf.assoc) - return false; + sband = &common->sbands[IEEE80211_BAND_2GHZ]; + if (!sband->n_channels) + sband = &common->sbands[IEEE80211_BAND_5GHZ]; - skb = ieee80211_nullfunc_get(sc->hw, vif); - if (!skb) - return false; + chan = &sband->channels[0]; + for (i = 0; i < ATH9K_NUM_CHANCTX; i++) { + ctx = &sc->chanctx[i]; + cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); + INIT_LIST_HEAD(&ctx->vifs); + ctx->txpower = ATH_TXPOWER_MAX; + for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) + INIT_LIST_HEAD(&ctx->acq[j]); + } +} - nullfunc = (struct ieee80211_hdr_3addr *) skb->data; - if (powersave) - nullfunc->frame_control |= - cpu_to_le16(IEEE80211_FCTL_PM); +void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + bool cur_chan; - skb_set_queue_mapping(skb, IEEE80211_AC_VO); - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { - dev_kfree_skb_any(skb); - return false; - } - break; + spin_lock_bh(&sc->chan_lock); + if (chandef) + 
memcpy(&ctx->chandef, chandef, sizeof(*chandef)); + cur_chan = sc->cur_chan == ctx; + spin_unlock_bh(&sc->chan_lock); + + if (!cur_chan) { + ath_dbg(common, CHAN_CTX, + "Current context differs from the new context\n"); + return; + } + + ath_set_channel(sc); +} + +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + +/**********************************************************/ +/* Functions to handle the channel context state machine. */ +/**********************************************************/ + +static const char *offchannel_state_string(enum ath_offchannel_state state) +{ + switch (state) { + case_rtn_string(ATH_OFFCHANNEL_IDLE); + case_rtn_string(ATH_OFFCHANNEL_PROBE_SEND); + case_rtn_string(ATH_OFFCHANNEL_PROBE_WAIT); + case_rtn_string(ATH_OFFCHANNEL_SUSPEND); + case_rtn_string(ATH_OFFCHANNEL_ROC_START); + case_rtn_string(ATH_OFFCHANNEL_ROC_WAIT); + case_rtn_string(ATH_OFFCHANNEL_ROC_DONE); default: - return false; + return "unknown"; } +} - memset(&txctl, 0, sizeof(txctl)); - txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; - txctl.sta = sta; - txctl.force_channel = true; - if (ath_tx_start(sc->hw, skb, &txctl)) { - ieee80211_free_txskb(sc->hw, skb); - return false; +static const char *chanctx_event_string(enum ath_chanctx_event ev) +{ + switch (ev) { + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_PREPARE); + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT); + case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER); + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED); + case_rtn_string(ATH_CHANCTX_EVENT_ASSOC); + case_rtn_string(ATH_CHANCTX_EVENT_SWITCH); + case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN); + case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN); + case_rtn_string(ATH_CHANCTX_EVENT_CHANGE); + case_rtn_string(ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); + default: + return "unknown"; } +} - return true; +static const char *chanctx_state_string(enum ath_chanctx_state state) +{ + switch (state) { + case_rtn_string(ATH_CHANCTX_STATE_IDLE); + case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_BEACON); + case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_TIMER); + case_rtn_string(ATH_CHANCTX_STATE_SWITCH); + case_rtn_string(ATH_CHANCTX_STATE_FORCE_ACTIVE); + default: + return "unknown"; + } } void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) @@ -164,7 +211,7 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) switch (vif->type) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) + if (avp->assoc) active = true; break; default: @@ -186,354 +233,134 @@ void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) } if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) return; - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); -} - -static bool -ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave) -{ - struct ath_vif *avp; - bool sent = false; - rcu_read_lock(); - list_for_each_entry(avp, &sc->cur_chan->vifs, list) { - if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave)) - sent = true; + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, NULL, + ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); } - rcu_read_unlock(); - - return sent; } -static bool ath_chanctx_defer_switch(struct ath_softc *sc) +static struct ath_chanctx * +ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx) { - if (sc->cur_chan == &sc->offchannel.chan) - return false; - - switch (sc->sched.state) { - case ATH_CHANCTX_STATE_SWITCH: - return false; - case ATH_CHANCTX_STATE_IDLE: - if (!sc->cur_chan->switch_after_beacon) 
- return false; - - sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; - break; - default: - break; - } + int idx = ctx - &sc->chanctx[0]; - return true; + return &sc->chanctx[!idx]; } -static void ath_chanctx_set_next(struct ath_softc *sc, bool force) +static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) { + struct ath_chanctx *prev, *cur; struct timespec ts; - bool measure_time = false; - bool send_ps = false; - - spin_lock_bh(&sc->chan_lock); - if (!sc->next_chan) { - spin_unlock_bh(&sc->chan_lock); - return; - } - - if (!force && ath_chanctx_defer_switch(sc)) { - spin_unlock_bh(&sc->chan_lock); - return; - } - - if (sc->cur_chan != sc->next_chan) { - sc->cur_chan->stopped = true; - spin_unlock_bh(&sc->chan_lock); + u32 cur_tsf, prev_tsf, beacon_int; + s32 offset; - if (sc->next_chan == &sc->offchannel.chan) { - getrawmonotonic(&ts); - measure_time = true; - } - __ath9k_flush(sc->hw, ~0, true); + beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); - if (ath_chanctx_send_ps_frame(sc, true)) - __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false); + cur = sc->cur_chan; + prev = ath_chanctx_get_next(sc, cur); - send_ps = true; - spin_lock_bh(&sc->chan_lock); + if (!prev->switch_after_beacon) + return; - if (sc->cur_chan != &sc->offchannel.chan) { - getrawmonotonic(&sc->cur_chan->tsf_ts); - sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); - } - } - sc->cur_chan = sc->next_chan; - sc->cur_chan->stopped = false; - sc->next_chan = NULL; - sc->sched.offchannel_duration = 0; - if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE) - sc->sched.state = ATH_CHANCTX_STATE_IDLE; + getrawmonotonic(&ts); + cur_tsf = (u32) cur->tsf_val + + ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts); - spin_unlock_bh(&sc->chan_lock); + prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf; + prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts); - if (sc->sc_ah->chip_fullsleep || - memcmp(&sc->cur_chandef, &sc->cur_chan->chandef, - sizeof(sc->cur_chandef))) { - ath_set_channel(sc); - if (measure_time) - sc->sched.channel_switch_time = - ath9k_hw_get_tsf_offset(&ts, NULL); - } - if (send_ps) - ath_chanctx_send_ps_frame(sc, false); + /* Adjust the TSF time of the AP chanctx to keep its beacons + * at half beacon interval offset relative to the STA chanctx. + */ + offset = cur_tsf - prev_tsf; - ath_offchannel_channel_change(sc); - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH); -} + /* Ignore stale data or spurious timestamps */ + if (offset < 0 || offset > 3 * beacon_int) + return; -void ath_chanctx_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, - chanctx_work); - mutex_lock(&sc->mutex); - ath_chanctx_set_next(sc, false); - mutex_unlock(&sc->mutex); + offset = beacon_int / 2 - (offset % beacon_int); + prev->tsf_val += offset; } -void ath_chanctx_init(struct ath_softc *sc) +/* Configure the TSF based hardware timer for a channel switch. + * Also set up backup software timer, in case the gen timer fails. + * This could be caused by a hardware reset. 
+ */ +static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time) { - struct ath_chanctx *ctx; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ieee80211_supported_band *sband; - struct ieee80211_channel *chan; - int i, j; - - sband = &common->sbands[IEEE80211_BAND_2GHZ]; - if (!sband->n_channels) - sband = &common->sbands[IEEE80211_BAND_5GHZ]; + struct ath_hw *ah = sc->sc_ah; - chan = &sband->channels[0]; - for (i = 0; i < ATH9K_NUM_CHANCTX; i++) { - ctx = &sc->chanctx[i]; - cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); - INIT_LIST_HEAD(&ctx->vifs); - ctx->txpower = ATH_TXPOWER_MAX; - for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) - INIT_LIST_HEAD(&ctx->acq[j]); - } - ctx = &sc->offchannel.chan; - cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); - INIT_LIST_HEAD(&ctx->vifs); - ctx->txpower = ATH_TXPOWER_MAX; - for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) - INIT_LIST_HEAD(&ctx->acq[j]); - sc->offchannel.chan.offchannel = true; + ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000); + tsf_time -= ath9k_hw_gettsf32(ah); + tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1; + mod_timer(&sc->sched.timer, jiffies + tsf_time); + ath_dbg(common, CHAN_CTX, + "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time)); } -void ath9k_chanctx_force_active(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, + enum ath_chanctx_event ev) { - struct ath_softc *sc = hw->priv; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; - bool changed = false; - - if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) - return; - - if (!avp->chanctx) - return; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_beacon_config *cur_conf; + struct ath_vif *avp = NULL; + struct ath_chanctx *ctx; + u32 tsf_time; + u32 beacon_int; - mutex_lock(&sc->mutex); + if (vif) + avp = (struct ath_vif *) vif->drv_priv; spin_lock_bh(&sc->chan_lock); - if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { - sc->next_chan = avp->chanctx; - changed = true; - } - sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; - spin_unlock_bh(&sc->chan_lock); - if (changed) - ath_chanctx_set_next(sc, true); + ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s\n", + sc->cur_chan->chandef.center_freq1, + chanctx_event_string(ev), + chanctx_state_string(sc->sched.state)); - mutex_unlock(&sc->mutex); -} + switch (ev) { + case ATH_CHANCTX_EVENT_BEACON_PREPARE: + if (avp->offchannel_duration) + avp->offchannel_duration = 0; -void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); + if (avp->chanctx != sc->cur_chan) { + ath_dbg(common, CHAN_CTX, + "Contexts differ, not preparing beacon\n"); + break; + } - spin_lock_bh(&sc->chan_lock); - - if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) && - (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) { - sc->sched.offchannel_pending = true; - spin_unlock_bh(&sc->chan_lock); - return; - } - - sc->next_chan = ctx; - if (chandef) - ctx->chandef = *chandef; - - if (sc->next_chan == &sc->offchannel.chan) { - sc->sched.offchannel_duration = - TU_TO_USEC(sc->offchannel.duration) + - sc->sched.channel_switch_time; - } - spin_unlock_bh(&sc->chan_lock); - ieee80211_queue_work(sc->hw, &sc->chanctx_work); -} - -void 
ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef) -{ - bool cur_chan; - - spin_lock_bh(&sc->chan_lock); - if (chandef) - memcpy(&ctx->chandef, chandef, sizeof(*chandef)); - cur_chan = sc->cur_chan == ctx; - spin_unlock_bh(&sc->chan_lock); - - if (!cur_chan) - return; - - ath_set_channel(sc); -} - -struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, bool active) -{ - struct ath_chanctx *ctx; - - ath_for_each_chanctx(sc, ctx) { - if (!ctx->assigned || list_empty(&ctx->vifs)) - continue; - if (active && !ctx->active) - continue; - - if (ctx->switch_after_beacon) - return ctx; - } - - return &sc->chanctx[0]; -} - -void ath_chanctx_offchan_switch(struct ath_softc *sc, - struct ieee80211_channel *chan) -{ - struct cfg80211_chan_def chandef; - - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - - ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef); -} - -static struct ath_chanctx * -ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx) -{ - int idx = ctx - &sc->chanctx[0]; - - return &sc->chanctx[!idx]; -} - -static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) -{ - struct ath_chanctx *prev, *cur; - struct timespec ts; - u32 cur_tsf, prev_tsf, beacon_int; - s32 offset; - - beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); - - cur = sc->cur_chan; - prev = ath_chanctx_get_next(sc, cur); - - getrawmonotonic(&ts); - cur_tsf = (u32) cur->tsf_val + - ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts); - - prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf; - prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts); - - /* Adjust the TSF time of the AP chanctx to keep its beacons - * at half beacon interval offset relative to the STA chanctx. - */ - offset = cur_tsf - prev_tsf; - - /* Ignore stale data or spurious timestamps */ - if (offset < 0 || offset > 3 * beacon_int) - return; - - offset = beacon_int / 2 - (offset % beacon_int); - prev->tsf_val += offset; -} - -void ath_chanctx_timer(unsigned long data) -{ - struct ath_softc *sc = (struct ath_softc *) data; - - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); -} - -/* Configure the TSF based hardware timer for a channel switch. - * Also set up backup software timer, in case the gen timer fails. - * This could be caused by a hardware reset. 
- */ -static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time) -{ - struct ath_hw *ah = sc->sc_ah; - - ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000); - tsf_time -= ath9k_hw_gettsf32(ah); - tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1; - mod_timer(&sc->sched.timer, tsf_time); -} - -void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, - enum ath_chanctx_event ev) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath_beacon_config *cur_conf; - struct ath_vif *avp = NULL; - struct ath_chanctx *ctx; - u32 tsf_time; - u32 beacon_int; - bool noa_changed = false; - - if (vif) - avp = (struct ath_vif *) vif->drv_priv; - - spin_lock_bh(&sc->chan_lock); - - switch (ev) { - case ATH_CHANCTX_EVENT_BEACON_PREPARE: - if (avp->offchannel_duration) - avp->offchannel_duration = 0; - - if (avp->chanctx != sc->cur_chan) - break; - - if (sc->sched.offchannel_pending) { + if (sc->sched.offchannel_pending && !sc->sched.wait_switch) { sc->sched.offchannel_pending = false; sc->next_chan = &sc->offchannel.chan; sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Setting offchannel_pending to false\n"); } ctx = ath_chanctx_get_next(sc, sc->cur_chan); if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) { sc->next_chan = ctx; sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Set next context, move chanctx state to WAIT_FOR_BEACON\n"); } /* if the timer missed its window, use the next interval */ - if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) + if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) { sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n"); + } if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; + ath_dbg(common, CHAN_CTX, "Preparing beacon for vif: %pM\n", vif->addr); + sc->sched.beacon_pending = true; sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER); @@ -545,47 +372,107 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->sched.switch_start_time = tsf_time; sc->cur_chan->last_beacon = sc->sched.next_tbtt; - /* Prevent wrap-around issues */ - if (avp->periodic_noa_duration && - tsf_time - avp->periodic_noa_start > BIT(30)) - avp->periodic_noa_duration = 0; - - if (ctx->active && !avp->periodic_noa_duration) { - avp->periodic_noa_start = tsf_time; - avp->periodic_noa_duration = - TU_TO_USEC(cur_conf->beacon_interval) / 2 - - sc->sched.channel_switch_time; - noa_changed = true; - } else if (!ctx->active && avp->periodic_noa_duration) { - avp->periodic_noa_duration = 0; - noa_changed = true; + /* + * If an offchannel switch is scheduled to happen after + * a beacon transmission, update the NoA with one-shot + * values and increment the index. + */ + if (sc->next_chan == &sc->offchannel.chan) { + avp->noa_index++; + avp->offchannel_start = tsf_time; + avp->offchannel_duration = sc->sched.offchannel_duration; + + ath_dbg(common, CHAN_CTX, + "offchannel noa_duration: %d, noa_start: %d, noa_index: %d\n", + avp->offchannel_duration, + avp->offchannel_start, + avp->noa_index); + + /* + * When multiple contexts are active, the NoA + * has to be recalculated and advertised after + * an offchannel operation. 
+ */ + if (ctx->active && avp->noa_duration) + avp->noa_duration = 0; + + break; + } + + /* + * Clear the extend_absence flag if it had been + * set during the previous beacon transmission, + * since we need to revert to the normal NoA + * schedule. + */ + if (ctx->active && sc->sched.extend_absence) { + avp->noa_duration = 0; + sc->sched.extend_absence = false; } /* If at least two consecutive beacons were missed on the STA * chanctx, stay on the STA channel for one extra beacon period, * to resync the timer properly. */ - if (ctx->active && sc->sched.beacon_miss >= 2) - sc->sched.offchannel_duration = 3 * beacon_int / 2; - - if (sc->sched.offchannel_duration) { - noa_changed = true; - avp->offchannel_start = tsf_time; - avp->offchannel_duration = - sc->sched.offchannel_duration; + if (ctx->active && sc->sched.beacon_miss >= 2) { + avp->noa_duration = 0; + sc->sched.extend_absence = true; } - if (noa_changed) + /* Prevent wrap-around issues */ + if (avp->noa_duration && tsf_time - avp->noa_start > BIT(30)) + avp->noa_duration = 0; + + /* + * If multiple contexts are active, start periodic + * NoA and increment the index for the first + * announcement. + */ + if (ctx->active && + (!avp->noa_duration || sc->sched.force_noa_update)) { avp->noa_index++; + avp->noa_start = tsf_time; + + if (sc->sched.extend_absence) + avp->noa_duration = (3 * beacon_int / 2) + + sc->sched.channel_switch_time; + else + avp->noa_duration = + TU_TO_USEC(cur_conf->beacon_interval) / 2 + + sc->sched.channel_switch_time; + + if (test_bit(ATH_OP_SCANNING, &common->op_flags) || + sc->sched.extend_absence) + avp->periodic_noa = false; + else + avp->periodic_noa = true; + + ath_dbg(common, CHAN_CTX, + "noa_duration: %d, noa_start: %d, noa_index: %d, periodic: %d\n", + avp->noa_duration, + avp->noa_start, + avp->noa_index, + avp->periodic_noa); + } + + if (ctx->active && sc->sched.force_noa_update) + sc->sched.force_noa_update = false; + break; case ATH_CHANCTX_EVENT_BEACON_SENT: - if (!sc->sched.beacon_pending) + if (!sc->sched.beacon_pending) { + ath_dbg(common, CHAN_CTX, + "No pending beacon\n"); break; + } sc->sched.beacon_pending = false; if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to WAIT_FOR_TIMER\n"); + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER; ath_chanctx_setup_timer(sc, sc->sched.switch_start_time); break; @@ -597,6 +484,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->sched.beacon_pending) sc->sched.beacon_miss++; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to SWITCH\n"); + sc->sched.state = ATH_CHANCTX_STATE_SWITCH; ieee80211_queue_work(sc->hw, &sc->chanctx_work); break; @@ -625,6 +515,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, avp->chanctx != sc->cur_chan) break; + ath_dbg(common, CHAN_CTX, + "Move chanctx state from FORCE_ACTIVE to IDLE\n"); + sc->sched.state = ATH_CHANCTX_STATE_IDLE; /* fall through */ case ATH_CHANCTX_EVENT_SWITCH: @@ -640,10 +533,15 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan); cur_conf = &sc->cur_chan->beacon; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to WAIT_FOR_TIMER (event SWITCH)\n"); + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER; + sc->sched.wait_switch = false; tsf_time = TU_TO_USEC(cur_conf->beacon_interval) / 2; - if (sc->sched.beacon_miss >= 2) { + + if (sc->sched.extend_absence) { sc->sched.beacon_miss = 0; tsf_time *= 3; } @@ 
-679,7 +577,874 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->next_chan = ctx; ieee80211_queue_work(sc->hw, &sc->chanctx_work); break; + case ATH_CHANCTX_EVENT_ASSIGN: + /* + * When adding a new channel context, check if a scan + * is in progress and abort it since the addition of + * a new channel context is usually followed by VIF + * assignment, in which case we have to start multi-channel + * operation. + */ + if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { + ath_dbg(common, CHAN_CTX, + "Aborting HW scan to add new context\n"); + + spin_unlock_bh(&sc->chan_lock); + del_timer_sync(&sc->offchannel.timer); + ath_scan_complete(sc, true); + spin_lock_bh(&sc->chan_lock); + } + break; + case ATH_CHANCTX_EVENT_CHANGE: + break; + } + + spin_unlock_bh(&sc->chan_lock); +} + +void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ + if (sc->sched.beacon_pending) + ath_chanctx_event(sc, NULL, ev); +} + +void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ + ath_chanctx_event(sc, NULL, ev); +} + +static int ath_scan_channel_duration(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + + if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR)) + return (HZ / 9); /* ~110 ms */ + + return (HZ / 16); /* ~60 ms */ +} + +static void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + spin_lock_bh(&sc->chan_lock); + + if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) && + (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) { + if (chandef) + ctx->chandef = *chandef; + + sc->sched.offchannel_pending = true; + sc->sched.wait_switch = true; + sc->sched.offchannel_duration = + jiffies_to_usecs(sc->offchannel.duration) + + sc->sched.channel_switch_time; + + spin_unlock_bh(&sc->chan_lock); + ath_dbg(common, CHAN_CTX, + "Set offchannel_pending to true\n"); + return; + } + + sc->next_chan = ctx; + if (chandef) { + ctx->chandef = *chandef; + ath_dbg(common, CHAN_CTX, + "Assigned next_chan to %d MHz\n", chandef->center_freq1); } + if (sc->next_chan == &sc->offchannel.chan) { + sc->sched.offchannel_duration = + jiffies_to_usecs(sc->offchannel.duration) + + sc->sched.channel_switch_time; + + if (chandef) { + ath_dbg(common, CHAN_CTX, + "Offchannel duration for chan %d MHz : %u\n", + chandef->center_freq1, + sc->sched.offchannel_duration); + } + } spin_unlock_bh(&sc->chan_lock); + ieee80211_queue_work(sc->hw, &sc->chanctx_work); +} + +static void ath_chanctx_offchan_switch(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_chan_def chandef; + + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + ath_dbg(common, CHAN_CTX, + "Channel definition created: %d MHz\n", chandef.center_freq1); + + ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef); +} + +static struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, + bool active) +{ + struct ath_chanctx *ctx; + + ath_for_each_chanctx(sc, ctx) { + if (!ctx->assigned || list_empty(&ctx->vifs)) + continue; + if (active && !ctx->active) + continue; + + if (ctx->switch_after_beacon) + return ctx; + } + + return &sc->chanctx[0]; +} + +static void +ath_scan_next_channel(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_scan_request 
*req = sc->offchannel.scan_req; + struct ieee80211_channel *chan; + + if (sc->offchannel.scan_idx >= req->n_channels) { + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_IDLE, " + "scan_idx: %d, n_channels: %d\n", + sc->offchannel.scan_idx, + req->n_channels); + + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), + NULL); + return; + } + + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_PROBE_SEND, scan_idx: %d\n", + sc->offchannel.scan_idx); + + chan = req->channels[sc->offchannel.scan_idx++]; + sc->offchannel.duration = ath_scan_channel_duration(sc, chan); + sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND; + + ath_chanctx_offchan_switch(sc, chan); +} + +void ath_offchannel_next(struct ath_softc *sc) +{ + struct ieee80211_vif *vif; + + if (sc->offchannel.scan_req) { + vif = sc->offchannel.scan_vif; + sc->offchannel.chan.txpower = vif->bss_conf.txpower; + ath_scan_next_channel(sc); + } else if (sc->offchannel.roc_vif) { + vif = sc->offchannel.roc_vif; + sc->offchannel.chan.txpower = vif->bss_conf.txpower; + sc->offchannel.duration = + msecs_to_jiffies(sc->offchannel.roc_duration); + sc->offchannel.state = ATH_OFFCHANNEL_ROC_START; + ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan); + } else { + ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), + NULL); + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + if (sc->ps_idle) + ath_cancel_work(sc); + } } + +void ath_roc_complete(struct ath_softc *sc, bool abort) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (abort) + ath_dbg(common, CHAN_CTX, "RoC aborted\n"); + else + ath_dbg(common, CHAN_CTX, "RoC expired\n"); + + sc->offchannel.roc_vif = NULL; + sc->offchannel.roc_chan = NULL; + if (!abort) + ieee80211_remain_on_channel_expired(sc->hw); + ath_offchannel_next(sc); + ath9k_ps_restore(sc); +} + +void ath_scan_complete(struct ath_softc *sc, bool abort) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (abort) + ath_dbg(common, CHAN_CTX, "HW scan aborted\n"); + else + ath_dbg(common, CHAN_CTX, "HW scan complete\n"); + + sc->offchannel.scan_req = NULL; + sc->offchannel.scan_vif = NULL; + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + ieee80211_scan_completed(sc->hw, abort); + clear_bit(ATH_OP_SCANNING, &common->op_flags); + spin_lock_bh(&sc->chan_lock); + if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + sc->sched.force_noa_update = true; + spin_unlock_bh(&sc->chan_lock); + ath_offchannel_next(sc); + ath9k_ps_restore(sc); +} + +static void ath_scan_send_probe(struct ath_softc *sc, + struct cfg80211_ssid *ssid) +{ + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + struct ieee80211_vif *vif = sc->offchannel.scan_vif; + struct ath_tx_control txctl = {}; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + int band = sc->offchannel.chan.chandef.chan->band; + + skb = ieee80211_probereq_get(sc->hw, vif, + ssid->ssid, ssid->ssid_len, req->ie_len); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + if (req->no_cck) + info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + + if (req->ie_len) + memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len); + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) + goto error; + + txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; + txctl.force_channel = true; + if (ath_tx_start(sc->hw, skb, &txctl)) + goto error; + + return; + +error: + ieee80211_free_txskb(sc->hw, skb); +} + +static 
void ath_scan_channel_start(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + int i; + + if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) && + req->n_ssids) { + for (i = 0; i < req->n_ssids; i++) + ath_scan_send_probe(sc, &req->ssids[i]); + + } + + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_PROBE_WAIT\n"); + + sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT; + mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration); +} + +static void ath_chanctx_timer(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *) data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, + "Channel context timer invoked\n"); + + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); +} + +static void ath_offchannel_timer(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_chanctx *ctx; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n", + __func__, offchannel_state_string(sc->offchannel.state)); + + switch (sc->offchannel.state) { + case ATH_OFFCHANNEL_PROBE_WAIT: + if (!sc->offchannel.scan_req) + return; + + /* get first active channel context */ + ctx = ath_chanctx_get_oper_chan(sc, true); + if (ctx->active) { + ath_dbg(common, CHAN_CTX, + "Switch to oper/active context, " + "move offchannel state to ATH_OFFCHANNEL_SUSPEND\n"); + + sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND; + ath_chanctx_switch(sc, ctx, NULL); + mod_timer(&sc->offchannel.timer, jiffies + HZ / 10); + break; + } + /* fall through */ + case ATH_OFFCHANNEL_SUSPEND: + if (!sc->offchannel.scan_req) + return; + + ath_scan_next_channel(sc); + break; + case ATH_OFFCHANNEL_ROC_START: + case ATH_OFFCHANNEL_ROC_WAIT: + ctx = ath_chanctx_get_oper_chan(sc, false); + sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; + ath_chanctx_switch(sc, ctx, NULL); + break; + default: + break; + } +} + +static bool +ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, + bool powersave) +{ + struct ieee80211_vif *vif = avp->vif; + struct ieee80211_sta *sta = NULL; + struct ieee80211_hdr_3addr *nullfunc; + struct ath_tx_control txctl; + struct sk_buff *skb; + int band = sc->cur_chan->chandef.chan->band; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (!avp->assoc) + return false; + + skb = ieee80211_nullfunc_get(sc->hw, vif); + if (!skb) + return false; + + nullfunc = (struct ieee80211_hdr_3addr *) skb->data; + if (powersave) + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_FCTL_PM); + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { + dev_kfree_skb_any(skb); + return false; + } + break; + default: + return false; + } + + memset(&txctl, 0, sizeof(txctl)); + txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; + txctl.sta = sta; + txctl.force_channel = true; + if (ath_tx_start(sc->hw, skb, &txctl)) { + ieee80211_free_txskb(sc->hw, skb); + return false; + } + + return true; +} + +static bool +ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave) +{ + struct ath_vif *avp; + bool sent = false; + + rcu_read_lock(); + list_for_each_entry(avp, &sc->cur_chan->vifs, list) { + if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave)) + sent = true; + } + rcu_read_unlock(); + + return sent; +} + +static bool ath_chanctx_defer_switch(struct ath_softc *sc) +{ + struct ath_common *common 
= ath9k_hw_common(sc->sc_ah); + + if (sc->cur_chan == &sc->offchannel.chan) + return false; + + switch (sc->sched.state) { + case ATH_CHANCTX_STATE_SWITCH: + return false; + case ATH_CHANCTX_STATE_IDLE: + if (!sc->cur_chan->switch_after_beacon) + return false; + + ath_dbg(common, CHAN_CTX, + "Defer switch, set chanctx state to WAIT_FOR_BEACON\n"); + + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + break; + default: + break; + } + + return true; +} + +static void ath_offchannel_channel_change(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n", + __func__, offchannel_state_string(sc->offchannel.state)); + + switch (sc->offchannel.state) { + case ATH_OFFCHANNEL_PROBE_SEND: + if (!sc->offchannel.scan_req) + return; + + if (sc->cur_chan->chandef.chan != + sc->offchannel.chan.chandef.chan) + return; + + ath_scan_channel_start(sc); + break; + case ATH_OFFCHANNEL_IDLE: + if (!sc->offchannel.scan_req) + return; + + ath_scan_complete(sc, false); + break; + case ATH_OFFCHANNEL_ROC_START: + if (sc->cur_chan != &sc->offchannel.chan) + break; + + sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT; + mod_timer(&sc->offchannel.timer, + jiffies + sc->offchannel.duration); + ieee80211_ready_on_channel(sc->hw); + break; + case ATH_OFFCHANNEL_ROC_DONE: + ath_roc_complete(sc, false); + break; + default: + break; + } +} + +void ath_chanctx_set_next(struct ath_softc *sc, bool force) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_chanctx *old_ctx; + struct timespec ts; + bool measure_time = false; + bool send_ps = false; + bool queues_stopped = false; + + spin_lock_bh(&sc->chan_lock); + if (!sc->next_chan) { + spin_unlock_bh(&sc->chan_lock); + return; + } + + if (!force && ath_chanctx_defer_switch(sc)) { + spin_unlock_bh(&sc->chan_lock); + return; + } + + ath_dbg(common, CHAN_CTX, + "%s: current: %d MHz, next: %d MHz\n", + __func__, + sc->cur_chan->chandef.center_freq1, + sc->next_chan->chandef.center_freq1); + + if (sc->cur_chan != sc->next_chan) { + ath_dbg(common, CHAN_CTX, + "Stopping current chanctx: %d\n", + sc->cur_chan->chandef.center_freq1); + sc->cur_chan->stopped = true; + spin_unlock_bh(&sc->chan_lock); + + if (sc->next_chan == &sc->offchannel.chan) { + getrawmonotonic(&ts); + measure_time = true; + } + + ath9k_chanctx_stop_queues(sc, sc->cur_chan); + queues_stopped = true; + + __ath9k_flush(sc->hw, ~0, true); + + if (ath_chanctx_send_ps_frame(sc, true)) + __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false); + + send_ps = true; + spin_lock_bh(&sc->chan_lock); + + if (sc->cur_chan != &sc->offchannel.chan) { + getrawmonotonic(&sc->cur_chan->tsf_ts); + sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); + } + } + old_ctx = sc->cur_chan; + sc->cur_chan = sc->next_chan; + sc->cur_chan->stopped = false; + sc->next_chan = NULL; + + if (!sc->sched.offchannel_pending) + sc->sched.offchannel_duration = 0; + + if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE) + sc->sched.state = ATH_CHANCTX_STATE_IDLE; + + spin_unlock_bh(&sc->chan_lock); + + if (sc->sc_ah->chip_fullsleep || + memcmp(&sc->cur_chandef, &sc->cur_chan->chandef, + sizeof(sc->cur_chandef))) { + ath_dbg(common, CHAN_CTX, + "%s: Set channel %d MHz\n", + __func__, sc->cur_chan->chandef.center_freq1); + ath_set_channel(sc); + if (measure_time) + sc->sched.channel_switch_time = + ath9k_hw_get_tsf_offset(&ts, NULL); + /* + * A reset will ensure that all queues are woken up, + * so there is no need to awaken them again. 
+ */ + goto out; + } + + if (queues_stopped) + ath9k_chanctx_wake_queues(sc, old_ctx); +out: + if (send_ps) + ath_chanctx_send_ps_frame(sc, false); + + ath_offchannel_channel_change(sc); + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH); +} + +static void ath_chanctx_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + chanctx_work); + mutex_lock(&sc->mutex); + ath_chanctx_set_next(sc, false); + mutex_unlock(&sc->mutex); +} + +void ath9k_offchannel_init(struct ath_softc *sc) +{ + struct ath_chanctx *ctx; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + int i; + + sband = &common->sbands[IEEE80211_BAND_2GHZ]; + if (!sband->n_channels) + sband = &common->sbands[IEEE80211_BAND_5GHZ]; + + chan = &sband->channels[0]; + + ctx = &sc->offchannel.chan; + INIT_LIST_HEAD(&ctx->vifs); + ctx->txpower = ATH_TXPOWER_MAX; + cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); + + for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) + INIT_LIST_HEAD(&ctx->acq[i]); + + sc->offchannel.chan.offchannel = true; +} + +void ath9k_init_channel_context(struct ath_softc *sc) +{ + INIT_WORK(&sc->chanctx_work, ath_chanctx_work); + + setup_timer(&sc->offchannel.timer, ath_offchannel_timer, + (unsigned long)sc); + setup_timer(&sc->sched.timer, ath_chanctx_timer, + (unsigned long)sc); +} + +void ath9k_deinit_channel_context(struct ath_softc *sc) +{ + cancel_work_sync(&sc->chanctx_work); +} + +bool ath9k_is_chanctx_enabled(void) +{ + return (ath9k_use_chanctx == 1); +} + +/********************/ +/* Queue management */ +/********************/ + +void ath9k_chanctx_stop_queues(struct ath_softc *sc, struct ath_chanctx *ctx) +{ + struct ath_hw *ah = sc->sc_ah; + int i; + + if (ctx == &sc->offchannel.chan) { + ieee80211_stop_queue(sc->hw, + sc->hw->offchannel_tx_hw_queue); + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + ieee80211_stop_queue(sc->hw, + ctx->hw_queue_base + i); + } + + if (ah->opmode == NL80211_IFTYPE_AP) + ieee80211_stop_queue(sc->hw, sc->hw->queues - 2); +} + + +void ath9k_chanctx_wake_queues(struct ath_softc *sc, struct ath_chanctx *ctx) +{ + struct ath_hw *ah = sc->sc_ah; + int i; + + if (ctx == &sc->offchannel.chan) { + ieee80211_wake_queue(sc->hw, + sc->hw->offchannel_tx_hw_queue); + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + ieee80211_wake_queue(sc->hw, + ctx->hw_queue_base + i); + } + + if (ah->opmode == NL80211_IFTYPE_AP) + ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); +} + +/*****************/ +/* P2P Powersave */ +/*****************/ + +static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) +{ + struct ath_hw *ah = sc->sc_ah; + s32 tsf, target_tsf; + + if (!avp || !avp->noa.has_next_tsf) + return; + + ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer); + + tsf = ath9k_hw_gettsf32(sc->sc_ah); + + target_tsf = avp->noa.next_tsf; + if (!avp->noa.absent) + target_tsf -= ATH_P2P_PS_STOP_TIME; + + if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME) + target_tsf = tsf + ATH_P2P_PS_STOP_TIME; + + ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000); +} + +static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) +{ + struct ath_vif *avp = (void *)vif->drv_priv; + u32 tsf; + + if (!sc->p2p_ps_timer) + return; + + if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) + return; + + sc->p2p_ps_vif = avp; + tsf = ath9k_hw_gettsf32(sc->sc_ah); + ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, 
&avp->noa, tsf); + ath9k_update_p2p_ps_timer(sc, avp); +} + +static u8 ath9k_get_ctwin(struct ath_softc *sc, struct ath_vif *avp) +{ + struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; + u8 switch_time, ctwin; + + /* + * Channel switch in multi-channel mode is deferred + * by a quarter beacon interval when handling + * ATH_CHANCTX_EVENT_BEACON_PREPARE, so the P2P-GO + * interface is guaranteed to be discoverable + * for that duration after a TBTT. + */ + switch_time = cur_conf->beacon_interval / 4; + + ctwin = avp->vif->bss_conf.p2p_noa_attr.oppps_ctwindow; + if (ctwin && (ctwin < switch_time)) + return ctwin; + + if (switch_time < P2P_DEFAULT_CTWIN) + return 0; + + return P2P_DEFAULT_CTWIN; +} + +void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb) +{ + static const u8 noa_ie_hdr[] = { + WLAN_EID_VENDOR_SPECIFIC, /* type */ + 0, /* length */ + 0x50, 0x6f, 0x9a, /* WFA OUI */ + 0x09, /* P2P subtype */ + 0x0c, /* Notice of Absence */ + 0x00, /* LSB of little-endian len */ + 0x00, /* MSB of little-endian len */ + }; + + struct ieee80211_p2p_noa_attr *noa; + int noa_len, noa_desc, i = 0; + u8 *hdr; + + if (!avp->offchannel_duration && !avp->noa_duration) + return; + + noa_desc = !!avp->offchannel_duration + !!avp->noa_duration; + noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc; + + hdr = skb_put(skb, sizeof(noa_ie_hdr)); + memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr)); + hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2; + hdr[7] = noa_len; + + noa = (void *) skb_put(skb, noa_len); + memset(noa, 0, noa_len); + + noa->index = avp->noa_index; + noa->oppps_ctwindow = ath9k_get_ctwin(sc, avp); + + if (avp->noa_duration) { + if (avp->periodic_noa) { + u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); + noa->desc[i].count = 255; + noa->desc[i].interval = cpu_to_le32(interval); + } else { + noa->desc[i].count = 1; + } + + noa->desc[i].start_time = cpu_to_le32(avp->noa_start); + noa->desc[i].duration = cpu_to_le32(avp->noa_duration); + i++; + } + + if (avp->offchannel_duration) { + noa->desc[i].count = 1; + noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start); + noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration); + } +} + +void ath9k_p2p_ps_timer(void *priv) +{ + struct ath_softc *sc = priv; + struct ath_vif *avp = sc->p2p_ps_vif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct ath_node *an; + u32 tsf; + + del_timer_sync(&sc->sched.timer); + ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); + + if (!avp || avp->chanctx != sc->cur_chan) + return; + + tsf = ath9k_hw_gettsf32(sc->sc_ah); + if (!avp->noa.absent) + tsf += ATH_P2P_PS_STOP_TIME; + + if (!avp->noa.has_next_tsf || + avp->noa.next_tsf - tsf > BIT(31)) + ieee80211_update_p2p_noa(&avp->noa, tsf); + + ath9k_update_p2p_ps_timer(sc, avp); + + rcu_read_lock(); + + vif = avp->vif; + sta = ieee80211_find_sta(vif, avp->bssid); + if (!sta) + goto out; + + an = (void *) sta->drv_priv; + if (an->sleeping == !!avp->noa.absent) + goto out; + + an->sleeping = avp->noa.absent; + if (an->sleeping) + ath_tx_aggr_sleep(sta, sc, an); + else + ath_tx_aggr_wakeup(sc, an); + +out: + rcu_read_unlock(); +} + +void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ + unsigned long flags; + + spin_lock_bh(&sc->sc_pcu_lock); + spin_lock_irqsave(&sc->sc_pm_lock, flags); + if (!(sc->ps_flags & PS_BEACON_SYNC)) + ath9k_update_p2p_ps(sc, vif); + 
spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + spin_unlock_bh(&sc->sc_pcu_lock); +} + +void ath9k_p2p_beacon_sync(struct ath_softc *sc) +{ + if (sc->p2p_ps_vif) + ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif); +} + +void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ + struct ath_vif *avp = (void *)vif->drv_priv; + + spin_lock_bh(&sc->sc_pcu_lock); + if (avp == sc->p2p_ps_vif) { + sc->p2p_ps_vif = NULL; + ath9k_update_p2p_ps_timer(sc, NULL); + } + spin_unlock_bh(&sc->sc_pcu_lock); +} + +int ath9k_init_p2p(struct ath_softc *sc) +{ + sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer, + NULL, sc, AR_FIRST_NDP_TIMER); + if (!sc->p2p_ps_timer) + return -ENOMEM; + + return 0; +} + +void ath9k_deinit_p2p(struct ath_softc *sc) +{ + if (sc->p2p_ps_timer) + ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer); +} + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index d2279365be6fcc6a85b2252a9c68a2502f94c211..46f20a309b5f53c10e6d487c999d560de4f5f6e3 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -838,7 +838,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf, iter_data.nmeshes, iter_data.nwds); len += scnprintf(buf + len, sizeof(buf) - len, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n", - iter_data.nadhocs, sc->nvifs, sc->nbcnvifs); + iter_data.nadhocs, sc->cur_chan->nvifs, sc->nbcnvifs); } if (len > sizeof(buf)) @@ -1169,6 +1169,29 @@ static const struct file_operations fops_btcoex = { }; #endif +#ifdef CONFIG_ATH9K_DYNACK +static ssize_t read_file_ackto(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + char buf[32]; + unsigned int len; + + len = sprintf(buf, "%u %c\n", ah->dynack.ackto, + (ah->dynack.enabled) ? 'A' : 'S'); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_ackto = { + .read = read_file_ackto, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; +#endif + /* Ethtool support for get-stats */ #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" @@ -1374,5 +1397,10 @@ int ath9k_init_debug(struct ath_hw *ah) &fops_btcoex); #endif +#ifdef CONFIG_ATH9K_DYNACK + debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + sc, &fops_ackto); +#endif + return 0; } diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c new file mode 100644 index 0000000000000000000000000000000000000000..22b3cc4c27cda58f83a2a6deb69957fdcfa502ed --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2014, Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" +#include "hw.h" +#include "dynack.h" + +#define COMPUTE_TO (5 * HZ) +#define LATEACK_DELAY (10 * HZ) +#define LATEACK_TO 256 +#define MAX_DELAY 300 +#define EWMA_LEVEL 96 +#define EWMA_DIV 128 + +/** + * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation + * + */ +static inline u32 ath_dynack_ewma(u32 old, u32 new) +{ + return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV; +} + +/** + * ath_dynack_get_sifs - get sifs time based on phy used + * @ah: ath hw + * @phy: phy used + * + */ +static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy) +{ + u32 sifs = CCK_SIFS_TIME; + + if (phy == WLAN_RC_PHY_OFDM) { + if (IS_CHAN_QUARTER_RATE(ah->curchan)) + sifs = OFDM_SIFS_TIME_QUARTER; + else if (IS_CHAN_HALF_RATE(ah->curchan)) + sifs = OFDM_SIFS_TIME_HALF; + else + sifs = OFDM_SIFS_TIME; + } + return sifs; +} + +/** + * ath_dynack_bssidmask - filter out ACK frames based on BSSID mask + * @ah: ath hw + * @mac: receiver address + */ +static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac) +{ + int i; + struct ath_common *common = ath9k_hw_common(ah); + + for (i = 0; i < ETH_ALEN; i++) { + if ((common->macaddr[i] & common->bssidmask[i]) != + (mac[i] & common->bssidmask[i])) + return false; + } + + return true; +} + +/** + * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout + * @ah: ath hw + * + * should be called while holding qlock + */ +static void ath_dynack_compute_ackto(struct ath_hw *ah) +{ + struct ath_node *an; + u32 to = 0; + struct ath_dynack *da = &ah->dynack; + struct ath_common *common = ath9k_hw_common(ah); + + list_for_each_entry(an, &da->nodes, list) + if (an->ackto > to) + to = an->ackto; + + if (to && da->ackto != to) { + u32 slottime; + + slottime = (to - 3) / 2; + da->ackto = to; + ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n", + da->ackto, slottime); + ath9k_hw_setslottime(ah, slottime); + ath9k_hw_set_ack_timeout(ah, da->ackto); + ath9k_hw_set_cts_timeout(ah, da->ackto); + } +} + +/** + * ath_dynack_compute_to - compute STA ACK timeout + * @ah: ath hw + * + * should be called while holding qlock + */ +static void ath_dynack_compute_to(struct ath_hw *ah) +{ + u32 ackto, ack_ts; + u8 *dst, *src; + struct ieee80211_sta *sta; + struct ath_node *an; + struct ts_info *st_ts; + struct ath_dynack *da = &ah->dynack; + + rcu_read_lock(); + + while (da->st_rbf.h_rb != da->st_rbf.t_rb && + da->ack_rbf.h_rb != da->ack_rbf.t_rb) { + ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb]; + st_ts = &da->st_rbf.ts[da->st_rbf.h_rb]; + dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest; + src = da->st_rbf.addr[da->st_rbf.h_rb].h_src; + + ath_dbg(ath9k_hw_common(ah), DYNACK, + "ack_ts %u st_ts %u st_dur %u [%u-%u]\n", + ack_ts, st_ts->tstamp, st_ts->dur, + da->ack_rbf.h_rb, da->st_rbf.h_rb); + + if (ack_ts > st_ts->tstamp + st_ts->dur) { + ackto = ack_ts - st_ts->tstamp - st_ts->dur; + + if (ackto < MAX_DELAY) { + sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst, + src); + if (sta) { + an = (struct ath_node *)sta->drv_priv; + an->ackto = ath_dynack_ewma(an->ackto, + ackto); + ath_dbg(ath9k_hw_common(ah), DYNACK, + "%pM to %u\n", dst, an->ackto); + if 
(time_is_before_jiffies(da->lto)) { + ath_dynack_compute_ackto(ah); + da->lto = jiffies + COMPUTE_TO; + } + } + INCR(da->ack_rbf.h_rb, ATH_DYN_BUF); + } + INCR(da->st_rbf.h_rb, ATH_DYN_BUF); + } else { + INCR(da->ack_rbf.h_rb, ATH_DYN_BUF); + } + } + + rcu_read_unlock(); +} + +/** + * ath_dynack_sample_tx_ts - status timestamp sampling method + * @ah: ath hw + * @skb: socket buffer + * @ts: tx status info + * + */ +void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, + struct ath_tx_status *ts) +{ + u8 ridx; + struct ieee80211_hdr *hdr; + struct ath_dynack *da = &ah->dynack; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled) + return; + + spin_lock_bh(&da->qlock); + + hdr = (struct ieee80211_hdr *)skb->data; + + /* late ACK */ + if (ts->ts_status & ATH9K_TXERR_XRETRY) { + if (ieee80211_is_assoc_req(hdr->frame_control) || + ieee80211_is_assoc_resp(hdr->frame_control)) { + ath_dbg(common, DYNACK, "late ack\n"); + ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2); + ath9k_hw_set_ack_timeout(ah, LATEACK_TO); + ath9k_hw_set_cts_timeout(ah, LATEACK_TO); + da->lto = jiffies + LATEACK_DELAY; + } + + spin_unlock_bh(&da->qlock); + return; + } + + ridx = ts->ts_rateindex; + + da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp; + da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration; + ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1); + ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2); + + if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) { + u32 phy, sifs; + const struct ieee80211_rate *rate; + struct ieee80211_tx_rate *rates = info->status.rates; + + rate = &common->sbands[info->band].bitrates[rates[ridx].idx]; + if (info->band == IEEE80211_BAND_2GHZ && + !(rate->flags & IEEE80211_RATE_ERP_G)) + phy = WLAN_RC_PHY_CCK; + else + phy = WLAN_RC_PHY_OFDM; + + sifs = ath_dynack_get_sifs(ah, phy); + da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs; + } + + ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n", + hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp, + da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb, + (da->st_rbf.t_rb + 1) % ATH_DYN_BUF); + + INCR(da->st_rbf.t_rb, ATH_DYN_BUF); + if (da->st_rbf.t_rb == da->st_rbf.h_rb) + INCR(da->st_rbf.h_rb, ATH_DYN_BUF); + + ath_dynack_compute_to(ah); + + spin_unlock_bh(&da->qlock); +} +EXPORT_SYMBOL(ath_dynack_sample_tx_ts); + +/** + * ath_dynack_sample_ack_ts - ACK timestamp sampling method + * @ah: ath hw + * @skb: socket buffer + * @ts: rx timestamp + * + */ +void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, + u32 ts) +{ + struct ath_dynack *da = &ah->dynack; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled) + return; + + spin_lock_bh(&da->qlock); + da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts; + + ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n", + da->ack_rbf.tstamp[da->ack_rbf.t_rb], + da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF); + + INCR(da->ack_rbf.t_rb, ATH_DYN_BUF); + if (da->ack_rbf.t_rb == da->ack_rbf.h_rb) + INCR(da->ack_rbf.h_rb, ATH_DYN_BUF); + + ath_dynack_compute_to(ah); + + spin_unlock_bh(&da->qlock); +} +EXPORT_SYMBOL(ath_dynack_sample_ack_ts); + +/** + * ath_dynack_node_init - init ath_node related info + * @ah: ath hw + * @an: ath node + * + */ +void ath_dynack_node_init(struct ath_hw 
*ah, struct ath_node *an) +{ + /* ackto = slottime + sifs + air delay */ + u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64; + struct ath_dynack *da = &ah->dynack; + + an->ackto = ackto; + + spin_lock(&da->qlock); + list_add_tail(&an->list, &da->nodes); + spin_unlock(&da->qlock); +} +EXPORT_SYMBOL(ath_dynack_node_init); + +/** + * ath_dynack_node_deinit - deinit ath_node related info + * @ah: ath hw + * @an: ath node + * + */ +void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an) +{ + struct ath_dynack *da = &ah->dynack; + + spin_lock(&da->qlock); + list_del(&an->list); + spin_unlock(&da->qlock); +} +EXPORT_SYMBOL(ath_dynack_node_deinit); + +/** + * ath_dynack_reset - reset dynack processing + * @ah: ath hw + * + */ +void ath_dynack_reset(struct ath_hw *ah) +{ + /* ackto = slottime + sifs + air delay */ + u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64; + struct ath_dynack *da = &ah->dynack; + + da->lto = jiffies; + da->ackto = ackto; + + da->st_rbf.t_rb = 0; + da->st_rbf.h_rb = 0; + da->ack_rbf.t_rb = 0; + da->ack_rbf.h_rb = 0; + + /* init acktimeout */ + ath9k_hw_setslottime(ah, (ackto - 3) / 2); + ath9k_hw_set_ack_timeout(ah, ackto); + ath9k_hw_set_cts_timeout(ah, ackto); +} +EXPORT_SYMBOL(ath_dynack_reset); + +/** + * ath_dynack_init - init dynack data structure + * @ah: ath hw + * + */ +void ath_dynack_init(struct ath_hw *ah) +{ + struct ath_dynack *da = &ah->dynack; + + memset(da, 0, sizeof(struct ath_dynack)); + + spin_lock_init(&da->qlock); + INIT_LIST_HEAD(&da->nodes); + + ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION; +} diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h new file mode 100644 index 0000000000000000000000000000000000000000..6d7bef976742c1a87b498c35927e27476c314853 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/dynack.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef DYNACK_H +#define DYNACK_H + +#define ATH_DYN_BUF 64 + +struct ath_hw; +struct ath_node; + +/** + * struct ath_dyn_rxbuf - ACK frame ring buffer + * @h_rb: ring buffer head + * @t_rb: ring buffer tail + * @tstamp: ACK RX timestamp buffer + */ +struct ath_dyn_rxbuf { + u16 h_rb, t_rb; + u32 tstamp[ATH_DYN_BUF]; +}; + +struct ts_info { + u32 tstamp; + u32 dur; +}; + +struct haddr_pair { + u8 h_dest[ETH_ALEN]; + u8 h_src[ETH_ALEN]; +}; + +/** + * struct ath_dyn_txbuf - tx frame ring buffer + * @h_rb: ring buffer head + * @t_rb: ring buffer tail + * @addr: dest/src address pair for a given TX frame + * @ts: TX frame timestamp buffer + */ +struct ath_dyn_txbuf { + u16 h_rb, t_rb; + struct haddr_pair addr[ATH_DYN_BUF]; + struct ts_info ts[ATH_DYN_BUF]; +}; + +/** + * struct ath_dynack - dynack processing info + * @enabled: enable dyn ack processing + * @ackto: current ACK timeout + * @lto: last ACK timeout computation + * @nodes: ath_node linked list + * @qlock: ts queue spinlock + * @ack_rbf: ACK ts ring buffer + * @st_rbf: status ts ring buffer + */ +struct ath_dynack { + bool enabled; + int ackto; + unsigned long lto; + + struct list_head nodes; + + /* protect timestamp queue access */ + spinlock_t qlock; + struct ath_dyn_rxbuf ack_rbf; + struct ath_dyn_txbuf st_rbf; +}; + +#if defined(CONFIG_ATH9K_DYNACK) +void ath_dynack_reset(struct ath_hw *ah); +void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an); +void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an); +void ath_dynack_init(struct ath_hw *ah); +void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts); +void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, + struct ath_tx_status *ts); +#else +static inline void ath_dynack_init(struct ath_hw *ah) {} +static inline void ath_dynack_node_init(struct ath_hw *ah, + struct ath_node *an) {} +static inline void ath_dynack_node_deinit(struct ath_hw *ah, + struct ath_node *an) {} +static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah, + struct sk_buff *skb, u32 ts) {} +static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah, + struct sk_buff *skb, + struct ath_tx_status *ts) {} +#endif + +#endif /* DYNACK_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 8a3bd5fe3a548f9ac438691ea57a1f583f0291af..d779f4fa50e3a8392c697aaf0612bdea7ad99ad1 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -592,6 +592,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + hw->queues = 4; hw->max_listen_interval = 1; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 5627917c5ff761137061467e1b9d71f281ce0652..994fff1ff5198db245b3a3e2250105bfb6e6a8b9 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1722,7 +1722,7 @@ static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) } static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw, - u8 coverage_class) + s16 coverage_class) { struct ath9k_htc_priv *priv = hw->priv; diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index a47ea8423f1eb2a6a8d43f4d451475f55bac6674..8e85efeaeffcf67d08dcc9e94d793f5bb0559a9f 100644 --- 
a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -67,6 +67,12 @@ static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds, return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts); } +static inline int ath9k_hw_get_duration(struct ath_hw *ah, const void *ds, + int index) +{ + return ath9k_hw_ops(ah)->get_duration(ah, ds, index); +} + static inline void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf) { diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 69bbea1184d211b5f47b1d99bce99226b6ba9f65..8be4b145339426b7ea8a47a4788c9dd6be04b238 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -222,31 +222,28 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) { u32 val; + if (ah->get_mac_revision) + ah->hw_version.macRev = ah->get_mac_revision(); + switch (ah->hw_version.devid) { case AR5416_AR9100_DEVID: ah->hw_version.macVersion = AR_SREV_VERSION_9100; break; case AR9300_DEVID_AR9330: ah->hw_version.macVersion = AR_SREV_VERSION_9330; - if (ah->get_mac_revision) { - ah->hw_version.macRev = ah->get_mac_revision(); - } else { + if (!ah->get_mac_revision) { val = REG_READ(ah, AR_SREV); ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); } return; case AR9300_DEVID_AR9340: ah->hw_version.macVersion = AR_SREV_VERSION_9340; - val = REG_READ(ah, AR_SREV); - ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); return; case AR9300_DEVID_QCA955X: ah->hw_version.macVersion = AR_SREV_VERSION_9550; return; case AR9300_DEVID_AR953X: ah->hw_version.macVersion = AR_SREV_VERSION_9531; - if (ah->get_mac_revision) - ah->hw_version.macRev = ah->get_mac_revision(); return; } @@ -647,6 +644,8 @@ int ath9k_hw_init(struct ath_hw *ah) return ret; } + ath_dynack_init(ah); + return 0; } EXPORT_SYMBOL(ath9k_hw_init); @@ -702,6 +701,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, { u32 pll; + pll = ath9k_hw_compute_pll_control(ah, chan); + if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) { /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, @@ -752,7 +753,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, AR_CH0_DPLL3_PHASE_SHIFT, 0x1); - REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); + REG_WRITE(ah, AR_RTC_PLL_CONTROL, + pll | AR_RTC_9300_PLL_BYPASS); udelay(1000); /* program refdiv, nint, frac to RTC register */ @@ -768,7 +770,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) { u32 regval, pll2_divint, pll2_divfrac, refdiv; - REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); + REG_WRITE(ah, AR_RTC_PLL_CONTROL, + pll | AR_RTC_9300_SOC_PLL_BYPASS); udelay(1000); REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16); @@ -841,7 +844,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, udelay(1000); } - pll = ath9k_hw_compute_pll_control(ah, chan); if (AR_SREV_9565(ah)) pll |= 0x40000; REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); @@ -935,21 +937,21 @@ static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us) REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val); } -static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) +void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) 0xFFFF); REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); } -static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) +void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); 
val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); } -static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) +void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) { u32 val = ath9k_hw_mac_to_clks(ah, us); val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); @@ -1053,6 +1055,14 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah) ctstimeout += 48 - sifstime - ah->slottime; } + if (ah->dynack.enabled) { + acktimeout = ah->dynack.ackto; + ctstimeout = acktimeout; + slottime = (acktimeout - 3) / 2; + } else { + ah->dynack.ackto = acktimeout; + } + ath9k_hw_set_sifs_time(ah, sifstime); ath9k_hw_setslottime(ah, slottime); ath9k_hw_set_ack_timeout(ah, acktimeout); @@ -1182,9 +1192,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) switch (opmode) { case NL80211_IFTYPE_ADHOC: - set |= AR_STA_ID1_ADHOC; - REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); - break; + if (!AR_SREV_9340_13(ah)) { + set |= AR_STA_ID1_ADHOC; + REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + } + /* fall through */ case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: set |= AR_STA_ID1_STA_AP; @@ -1954,6 +1967,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (AR_SREV_9565(ah) && common->bt_ant_diversity) REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON); + if (ah->hw->conf.radar_enabled) { + /* set HW specific DFS configuration */ + ah->radar_conf.ext_channel = IS_CHAN_HT40(chan); + ath9k_hw_set_radar_params(ah); + } + return 0; } EXPORT_SYMBOL(ath9k_hw_reset); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 51b4ebe04c04faaf612c744b2bbad93001c46622..975074fc11bcdda44e65e4eaabe05b75b51c90c9 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -29,6 +29,7 @@ #include "reg.h" #include "phy.h" #include "btcoex.h" +#include "dynack.h" #include "../regd.h" @@ -690,6 +691,7 @@ struct ath_hw_ops { struct ath_tx_info *i); int (*proc_txdesc)(struct ath_hw *ah, void *ds, struct ath_tx_status *ts); + int (*get_duration)(struct ath_hw *ah, const void *ds, int index); void (*antdiv_comb_conf_get)(struct ath_hw *ah, struct ath_hw_antcomb_conf *antconf); void (*antdiv_comb_conf_set)(struct ath_hw *ah, @@ -924,6 +926,8 @@ struct ath_hw { int (*external_reset)(void); const struct firmware *eeprom_blob; + + struct ath_dynack dynack; }; struct ath_bus_ops { @@ -1080,6 +1084,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan); void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning); void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); +void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us); +void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us); +void ath9k_hw_setslottime(struct ath_hw *ah, u32 us); + #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah) { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 39419ea845cc0640ea30927e600e5c2f769cf075..156a944134dcfb09ee139ccc2ffde1af306bb936 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -61,10 +61,14 @@ static int ath9k_ps_enable; module_param_named(ps_enable, ath9k_ps_enable, int, 0444); MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + int ath9k_use_chanctx; module_param_named(use_chanctx, ath9k_use_chanctx, 
int, 0444); MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency"); +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + bool is_ath9k_unloaded; #ifdef CONFIG_MAC80211_LEDS @@ -511,7 +515,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, sc->tx99_power = MAX_RATE_POWER + 1; init_waitqueue_head(&sc->tx_wait); sc->cur_chan = &sc->chanctx[0]; - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) sc->cur_chan->hw_queue_base = 0; if (!pdata || pdata->use_eeprom) { @@ -567,11 +571,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc); INIT_WORK(&sc->hw_reset_work, ath_reset_work); INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); - INIT_WORK(&sc->chanctx_work, ath_chanctx_work); INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work); - setup_timer(&sc->offchannel.timer, ath_offchannel_timer, - (unsigned long)sc); - setup_timer(&sc->sched.timer, ath_chanctx_timer, (unsigned long)sc); + + ath9k_init_channel_context(sc); /* * Cache line size is used to size and align various @@ -600,13 +602,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) goto err_btcoex; - sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer, - NULL, sc, AR_FIRST_NDP_TIMER); + ret = ath9k_init_p2p(sc); + if (ret) + goto err_btcoex; ath9k_cmn_init_crypto(sc->sc_ah); ath9k_init_misc(sc); ath_fill_led_pin(sc); ath_chanctx_init(sc); + ath9k_offchannel_init(sc); if (common->bus_ops->aspm_init) common->bus_ops->aspm_init(common); @@ -672,18 +676,14 @@ static const struct ieee80211_iface_limit wds_limits[] = { { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) }, }; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + static const struct ieee80211_iface_limit if_limits_multi[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) }, - { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, -}; - -static const struct ieee80211_iface_limit if_dfs_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, }; static const struct ieee80211_iface_combination if_comb_multi[] = { @@ -696,6 +696,16 @@ static const struct ieee80211_iface_combination if_comb_multi[] = { }, }; +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + +static const struct ieee80211_iface_limit if_dfs_limits[] = { + { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC) }, +}; + static const struct ieee80211_iface_combination if_comb[] = { { .limits = if_limits, @@ -753,8 +763,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) hw->flags |= IEEE80211_HW_MFP_CAPABLE; - hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR | - NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE); + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_P2P_GO_CTWIN; if (!config_enabled(CONFIG_ATH9K_TX99)) { hw->wiphy->interface_modes = @@ -763,24 +774,31 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | - 
BIT(NL80211_IFTYPE_MESH_POINT); - if (!ath9k_use_chanctx) { + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_WDS); + hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS); - } else { - hw->wiphy->iface_combinations = if_comb_multi; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(if_comb_multi); - hw->wiphy->max_scan_ssids = 255; - hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; - hw->wiphy->max_remain_on_channel_duration = 10000; - hw->chanctx_data_size = sizeof(void *); - hw->extra_beacon_tailroom = - sizeof(struct ieee80211_p2p_noa_attr) + 9; - } } +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + + if (ath9k_is_chanctx_enabled()) { + hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS); + hw->wiphy->iface_combinations = if_comb_multi; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi); + hw->wiphy->max_scan_ssids = 255; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->max_remain_on_channel_duration = 10000; + hw->chanctx_data_size = sizeof(void *); + hw->extra_beacon_tailroom = + sizeof(struct ieee80211_p2p_noa_attr) + 9; + + ath_dbg(common, CHAN_CTX, "Use channel contexts\n"); + } + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -793,7 +811,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) /* allow 4 queues per channel context + * 1 cab queue + 1 offchannel tx queue */ - hw->queues = 10; + hw->queues = ATH9K_NUM_TX_QUEUES; /* last queue for offchannel */ hw->offchannel_tx_hw_queue = hw->queues - 1; hw->max_rates = 4; @@ -915,9 +933,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc) { int i = 0; - if (sc->p2p_ps_timer) - ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer); - + ath9k_deinit_p2p(sc); ath9k_deinit_btcoex(sc); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 6c56cafa5ca491fb284adad4fb156af9caa322b4..aa69ceaad0be325e05f1c98e6f22c14b8f4b84d5 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -121,6 +121,7 @@ struct ath_tx_status { u32 evm0; u32 evm1; u32 evm2; + u32 duration; }; struct ath_rx_status { diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 4b148bbb2bf6c40d58d38fd835c75216d9139bac..205162449b726a135fa7d4b27ab90274e7758ca8 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -60,8 +60,10 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) spin_lock_bh(&txq->axq_lock); - if (txq->axq_depth) + if (txq->axq_depth) { pending = true; + goto out; + } if (txq->mac80211_qnum >= 0) { struct list_head *list; @@ -70,6 +72,7 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) if (!list_empty(list)) pending = true; } +out: spin_unlock_bh(&txq->axq_lock); return pending; } @@ -223,18 +226,12 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long flags; - int i; - - if (ath_startrecv(sc) != 0) { - ath_err(common, "Unable to restart recv logic\n"); - return false; - } + ath9k_calculate_summary_state(sc, sc->cur_chan); + ath_startrecv(sc); ath9k_cmn_update_txpow(ah, sc->curtxpow, sc->cur_chan->txpower, &sc->curtxpow); - 
clear_bit(ATH_OP_HW_RESET, &common->op_flags); - ath9k_calculate_summary_state(sc, sc->cur_chan); if (!sc->cur_chan->offchannel && start) { /* restore per chanctx TSF timer */ @@ -267,22 +264,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) ath9k_hw_set_interrupts(ah); ath9k_hw_enable_interrupts(ah); - - if (!ath9k_use_chanctx) - ieee80211_wake_queues(sc->hw); - else { - if (sc->cur_chan == &sc->offchannel.chan) - ieee80211_wake_queue(sc->hw, - sc->hw->offchannel_tx_hw_queue); - else { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - ieee80211_wake_queue(sc->hw, - sc->cur_chan->hw_queue_base + i); - } - if (ah->opmode == NL80211_IFTYPE_AP) - ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); - } - + ieee80211_wake_queues(sc->hw); ath9k_p2p_ps_timer(sc); return true; @@ -314,6 +296,9 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) if (!ath_prepare_reset(sc)) fastcc = false; + if (ath9k_is_chanctx_enabled()) + fastcc = false; + spin_lock_bh(&sc->chan_lock); sc->cur_chandef = sc->cur_chan->chandef; spin_unlock_bh(&sc->chan_lock); @@ -358,12 +343,16 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, memset(&an->key_idx, 0, sizeof(an->key_idx)); ath_tx_node_init(sc, an); + + ath_dynack_node_init(sc->sc_ah, an); } static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; ath_tx_node_cleanup(sc, an); + + ath_dynack_node_deinit(sc->sc_ah, an); } void ath9k_tasklet(unsigned long data) @@ -822,7 +811,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) struct ath_common *common = ath9k_hw_common(ah); bool prev_idle; - cancel_work_sync(&sc->chanctx_work); + ath9k_deinit_channel_context(sc); + mutex_lock(&sc->mutex); ath_cancel_work(sc); @@ -903,9 +893,10 @@ static bool ath9k_uses_beacons(int type) } } -static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, + u8 *mac, struct ieee80211_vif *vif) { - struct ath9k_vif_iter_data *iter_data = data; + struct ath_vif *avp = (struct ath_vif *)vif->drv_priv; int i; if (iter_data->has_hw_macaddr) { @@ -923,12 +914,10 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_AP: iter_data->naps++; - if (vif->bss_conf.enable_beacon) - iter_data->beacons = true; break; case NL80211_IFTYPE_STATION: iter_data->nstations++; - if (vif->bss_conf.assoc && !iter_data->primary_sta) + if (avp->assoc && !iter_data->primary_sta) iter_data->primary_sta = vif; break; case NL80211_IFTYPE_ADHOC: @@ -949,6 +938,34 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) } } +static void ath9k_update_bssid_mask(struct ath_softc *sc, + struct ath_chanctx *ctx, + struct ath9k_vif_iter_data *iter_data) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp; + int i; + + if (!ath9k_is_chanctx_enabled()) + return; + + list_for_each_entry(avp, &ctx->vifs, list) { + if (ctx->nvifs_assigned != 1) + continue; + + if (!avp->vif->p2p || !iter_data->has_hw_macaddr) + continue; + + ether_addr_copy(common->curbssid, avp->bssid); + + /* perm_addr will be used as the p2p device address. */ + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= + ~(iter_data->hw_macaddr[i] ^ + sc->hw->wiphy->perm_addr[i]); + } +} + /* Called with sc->mutex held. 
*/ void ath9k_calculate_iter_data(struct ath_softc *sc, struct ath_chanctx *ctx, @@ -968,38 +985,20 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, list_for_each_entry(avp, &ctx->vifs, list) ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif); - if (ctx == &sc->offchannel.chan) { - struct ieee80211_vif *vif; - - if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START) - vif = sc->offchannel.scan_vif; - else - vif = sc->offchannel.roc_vif; - - if (vif) - ath9k_vif_iter(iter_data, vif->addr, vif); - iter_data->beacons = false; - } + ath9k_update_bssid_mask(sc, ctx, iter_data); } static void ath9k_set_assoc_state(struct ath_softc *sc, struct ieee80211_vif *vif, bool changed) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ath_vif *avp = (struct ath_vif *)vif->drv_priv; unsigned long flags; set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); - /* Set the AID, BSSID and do beacon-sync only when - * the HW opmode is STATION. - * - * But the primary bit is set above in any case. - */ - if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) - return; - ether_addr_copy(common->curbssid, bss_conf->bssid); - common->curaid = bss_conf->aid; + ether_addr_copy(common->curbssid, avp->bssid); + common->curaid = avp->aid; ath9k_hw_write_associd(sc->sc_ah); if (changed) { @@ -1019,6 +1018,43 @@ static void ath9k_set_assoc_state(struct ath_softc *sc, vif->addr, common->curbssid); } +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT +static void ath9k_set_offchannel_state(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_vif *vif = NULL; + + ath9k_ps_wakeup(sc); + + if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START) + vif = sc->offchannel.scan_vif; + else + vif = sc->offchannel.roc_vif; + + if (WARN_ON(!vif)) + goto exit; + + eth_zero_addr(common->curbssid); + eth_broadcast_addr(common->bssidmask); + ether_addr_copy(common->macaddr, vif->addr); + common->curaid = 0; + ah->opmode = vif->type; + ah->imask &= ~ATH9K_INT_SWBA; + ah->imask &= ~ATH9K_INT_TSFOOR; + ah->slottime = ATH9K_SLOT_TIME_9; + + ath_hw_setbssidmask(common); + ath9k_hw_setopmode(ah); + ath9k_hw_write_associd(sc->sc_ah); + ath9k_hw_set_interrupts(ah); + ath9k_hw_init_global_settings(ah); + +exit: + ath9k_ps_restore(sc); +} +#endif + /* Called with sc->mutex held. 
*/ void ath9k_calculate_summary_state(struct ath_softc *sc, struct ath_chanctx *ctx) @@ -1026,12 +1062,18 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_vif_iter_data iter_data; + struct ath_beacon_config *cur_conf; ath_chanctx_check_active(sc, ctx); if (ctx != sc->cur_chan) return; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + if (ctx == &sc->offchannel.chan) + return ath9k_set_offchannel_state(sc); +#endif + ath9k_ps_wakeup(sc); ath9k_calculate_iter_data(sc, ctx, &iter_data); @@ -1042,8 +1084,11 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ath_hw_setbssidmask(common); if (iter_data.naps > 0) { + cur_conf = &ctx->beacon; ath9k_hw_set_tsfadjust(ah, true); ah->opmode = NL80211_IFTYPE_AP; + if (cur_conf->enable_beacon) + iter_data.beacons = true; } else { ath9k_hw_set_tsfadjust(ah, false); @@ -1072,13 +1117,11 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, if (ah->opmode == NL80211_IFTYPE_STATION) { bool changed = (iter_data.primary_sta != ctx->primary_sta); - iter_data.beacons = true; if (iter_data.primary_sta) { + iter_data.beacons = true; ath9k_set_assoc_state(sc, iter_data.primary_sta, changed); - if (!ctx->primary_sta || - !ctx->primary_sta->bss_conf.assoc) - ctx->primary_sta = iter_data.primary_sta; + ctx->primary_sta = iter_data.primary_sta; } else { ctx->primary_sta = NULL; memset(common->curbssid, 0, ETH_ALEN); @@ -1107,11 +1150,27 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, else clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags); - ctx->primary_sta = iter_data.primary_sta; + ath_dbg(common, CONFIG, + "macaddr: %pM, bssid: %pM, bssidmask: %pM\n", + common->macaddr, common->curbssid, common->bssidmask); ath9k_ps_restore(sc); } +static void ath9k_assign_hw_queues(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) + vif->hw_queue[i] = i; + + if (vif->type == NL80211_IFTYPE_AP) + vif->cab_queue = hw->queues - 2; + else + vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; +} + static int ath9k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1120,12 +1179,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_node *an = &avp->mcast_node; - int i; mutex_lock(&sc->mutex); if (config_enabled(CONFIG_ATH9K_TX99)) { - if (sc->nvifs >= 1) { + if (sc->cur_chan->nvifs >= 1) { mutex_unlock(&sc->mutex); return -EOPNOTSUPP; } @@ -1133,22 +1191,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, } ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); - sc->nvifs++; + sc->cur_chan->nvifs++; if (ath9k_uses_beacons(vif->type)) ath9k_beacon_assign_slot(sc, vif); avp->vif = vif; - if (!ath9k_use_chanctx) { + if (!ath9k_is_chanctx_enabled()) { avp->chanctx = sc->cur_chan; list_add_tail(&avp->list, &avp->chanctx->vifs); } - for (i = 0; i < IEEE80211_NUM_ACS; i++) - vif->hw_queue[i] = i; - if (vif->type == NL80211_IFTYPE_AP) - vif->cab_queue = hw->queues - 2; - else - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; + + ath9k_assign_hw_queues(hw, vif); an->sc = sc; an->sta = NULL; @@ -1168,7 +1222,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; - int i; mutex_lock(&sc->mutex); @@ -1188,43 +1241,13 @@ static int 
ath9k_change_interface(struct ieee80211_hw *hw, if (ath9k_uses_beacons(vif->type)) ath9k_beacon_assign_slot(sc, vif); - for (i = 0; i < IEEE80211_NUM_ACS; i++) - vif->hw_queue[i] = i; - - if (vif->type == NL80211_IFTYPE_AP) - vif->cab_queue = hw->queues - 2; - else - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; - + ath9k_assign_hw_queues(hw, vif); ath9k_calculate_summary_state(sc, avp->chanctx); mutex_unlock(&sc->mutex); return 0; } -static void -ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) -{ - struct ath_hw *ah = sc->sc_ah; - s32 tsf, target_tsf; - - if (!avp || !avp->noa.has_next_tsf) - return; - - ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer); - - tsf = ath9k_hw_gettsf32(sc->sc_ah); - - target_tsf = avp->noa.next_tsf; - if (!avp->noa.absent) - target_tsf -= ATH_P2P_PS_STOP_TIME; - - if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME) - target_tsf = tsf + ATH_P2P_PS_STOP_TIME; - - ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000); -} - static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1236,16 +1259,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); - spin_lock_bh(&sc->sc_pcu_lock); - if (avp == sc->p2p_ps_vif) { - sc->p2p_ps_vif = NULL; - ath9k_update_p2p_ps_timer(sc, NULL); - } - spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_p2p_remove_vif(sc, vif); - sc->nvifs--; + sc->cur_chan->nvifs--; sc->tx99_vif = NULL; - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) list_del(&avp->list); if (ath9k_uses_beacons(vif->type)) @@ -1423,7 +1441,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) } } - if (!ath9k_use_chanctx && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL); ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); } @@ -1463,7 +1481,10 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; - sc->rx.rxfilter = *total_flags; + spin_lock_bh(&sc->chan_lock); + sc->cur_chan->rxfilter = *total_flags; + spin_unlock_bh(&sc->chan_lock); + ath9k_ps_wakeup(sc); rfilt = ath_calcrxfilter(sc); ath9k_hw_setrxfilter(sc->sc_ah, rfilt); @@ -1687,70 +1708,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw, return ret; } -void ath9k_p2p_ps_timer(void *priv) -{ - struct ath_softc *sc = priv; - struct ath_vif *avp = sc->p2p_ps_vif; - struct ieee80211_vif *vif; - struct ieee80211_sta *sta; - struct ath_node *an; - u32 tsf; - - del_timer_sync(&sc->sched.timer); - ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); - - if (!avp || avp->chanctx != sc->cur_chan) - return; - - tsf = ath9k_hw_gettsf32(sc->sc_ah); - if (!avp->noa.absent) - tsf += ATH_P2P_PS_STOP_TIME; - - if (!avp->noa.has_next_tsf || - avp->noa.next_tsf - tsf > BIT(31)) - ieee80211_update_p2p_noa(&avp->noa, tsf); - - ath9k_update_p2p_ps_timer(sc, avp); - - rcu_read_lock(); - - vif = avp->vif; - sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); - if (!sta) - goto out; - - an = (void *) sta->drv_priv; - if (an->sleeping == !!avp->noa.absent) - goto out; - - an->sleeping = avp->noa.absent; - if (an->sleeping) - ath_tx_aggr_sleep(sta, sc, an); - else - ath_tx_aggr_wakeup(sc, an); - -out: - rcu_read_unlock(); -} - -void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) -{ - struct ath_vif *avp = (void 
*)vif->drv_priv; - u32 tsf; - - if (!sc->p2p_ps_timer) - return; - - if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) - return; - - sc->p2p_ps_vif = avp; - tsf = ath9k_hw_gettsf32(sc->sc_ah); - ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf); - ath9k_update_p2p_ps_timer(sc, avp); -} - static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1765,7 +1722,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; - unsigned long flags; int slottime; ath9k_ps_wakeup(sc); @@ -1775,9 +1731,17 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n", bss_conf->bssid, bss_conf->assoc); + ether_addr_copy(avp->bssid, bss_conf->bssid); + avp->aid = bss_conf->aid; + avp->assoc = bss_conf->assoc; + ath9k_calculate_summary_state(sc, avp->chanctx); - if (bss_conf->assoc) - ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_ASSOC); + + if (ath9k_is_chanctx_enabled()) { + if (bss_conf->assoc) + ath_chanctx_event(sc, vif, + ATH_CHANCTX_EVENT_ASSOC); + } } if (changed & BSS_CHANGED_IBSS) { @@ -1789,9 +1753,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, if ((changed & BSS_CHANGED_BEACON_ENABLED) || (changed & BSS_CHANGED_BEACON_INT) || (changed & BSS_CHANGED_BEACON_INFO)) { + ath9k_beacon_config(sc, vif, changed); if (changed & BSS_CHANGED_BEACON_ENABLED) ath9k_calculate_summary_state(sc, avp->chanctx); - ath9k_beacon_config(sc, vif, changed); } if ((avp->chanctx == sc->cur_chan) && @@ -1814,14 +1778,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_P2P_PS) { - spin_lock_bh(&sc->sc_pcu_lock); - spin_lock_irqsave(&sc->sc_pm_lock, flags); - if (!(sc->ps_flags & PS_BEACON_SYNC)) - ath9k_update_p2p_ps(sc, vif); - spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - spin_unlock_bh(&sc->sc_pcu_lock); - } + if (changed & BSS_CHANGED_P2P_PS) + ath9k_p2p_bss_info_changed(sc, vif); if (changed & CHECK_ANI) ath_check_ani(sc); @@ -1959,7 +1917,22 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, return 0; } -static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) +static void ath9k_enable_dynack(struct ath_softc *sc) +{ +#ifdef CONFIG_ATH9K_DYNACK + u32 rfilt; + struct ath_hw *ah = sc->sc_ah; + + ath_dynack_reset(ah); + + ah->dynack.enabled = true; + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); +#endif +} + +static void ath9k_set_coverage_class(struct ieee80211_hw *hw, + s16 coverage_class) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; @@ -1968,11 +1941,22 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) return; mutex_lock(&sc->mutex); - ah->coverage_class = coverage_class; - ath9k_ps_wakeup(sc); - ath9k_hw_init_global_settings(ah); - ath9k_ps_restore(sc); + if (coverage_class >= 0) { + ah->coverage_class = coverage_class; + if (ah->dynack.enabled) { + u32 rfilt; + + ah->dynack.enabled = false; + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + } + ath9k_ps_wakeup(sc); + ath9k_hw_init_global_settings(ah); + ath9k_ps_restore(sc); + } else if (!ah->dynack.enabled) { + ath9k_enable_dynack(sc); + } mutex_unlock(&sc->mutex); } @@ -1985,9 +1969,6 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc) if (!ATH_TXQ_SETUP(sc, i)) continue; - if 
(!sc->tx.txq[i].axq_depth) - continue; - npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]); if (npend) break; @@ -2013,7 +1994,6 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) struct ath_common *common = ath9k_hw_common(ah); int timeout = HZ / 5; /* 200 ms */ bool drain_txq; - int i; cancel_delayed_work_sync(&sc->tx_complete_work); @@ -2041,10 +2021,6 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) ath_reset(sc); ath9k_ps_restore(sc); - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - ieee80211_wake_queue(sc->hw, - sc->cur_chan->hw_queue_base + i); - } } ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); @@ -2053,16 +2029,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - int i; - - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (!ATH_TXQ_SETUP(sc, i)) - continue; - if (ath9k_has_pending_frames(sc, &sc->tx.txq[i])) - return true; - } - return false; + return ath9k_has_tx_pending(sc); } static int ath9k_tx_last_beacon(struct ieee80211_hw *hw) @@ -2207,207 +2175,7 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) clear_bit(ATH_OP_SCANNING, &common->op_flags); } -static int ath_scan_channel_duration(struct ath_softc *sc, - struct ieee80211_channel *chan) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - - if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR)) - return (HZ / 9); /* ~110 ms */ - - return (HZ / 16); /* ~60 ms */ -} - -static void -ath_scan_next_channel(struct ath_softc *sc) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - struct ieee80211_channel *chan; - - if (sc->offchannel.scan_idx >= req->n_channels) { - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), - NULL); - return; - } - - chan = req->channels[sc->offchannel.scan_idx++]; - sc->offchannel.duration = ath_scan_channel_duration(sc, chan); - sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND; - ath_chanctx_offchan_switch(sc, chan); -} - -static void ath_offchannel_next(struct ath_softc *sc) -{ - struct ieee80211_vif *vif; - - if (sc->offchannel.scan_req) { - vif = sc->offchannel.scan_vif; - sc->offchannel.chan.txpower = vif->bss_conf.txpower; - ath_scan_next_channel(sc); - } else if (sc->offchannel.roc_vif) { - vif = sc->offchannel.roc_vif; - sc->offchannel.chan.txpower = vif->bss_conf.txpower; - sc->offchannel.duration = sc->offchannel.roc_duration; - sc->offchannel.state = ATH_OFFCHANNEL_ROC_START; - ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan); - } else { - ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), - NULL); - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - if (sc->ps_idle) - ath_cancel_work(sc); - } -} - -static void ath_roc_complete(struct ath_softc *sc, bool abort) -{ - sc->offchannel.roc_vif = NULL; - sc->offchannel.roc_chan = NULL; - if (!abort) - ieee80211_remain_on_channel_expired(sc->hw); - ath_offchannel_next(sc); - ath9k_ps_restore(sc); -} - -static void ath_scan_complete(struct ath_softc *sc, bool abort) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - - sc->offchannel.scan_req = NULL; - sc->offchannel.scan_vif = NULL; - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - ieee80211_scan_completed(sc->hw, abort); - clear_bit(ATH_OP_SCANNING, &common->op_flags); - ath_offchannel_next(sc); - ath9k_ps_restore(sc); -} - -static void ath_scan_send_probe(struct ath_softc *sc, - struct cfg80211_ssid *ssid) 
-{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - struct ieee80211_vif *vif = sc->offchannel.scan_vif; - struct ath_tx_control txctl = {}; - struct sk_buff *skb; - struct ieee80211_tx_info *info; - int band = sc->offchannel.chan.chandef.chan->band; - - skb = ieee80211_probereq_get(sc->hw, vif, - ssid->ssid, ssid->ssid_len, req->ie_len); - if (!skb) - return; - - info = IEEE80211_SKB_CB(skb); - if (req->no_cck) - info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; - - if (req->ie_len) - memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len); - - skb_set_queue_mapping(skb, IEEE80211_AC_VO); - - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) - goto error; - - txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; - txctl.force_channel = true; - if (ath_tx_start(sc->hw, skb, &txctl)) - goto error; - - return; - -error: - ieee80211_free_txskb(sc->hw, skb); -} - -static void ath_scan_channel_start(struct ath_softc *sc) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - int i; - - if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) && - req->n_ssids) { - for (i = 0; i < req->n_ssids; i++) - ath_scan_send_probe(sc, &req->ssids[i]); - - } - - sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT; - mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration); -} - -void ath_offchannel_channel_change(struct ath_softc *sc) -{ - switch (sc->offchannel.state) { - case ATH_OFFCHANNEL_PROBE_SEND: - if (!sc->offchannel.scan_req) - return; - - if (sc->cur_chan->chandef.chan != - sc->offchannel.chan.chandef.chan) - return; - - ath_scan_channel_start(sc); - break; - case ATH_OFFCHANNEL_IDLE: - if (!sc->offchannel.scan_req) - return; - - ath_scan_complete(sc, false); - break; - case ATH_OFFCHANNEL_ROC_START: - if (sc->cur_chan != &sc->offchannel.chan) - break; - - sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT; - mod_timer(&sc->offchannel.timer, jiffies + - msecs_to_jiffies(sc->offchannel.duration)); - ieee80211_ready_on_channel(sc->hw); - break; - case ATH_OFFCHANNEL_ROC_DONE: - ath_roc_complete(sc, false); - break; - default: - break; - } -} - -void ath_offchannel_timer(unsigned long data) -{ - struct ath_softc *sc = (struct ath_softc *)data; - struct ath_chanctx *ctx; - - switch (sc->offchannel.state) { - case ATH_OFFCHANNEL_PROBE_WAIT: - if (!sc->offchannel.scan_req) - return; - - /* get first active channel context */ - ctx = ath_chanctx_get_oper_chan(sc, true); - if (ctx->active) { - sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND; - ath_chanctx_switch(sc, ctx, NULL); - mod_timer(&sc->offchannel.timer, jiffies + HZ / 10); - break; - } - /* fall through */ - case ATH_OFFCHANNEL_SUSPEND: - if (!sc->offchannel.scan_req) - return; - - ath_scan_next_channel(sc); - break; - case ATH_OFFCHANNEL_ROC_START: - case ATH_OFFCHANNEL_ROC_WAIT: - ctx = ath_chanctx_get_oper_chan(sc, false); - sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; - ath_chanctx_switch(sc, ctx, NULL); - break; - default: - break; - } -} +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) @@ -2430,8 +2198,13 @@ static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sc->offchannel.scan_req = req; sc->offchannel.scan_idx = 0; - if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) + ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n", + vif->addr); + + if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { + ath_dbg(common, CHAN_CTX, "Starting HW scan\n"); 
ath_offchannel_next(sc); + } out: mutex_unlock(&sc->mutex); @@ -2443,6 +2216,9 @@ static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr); mutex_lock(&sc->mutex); del_timer_sync(&sc->offchannel.timer); @@ -2456,6 +2232,7 @@ static int ath9k_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; mutex_lock(&sc->mutex); @@ -2470,8 +2247,14 @@ static int ath9k_remain_on_channel(struct ieee80211_hw *hw, sc->offchannel.roc_chan = chan; sc->offchannel.roc_duration = duration; - if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) + ath_dbg(common, CHAN_CTX, + "RoC request on vif: %pM, type: %d duration: %d\n", + vif->addr, type, duration); + + if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { + ath_dbg(common, CHAN_CTX, "Starting RoC period\n"); ath_offchannel_next(sc); + } out: mutex_unlock(&sc->mutex); @@ -2482,9 +2265,11 @@ static int ath9k_remain_on_channel(struct ieee80211_hw *hw, static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); mutex_lock(&sc->mutex); + ath_dbg(common, CHAN_CTX, "Cancel RoC\n"); del_timer_sync(&sc->offchannel.timer); if (sc->offchannel.roc_vif) { @@ -2501,6 +2286,7 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx, **ptr; int pos; @@ -2515,10 +2301,18 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw, ctx->assigned = true; pos = ctx - &sc->chanctx[0]; ctx->hw_queue_base = pos * IEEE80211_NUM_ACS; + + ath_dbg(common, CHAN_CTX, + "Add channel context: %d MHz\n", + conf->def.chan->center_freq); + ath_chanctx_set_channel(sc, ctx, &conf->def); + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN); + mutex_unlock(&sc->mutex); return 0; } + mutex_unlock(&sc->mutex); return -ENOSPC; } @@ -2528,12 +2322,19 @@ static void ath9k_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx = ath_chanctx_get(conf); mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Remove channel context: %d MHz\n", + conf->def.chan->center_freq); + ctx->assigned = false; ctx->hw_queue_base = -1; ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN); + mutex_unlock(&sc->mutex); } @@ -2542,9 +2343,13 @@ static void ath9k_change_chanctx(struct ieee80211_hw *hw, u32 changed) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx = ath_chanctx_get(conf); mutex_lock(&sc->mutex); + ath_dbg(common, CHAN_CTX, + "Change channel context: %d MHz\n", + conf->def.chan->center_freq); ath_chanctx_set_channel(sc, ctx, &conf->def); mutex_unlock(&sc->mutex); } @@ -2554,16 +2359,25 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_chanctx *ctx = ath_chanctx_get(conf); int i; mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Assign VIF 
(addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n", + vif->addr, vif->type, vif->p2p, + conf->def.chan->center_freq); + avp->chanctx = ctx; + ctx->nvifs_assigned++; list_add_tail(&avp->list, &ctx->vifs); ath9k_calculate_summary_state(sc, ctx); for (i = 0; i < IEEE80211_NUM_ACS; i++) vif->hw_queue[i] = ctx->hw_queue_base + i; + mutex_unlock(&sc->mutex); return 0; @@ -2574,36 +2388,80 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_chanctx *ctx = ath_chanctx_get(conf); int ac; mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n", + vif->addr, vif->type, vif->p2p, + conf->def.chan->center_freq); + avp->chanctx = NULL; + ctx->nvifs_assigned--; list_del(&avp->list); ath9k_calculate_summary_state(sc, ctx); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; + + mutex_unlock(&sc->mutex); +} + +static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; + bool changed = false; + + if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + return; + + if (!avp->chanctx) + return; + + mutex_lock(&sc->mutex); + + spin_lock_bh(&sc->chan_lock); + if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { + sc->next_chan = avp->chanctx; + changed = true; + } + ath_dbg(common, CHAN_CTX, + "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n", + __func__, changed); + sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; + spin_unlock_bh(&sc->chan_lock); + + if (changed) + ath_chanctx_set_next(sc, true); + mutex_unlock(&sc->mutex); } void ath9k_fill_chanctx_ops(void) { - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) return; - ath9k_ops.hw_scan = ath9k_hw_scan; - ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; - ath9k_ops.remain_on_channel = ath9k_remain_on_channel; + ath9k_ops.hw_scan = ath9k_hw_scan; + ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; + ath9k_ops.remain_on_channel = ath9k_remain_on_channel; ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel; - ath9k_ops.add_chanctx = ath9k_add_chanctx; - ath9k_ops.remove_chanctx = ath9k_remove_chanctx; - ath9k_ops.change_chanctx = ath9k_change_chanctx; - ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; - ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; - ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active; + ath9k_ops.add_chanctx = ath9k_add_chanctx; + ath9k_ops.remove_chanctx = ath9k_remove_chanctx; + ath9k_ops.change_chanctx = ath9k_change_chanctx; + ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; + ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; + ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx; } +#endif + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 74ab1d02013bf002bf04c0cfd197a2ecd8cdc29a..6914e21816e44e02652103698f71623e51aa05c4 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -387,7 +387,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (sc->hw->conf.radar_enabled) rfilt |= ATH9K_RX_FILTER_PHYRADAR | 
ATH9K_RX_FILTER_PHYERR; - if (sc->rx.rxfilter & FIF_PROBE_REQ) + spin_lock_bh(&sc->chan_lock); + + if (sc->cur_chan->rxfilter & FIF_PROBE_REQ) rfilt |= ATH9K_RX_FILTER_PROBEREQ; /* @@ -398,24 +400,25 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (sc->sc_ah->is_monitoring) rfilt |= ATH9K_RX_FILTER_PROM; - if (sc->rx.rxfilter & FIF_CONTROL) + if ((sc->cur_chan->rxfilter & FIF_CONTROL) || + sc->sc_ah->dynack.enabled) rfilt |= ATH9K_RX_FILTER_CONTROL; if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && - (sc->nvifs <= 1) && - !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) + (sc->cur_chan->nvifs <= 1) && + !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC)) rfilt |= ATH9K_RX_FILTER_MYBEACON; else rfilt |= ATH9K_RX_FILTER_BEACON; if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || - (sc->rx.rxfilter & FIF_PSPOLL)) + (sc->cur_chan->rxfilter & FIF_PSPOLL)) rfilt |= ATH9K_RX_FILTER_PSPOLL; - if (conf_is_ht(&sc->hw->conf)) + if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT) rfilt |= ATH9K_RX_FILTER_COMP_BAR; - if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { + if (sc->cur_chan->nvifs > 1 || (sc->cur_chan->rxfilter & FIF_OTHER_BSS)) { /* This is needed for older chips */ if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160) rfilt |= ATH9K_RX_FILTER_PROM; @@ -425,22 +428,24 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah)) rfilt |= ATH9K_RX_FILTER_4ADDRESS; - if (ath9k_use_chanctx && + if (ath9k_is_chanctx_enabled() && test_bit(ATH_OP_SCANNING, &common->op_flags)) rfilt |= ATH9K_RX_FILTER_BEACON; + spin_unlock_bh(&sc->chan_lock); + return rfilt; } -int ath_startrecv(struct ath_softc *sc) +void ath_startrecv(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_rxbuf *bf, *tbf; if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { ath_edma_start_recv(sc); - return 0; + return; } if (list_empty(&sc->rx.rxbuf)) @@ -463,8 +468,6 @@ int ath_startrecv(struct ath_softc *sc) start_recv: ath_opmode_init(sc); ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel); - - return 0; } static void ath_flushrecv(struct ath_softc *sc) @@ -535,6 +538,7 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); + bool skip_beacon = false; if (skb->len < 24 + 8 + 2 + 2) return; @@ -545,10 +549,19 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) sc->ps_flags &= ~PS_BEACON_SYNC; ath_dbg(common, PS, "Reconfigure beacon timers based on synchronized timestamp\n"); - if (!(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0))) + +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + if (ath9k_is_chanctx_enabled()) { + if (sc->cur_chan == &sc->offchannel.chan) + skip_beacon = true; + } +#endif + + if (!skip_beacon && + !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0))) ath9k_set_beacon(sc); - if (sc->p2p_ps_vif) - ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif); + + ath9k_p2p_beacon_sync(sc); } if (ath_beacon_dtim_pending_cab(skb)) { @@ -867,8 +880,13 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. 
*/ - if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter)) + spin_lock_bh(&sc->chan_lock); + if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, + sc->cur_chan->rxfilter)) { + spin_unlock_bh(&sc->chan_lock); return -EINVAL; + } + spin_unlock_bh(&sc->chan_lock); if (ath_is_mybeacon(common, hdr)) { RX_STAT_INC(rx_beacons); @@ -892,9 +910,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, return -EINVAL; } - if (rx_stats->is_mybeacon) { - sc->sched.next_tbtt = rx_stats->rs_tstamp; - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_BEACON_RECEIVED); + if (ath9k_is_chanctx_enabled()) { + if (rx_stats->is_mybeacon) + ath_chanctx_beacon_recv_ev(sc, + ATH_CHANCTX_EVENT_BEACON_RECEIVED); } ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status); @@ -991,6 +1010,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) unsigned long flags; dma_addr_t new_buf_addr; unsigned int budget = 512; + struct ieee80211_hdr *hdr; if (edma) dma_type = DMA_BIDIRECTIONAL; @@ -1120,6 +1140,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) ath9k_apply_ampdu_details(sc, &rs, rxs); ath_debug_rate_stats(sc, &rs, skb); + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_ack(hdr->frame_control)) + ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp); + ieee80211_rx(hw, skb); requeue_drop_frag: diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index a1499700bcf26d243fd159f0d6d918b459c8239f..2a938f4feac5fd91e33fd3c61e5364de1809b591 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -903,6 +903,10 @@ #define AR_SREV_9340(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340)) +#define AR_SREV_9340_13(_ah) \ + (AR_SREV_9340((_ah)) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9340_13)) + #define AR_SREV_9340_13_OR_LATER(_ah) \ (AR_SREV_9340((_ah)) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9340_13)) @@ -1240,12 +1244,23 @@ enum { #define AR_CH0_DPLL3_PHASE_SHIFT_S 23 #define AR_PHY_CCA_NOM_VAL_2GHZ -118 +#define AR_RTC_9300_SOC_PLL_DIV_INT 0x0000003f +#define AR_RTC_9300_SOC_PLL_DIV_INT_S 0 +#define AR_RTC_9300_SOC_PLL_DIV_FRAC 0x000fffc0 +#define AR_RTC_9300_SOC_PLL_DIV_FRAC_S 6 +#define AR_RTC_9300_SOC_PLL_REFDIV 0x01f00000 +#define AR_RTC_9300_SOC_PLL_REFDIV_S 20 +#define AR_RTC_9300_SOC_PLL_CLKSEL 0x06000000 +#define AR_RTC_9300_SOC_PLL_CLKSEL_S 25 +#define AR_RTC_9300_SOC_PLL_BYPASS 0x08000000 + #define AR_RTC_9300_PLL_DIV 0x000003ff #define AR_RTC_9300_PLL_DIV_S 0 #define AR_RTC_9300_PLL_REFDIV 0x00003C00 #define AR_RTC_9300_PLL_REFDIV_S 10 #define AR_RTC_9300_PLL_CLKSEL 0x0000C000 #define AR_RTC_9300_PLL_CLKSEL_S 14 +#define AR_RTC_9300_PLL_BYPASS 0x00010000 #define AR_RTC_9160_PLL_DIV 0x000003ff #define AR_RTC_9160_PLL_DIV_S 0 diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/spectral.h index ead63412ee1ae9b6137f14b2299d3bbc243340d6..7b410c6858b08741a4bf5bcea49b458ebe5c4e79 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.h +++ b/drivers/net/wireless/ath/ath9k/spectral.h @@ -17,6 +17,8 @@ #ifndef SPECTRAL_H #define SPECTRAL_H +#include "../spectral_common.h" + /* enum spectral_mode: * * @SPECTRAL_DISABLED: spectral mode is disabled @@ -54,8 +56,6 @@ struct ath_ht20_mag_info { u8 max_exp; } __packed; -#define SPECTRAL_HT20_NUM_BINS 56 - /* WARNING: don't actually use this struct! MAC may vary the amount of * data by -1/+2. This struct is for reference only. 
*/ @@ -83,8 +83,6 @@ struct ath_ht20_40_mag_info { u8 max_exp; } __packed; -#define SPECTRAL_HT20_40_NUM_BINS 128 - /* WARNING: don't actually use this struct! MAC may vary the amount of * data. This struct is for reference only. */ @@ -125,71 +123,6 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } -/* FFT sample format given to userspace via debugfs. - * - * Please keep the type/length at the front position and change - * other fields after adding another sample type - * - * TODO: this might need rework when switching to nl80211-based - * interface. - */ -enum ath_fft_sample_type { - ATH_FFT_SAMPLE_HT20 = 1, - ATH_FFT_SAMPLE_HT20_40, -}; - -struct fft_sample_tlv { - u8 type; /* see ath_fft_sample */ - __be16 length; - /* type dependent data follows */ -} __packed; - -struct fft_sample_ht20 { - struct fft_sample_tlv tlv; - - u8 max_exp; - - __be16 freq; - s8 rssi; - s8 noise; - - __be16 max_magnitude; - u8 max_index; - u8 bitmap_weight; - - __be64 tsf; - - u8 data[SPECTRAL_HT20_NUM_BINS]; -} __packed; - -struct fft_sample_ht20_40 { - struct fft_sample_tlv tlv; - - u8 channel_type; - __be16 freq; - - s8 lower_rssi; - s8 upper_rssi; - - __be64 tsf; - - s8 lower_noise; - s8 upper_noise; - - __be16 lower_max_magnitude; - __be16 upper_max_magnitude; - - u8 lower_max_index; - u8 upper_max_index; - - u8 lower_bitmap_weight; - u8 upper_bitmap_weight; - - u8 max_exp; - - u8 data[SPECTRAL_HT20_40_NUM_BINS]; -} __packed; - void ath9k_spectral_init_debug(struct ath_softc *sc); void ath9k_spectral_deinit_debug(struct ath_softc *sc); diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 23972924c774becd09f633f0708159594bc1aae1..8a69d08ec55c574349dda86348c04e709c4debfe 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -174,7 +174,7 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, ssize_t len; int r; - if (sc->nvifs > 1) + if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; len = min(count, sizeof(buf) - 1); diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c index a4f4f0da81f6e2a1cc71349a0c68185a2f07e546..5f30e580d94291e787aa87e572e51225ce7c67a1 100644 --- a/drivers/net/wireless/ath/ath9k/wow.c +++ b/drivers/net/wireless/ath/ath9k/wow.c @@ -193,7 +193,8 @@ int ath9k_suspend(struct ieee80211_hw *hw, u32 wow_triggers_enabled = 0; int ret = 0; - cancel_work_sync(&sc->chanctx_work); + ath9k_deinit_channel_context(sc); + mutex_lock(&sc->mutex); ath_cancel_work(sc); @@ -231,7 +232,7 @@ int ath9k_suspend(struct ieee80211_hw *hw, goto fail_wow; } - if (sc->nvifs > 1) { + if (sc->cur_chan->nvifs > 1) { ath_dbg(common, WOW, "WoW for multivif is not yet supported\n"); ret = 1; goto fail_wow; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 704fcbcbe20b5a59723063c77ac0d98ee482a99f..151ae49fa57e54c415d81a0c07bba000471af910 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -158,7 +158,6 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath_frame_info *fi = get_frame_info(skb); - int hw_queue; int q = fi->txq; if (q < 0) @@ -168,10 +167,9 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, if (WARN_ON(--txq->pending_frames < 0)) txq->pending_frames = 0; - hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? 
q : info->hw_queue; if (txq->stopped && txq->pending_frames < sc->tx.txq_max_pending[q]) { - ieee80211_wake_queue(sc->hw, hw_queue); + ieee80211_wake_queue(sc->hw, info->hw_queue); txq->stopped = false; } } @@ -587,6 +585,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, memcpy(tx_info->control.rates, rates, sizeof(rates)); ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); rc_update = false; + if (bf == bf->bf_lastbf) + ath_dynack_sample_tx_ts(sc->sc_ah, + bf->bf_mpdu, + ts); } ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, @@ -681,12 +683,15 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; + ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, + ts->ts_rateindex); if (!bf_isampdu(bf)) { if (!flush) { info = IEEE80211_SKB_CB(bf->bf_mpdu); memcpy(info->control.rates, bf->rates, sizeof(info->control.rates)); ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); + ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts); } ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); } else @@ -1836,15 +1841,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (txq->mac80211_qnum < 0) return; + if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) + return; + spin_lock_bh(&sc->chan_lock); ac_list = &sc->cur_chan->acq[txq->mac80211_qnum]; - spin_unlock_bh(&sc->chan_lock); - if (test_bit(ATH_OP_HW_RESET, &common->op_flags) || - list_empty(ac_list)) + if (list_empty(ac_list)) { + spin_unlock_bh(&sc->chan_lock); return; + } - spin_lock_bh(&sc->chan_lock); rcu_read_lock(); last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list); @@ -2202,9 +2209,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath_txq *txq = txctl->txq; struct ath_atx_tid *tid = NULL; struct ath_buf *bf; - bool queue; - int q, hw_queue; - int ret; + bool queue, skip_uapsd = false; + int q, ret; if (vif) avp = (void *)vif->drv_priv; @@ -2223,14 +2229,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, */ q = skb_get_queue_mapping(skb); - hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? 
q : info->hw_queue; ath_txq_lock(sc, txq); if (txq == sc->tx.txq_map[q]) { fi->txq = q; if (++txq->pending_frames > sc->tx.txq_max_pending[q] && !txq->stopped) { - ieee80211_stop_queue(sc->hw, hw_queue); + ieee80211_stop_queue(sc->hw, info->hw_queue); txq->stopped = true; } } @@ -2245,15 +2250,14 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, sc->cur_chan->stopped) && !txctl->force_channel) { if (!txctl->an) txctl->an = &avp->mcast_node; - info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE; queue = true; + skip_uapsd = true; } if (txctl->an && queue) tid = ath_get_skb_tid(sc, txctl->an, skb); - if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE | - IEEE80211_TX_CTL_TX_OFFCHAN)) { + if (!skip_uapsd && (info->flags & IEEE80211_TX_CTL_PS_RESPONSE)) { ath_txq_unlock(sc, txq); txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); @@ -2632,8 +2636,11 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) sc->beacon.tx_processed = true; sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); - ath_chanctx_event(sc, NULL, - ATH_CHANCTX_EVENT_BEACON_SENT); + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, NULL, + ATH_CHANCTX_EVENT_BEACON_SENT); + } + ath9k_csa_update(sc); continue; } diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index f8ded84b7be8c5e3b6038910a8829364a5e5ba1e..ef5b6dc7b7f17b52388ba45132e836aa0ff37bf4 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1430,18 +1430,10 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, if (!sta_info->ht_sta) return -EOPNOTSUPP; - rcu_read_lock(); - if (rcu_dereference(sta_info->agg[tid])) { - rcu_read_unlock(); - return -EBUSY; - } - tid_info = kzalloc(sizeof(struct carl9170_sta_tid), GFP_ATOMIC); - if (!tid_info) { - rcu_read_unlock(); + if (!tid_info) return -ENOMEM; - } tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn); tid_info->state = CARL9170_TID_STATE_PROGRESS; @@ -1460,7 +1452,6 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list); rcu_assign_pointer(sta_info->agg[tid], tid_info); spin_unlock_bh(&ar->tx_ampdu_list_lock); - rcu_read_unlock(); ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 4cadfd48ffdf8bd19f0206c138337f2e58177622..ae86a600d9207a4a59530710daae08f857155d34 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1557,7 +1557,7 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) } out: - rcu_assign_pointer(ar->beacon_iter, cvif); + RCU_INIT_POINTER(ar->beacon_iter, cvif); return cvif; } diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c index 8b0ac14d5c32dc2030515f4b103e3e4b1bcb0fb2..83f47af19280de4c3cd0cdf528c997b3d149187d 100644 --- a/drivers/net/wireless/ath/main.c +++ b/drivers/net/wireless/ath/main.c @@ -20,6 +20,7 @@ #include #include "ath.h" +#include "trace.h" MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards."); @@ -84,6 +85,8 @@ void ath_printk(const char *level, const struct ath_common* common, else printk("%sath: %pV", level, &vaf); + trace_ath_log(common->hw->wiphy, &vaf); + va_end(args); } EXPORT_SYMBOL(ath_printk); diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h new file mode 100644 index 
0000000000000000000000000000000000000000..0d742acb15993eca2aa36e2487ae2f991f5dcf2b --- /dev/null +++ b/drivers/net/wireless/ath/spectral_common.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SPECTRAL_COMMON_H +#define SPECTRAL_COMMON_H + +#define SPECTRAL_HT20_NUM_BINS 56 +#define SPECTRAL_HT20_40_NUM_BINS 128 + +/* TODO: could possibly be 512, but no samples this large + * could be acquired so far. + */ +#define SPECTRAL_ATH10K_MAX_NUM_BINS 256 + +/* FFT sample format given to userspace via debugfs. + * + * Please keep the type/length at the front position and change + * other fields after adding another sample type + * + * TODO: this might need rework when switching to nl80211-based + * interface. + */ +enum ath_fft_sample_type { + ATH_FFT_SAMPLE_HT20 = 1, + ATH_FFT_SAMPLE_HT20_40, + ATH_FFT_SAMPLE_ATH10K, +}; + +struct fft_sample_tlv { + u8 type; /* see ath_fft_sample */ + __be16 length; + /* type dependent data follows */ +} __packed; + +struct fft_sample_ht20 { + struct fft_sample_tlv tlv; + + u8 max_exp; + + __be16 freq; + s8 rssi; + s8 noise; + + __be16 max_magnitude; + u8 max_index; + u8 bitmap_weight; + + __be64 tsf; + + u8 data[SPECTRAL_HT20_NUM_BINS]; +} __packed; + +struct fft_sample_ht20_40 { + struct fft_sample_tlv tlv; + + u8 channel_type; + __be16 freq; + + s8 lower_rssi; + s8 upper_rssi; + + __be64 tsf; + + s8 lower_noise; + s8 upper_noise; + + __be16 lower_max_magnitude; + __be16 upper_max_magnitude; + + u8 lower_max_index; + u8 upper_max_index; + + u8 lower_bitmap_weight; + u8 upper_bitmap_weight; + + u8 max_exp; + + u8 data[SPECTRAL_HT20_40_NUM_BINS]; +} __packed; + +struct fft_sample_ath10k { + struct fft_sample_tlv tlv; + u8 chan_width_mhz; + __be16 freq1; + __be16 freq2; + __be16 noise; + __be16 max_magnitude; + __be16 total_gain_db; + __be16 base_pwr_db; + __be64 tsf; + s8 max_index; + u8 rssi; + u8 relpwr_db; + u8 avgpwr_db; + u8 max_exp; + + u8 data[0]; +} __packed; + +#endif /* SPECTRAL_COMMON_H */ diff --git a/drivers/net/wireless/ath/trace.c b/drivers/net/wireless/ath/trace.c new file mode 100644 index 0000000000000000000000000000000000000000..18fb3a0719312ece35b7a6258785d34251dea79b --- /dev/null +++ b/drivers/net/wireless/ath/trace.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h new file mode 100644 index 0000000000000000000000000000000000000000..ba711644d27ee1055de593945256fb6756b6faa7 --- /dev/null +++ b/drivers/net/wireless/ath/trace.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#if !defined(_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_H + +#include <linux/tracepoint.h> +#include "ath.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ath + +#if !defined(CONFIG_ATH_TRACEPOINTS) + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) static inline void trace_ ## name(proto) {} + +#endif /* CONFIG_ATH_TRACEPOINTS */ + +TRACE_EVENT(ath_log, + + TP_PROTO(struct wiphy *wiphy, + struct va_format *vaf), + + TP_ARGS(wiphy, vaf), + + TP_STRUCT__entry( + __string(device, wiphy_name(wiphy)) + __string(driver, KBUILD_MODNAME) + __dynamic_array(char, msg, ATH_DBG_MAX_LEN) + ), + + TP_fast_assign( + __assign_str(device, wiphy_name(wiphy)); + __assign_str(driver, KBUILD_MODNAME); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH_DBG_MAX_LEN, + vaf->fmt, + *vaf->va) >= ATH_DBG_MAX_LEN); + ), + + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +#endif /* _TRACE_H || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index ce8c0381825e674c2eca80510761ae186283073f..481680a3aa5513564686bbf14c9c06a495e00e2c 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -39,3 +39,12 @@ config WIL6210_TRACING option if you are interested in debugging the driver. If unsure, say Y to make it easier to debug problems.
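Referring back to ath/trace.h above: when CONFIG_ATH_TRACEPOINTS is not set, the redefined TRACE_EVENT() macro compiles the tracepoint away, and the ath_log event reduces to roughly the empty inline below. This expansion is shown only as an illustration of the stub path and is not itself part of the patch; it is what lets ath_printk() call trace_ath_log() unconditionally.

	/* approximate expansion of TRACE_EVENT(ath_log, ...) with tracepoints disabled */
	static inline void trace_ath_log(struct wiphy *wiphy, struct va_format *vaf) {}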
+ +config WIL6210_PLATFORM_MSM + bool "wil6210 MSM platform specific support" + depends on WIL6210 + depends on ARCH_MSM + default y + ---help--- + Say Y here to enable wil6210 driver support for MSM + platform specific features diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index c7a3465fd02ad3b6942fd43bb1aa88bf4bc0e44f..8ad4b5f97e043528d3661b7fc23010eb9b9ef17e 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -10,7 +10,12 @@ wil6210-y += interrupt.o wil6210-y += txrx.o wil6210-y += debug.o wil6210-y += rx_reorder.o +wil6210-y += ioctl.o +wil6210-y += fw.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o +wil6210-y += wil_platform.o +wil6210-$(CONFIG_WIL6210_PLATFORM_MSM) += wil_platform_msm.o +wil6210-y += ethtool.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 4ac2c208c9ba3ff109e8c8942b1b9637e9b7cf0d..d9f4b30dd343b01e1d58ac8c9deadbb246a82abd 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -296,6 +296,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { int ch = request->channels[i]->hw_value; + if (ch == 0) { wil_err(wil, "Scan requested for unknown frequency %dMhz\n", @@ -308,15 +309,47 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, request->channels[i]->center_freq); } + if (request->ie_len) + print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, + request->ie, request->ie_len); + else + wil_dbg_misc(wil, "Scan has no IE's\n"); + + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, + request->ie); + if (rc) { + wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc); + goto out; + } + rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); - if (rc) +out: + if (rc) { + del_timer_sync(&wil->scan_timer); wil->scan_request = NULL; + } return rc; } +static void wil_print_connect_params(struct wil6210_priv *wil, + struct cfg80211_connect_params *sme) +{ + wil_info(wil, "Connecting to:\n"); + if (sme->channel) { + wil_info(wil, " Channel: %d freq %d\n", + sme->channel->hw_value, sme->channel->center_freq); + } + if (sme->bssid) + wil_info(wil, " BSSID: %pM\n", sme->bssid); + if (sme->ssid) + print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, + 16, 1, sme->ssid, sme->ssid_len, true); + wil_info(wil, " Privacy: %s\n", sme->privacy ? 
"secure" : "open"); +} + static int wil_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) @@ -333,6 +366,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, test_bit(wil_status_fwconnected, &wil->status)) return -EALREADY; + wil_print_connect_params(wil, sme); + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -358,22 +393,22 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, sme->ie_len); goto out; } - /* - * For secure assoc, send: - * (1) WMI_DELETE_CIPHER_KEY_CMD - * (2) WMI_SET_APPIE_CMD - */ + /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ rc = wmi_del_cipher_key(wil, 0, bss->bssid); if (rc) { wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n"); goto out; } - /* WMI_SET_APPIE_CMD */ - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); - if (rc) { - wil_err(wil, "WMI_SET_APPIE_CMD failed\n"); - goto out; - } + } + + /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info + * elements. Send it also in case it's empty, to erase previously set + * ies in FW. + */ + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); + if (rc) { + wil_err(wil, "WMI_SET_APPIE_CMD failed\n"); + goto out; } /* WMI_CONNECT_CMD */ @@ -619,6 +654,45 @@ static int wil_fix_bcon(struct wil6210_priv *wil, return rc; } +static int wil_cfg80211_change_beacon(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_beacon_data *bcon) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + + wil_dbg_misc(wil, "%s()\n", __func__); + + if (wil_fix_bcon(wil, bcon)) { + wil_dbg_misc(wil, "Fixed bcon\n"); + wil_print_bcon_data(bcon); + } + + /* FW do not form regular beacon, so bcon IE's are not set + * For the DMG bcon, when it will be supported, bcon IE's will + * be reused; add something like: + * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, + * bcon->beacon_ies); + */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, + bcon->proberesp_ies_len, + bcon->proberesp_ies); + if (rc) { + wil_err(wil, "set_ie(PROBE_RESP) failed\n"); + return rc; + } + + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, + bcon->assocresp_ies_len, + bcon->assocresp_ies); + if (rc) { + wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); + return rc; + } + + return 0; +} + static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) @@ -654,14 +728,12 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); } - mutex_lock(&wil->mutex); + wil_set_recovery_state(wil, fw_recovery_idle); - rc = wil_reset(wil); - if (rc) - goto out; + mutex_lock(&wil->mutex); - /* Rx VRING. 
*/ - rc = wil_rx_init(wil); + __wil_down(wil); + rc = __wil_up(wil); if (rc) goto out; @@ -669,9 +741,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, if (rc) goto out; - /* MAC address - pre-requisite for other commands */ - wmi_set_mac_address(wil, ndev->dev_addr); - /* IE's */ /* bcon 'head IE's are not relevant for 60g band */ /* @@ -693,7 +762,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, if (rc) goto out; - netif_carrier_on(ndev); out: @@ -704,17 +772,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, static int wil_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { - int rc = 0; + int rc, rc1; struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s()\n", __func__); + wil_set_recovery_state(wil, fw_recovery_idle); + mutex_lock(&wil->mutex); rc = wmi_pcp_stop(wil); + __wil_down(wil); + rc1 = __wil_up(wil); + mutex_unlock(&wil->mutex); - return rc; + + return min(rc, rc1); } static int wil_cfg80211_del_station(struct wiphy *wiphy, @@ -744,6 +818,7 @@ static struct cfg80211_ops wil_cfg80211_ops = { .del_key = wil_cfg80211_del_key, .set_default_key = wil_cfg80211_set_default_key, /* AP mode */ + .change_beacon = wil_cfg80211_change_beacon, .start_ap = wil_cfg80211_start_ap, .stop_ap = wil_cfg80211_stop_ap, .del_station = wil_cfg80211_del_station, @@ -753,6 +828,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) { /* TODO: set real value */ wiphy->max_scan_ssids = 10; + wiphy->max_scan_ie_len = WMI_MAX_IE_LEN; wiphy->max_num_pmkids = 0 /* TODO: */; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -762,8 +838,8 @@ static void wil_wiphy_init(struct wiphy *wiphy) */ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; - dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", - __func__, wiphy->flags); + dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n", + __func__, wiphy->flags); wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | @@ -784,7 +860,9 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev) int rc = 0; struct wireless_dev *wdev; - wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); + dev_dbg(dev, "%s()\n", __func__); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (!wdev) return ERR_PTR(-ENOMEM); @@ -816,6 +894,8 @@ void wil_wdev_free(struct wil6210_priv *wil) { struct wireless_dev *wdev = wil_to_wdev(wil); + dev_dbg(wil_to_dev(wil), "%s()\n", __func__); + if (!wdev) return; diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index 9eeabf4a587949c65fd3ee27cc089d65d64273c5..8d99021d27a8806cef563879280c8bdf17c80c10 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -17,43 +17,37 @@ #include "wil6210.h" #include "trace.h" -int wil_err(struct wil6210_priv *wil, const char *fmt, ...) +void wil_err(struct wil6210_priv *wil, const char *fmt, ...) { struct net_device *ndev = wil_to_ndev(wil); struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = netdev_err(ndev, "%pV", &vaf); + netdev_err(ndev, "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); - - return ret; } -int wil_info(struct wil6210_priv *wil, const char *fmt, ...) +void wil_info(struct wil6210_priv *wil, const char *fmt, ...) 
{ struct net_device *ndev = wil_to_ndev(wil); struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = netdev_info(ndev, "%pV", &vaf); + netdev_info(ndev, "%pV", &vaf); trace_wil6210_log_info(&vaf); va_end(args); - - return ret; } -int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) +void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -64,6 +58,4 @@ int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) vaf.va = &args; trace_wil6210_log_dbg(&vaf); va_end(args); - - return 0; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8f66186adb8c59d67103ad75ddb759063736bbaa..54a6ddc6301bcabe3c48784e850033c6a170410e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +22,7 @@ #include #include "wil6210.h" +#include "wmi.h" #include "txrx.h" /* Nasty hack. Better have per device instances */ @@ -29,6 +30,21 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ +enum dbg_off_type { + doff_u32 = 0, + doff_x32 = 1, + doff_ulong = 2, + doff_io32 = 3, +}; + +/* offset to "wil" */ +struct dbg_off { + const char *name; + umode_t mode; + ulong off; + enum dbg_off_type type; +}; + static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, const char *name, struct vring *vring, char _s, char _h) @@ -45,20 +61,22 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, if (x) seq_printf(s, "0x%08x\n", ioread32(x)); else - seq_printf(s, "???\n"); + seq_puts(s, "???\n"); if (vring->va && (vring->size < 1025)) { uint i; + for (i = 0; i < vring->size; i++) { volatile struct vring_tx_desc *d = &vring->va[i].tx; + if ((i % 64) == 0 && (i != 0)) - seq_printf(s, "\n"); + seq_puts(s, "\n"); seq_printf(s, "%c", (d->dma.status & BIT(0)) ? _s : (vring->ctx[i].skb ? _h : 'h')); } - seq_printf(s, "\n"); + seq_puts(s, "\n"); } - seq_printf(s, "}\n"); + seq_puts(s, "}\n"); } static int wil_vring_debugfs_show(struct seq_file *s, void *data) @@ -69,7 +87,7 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data) wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_'); for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { - struct vring *vring = &(wil->vring_tx[i]); + struct vring *vring = &wil->vring_tx[i]; struct vring_tx_data *txdata = &wil->vring_tx_data[i]; if (vring->va) { @@ -147,7 +165,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, if (!wmi_addr(wil, r.base) || !wmi_addr(wil, r.tail) || !wmi_addr(wil, r.head)) { - seq_printf(s, " ??? pointers are garbage?\n"); + seq_puts(s, " ??? 
pointers are garbage?\n"); goto out; } @@ -166,6 +184,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, le32_to_cpu(d.addr)); if (0 == wmi_read_hdr(wil, d.addr, &hdr)) { u16 len = le16_to_cpu(hdr.len); + seq_printf(s, " -> %04x %04x %04x %02x\n", le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), hdr.flags); @@ -183,6 +202,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, wil_memcpy_fromio_32(databuf, src, len); while (n < len) { int l = min(len - n, 16); + hex_dump_to_buffer(databuf + n, l, 16, 1, printbuf, sizeof(printbuf), @@ -192,11 +212,11 @@ static void wil_print_ring(struct seq_file *s, const char *prefix, } } } else { - seq_printf(s, "\n"); + seq_puts(s, "\n"); } } out: - seq_printf(s, "}\n"); + seq_puts(s, "}\n"); } static int wil_mbox_debugfs_show(struct seq_file *s, void *data) @@ -244,9 +264,9 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, static struct dentry *wil_debugfs_create_iomem_x32(const char *name, umode_t mode, struct dentry *parent, - void __iomem *value) + void *value) { - return debugfs_create_file(name, mode, parent, (void * __force)value, + return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32); } @@ -255,11 +275,13 @@ static int wil_debugfs_ulong_set(void *data, u64 val) *(ulong *)data = val; return 0; } + static int wil_debugfs_ulong_get(void *data, u64 *val) { *val = *(ulong *)data; return 0; } + DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, wil_debugfs_ulong_set, "%llu\n"); @@ -270,6 +292,62 @@ static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong); } +/** + * wil6210_debugfs_init_offset - create set of debugfs files + * @wil - driver's context, used for printing + * @dbg - directory on the debugfs, where files will be created + * @base - base address used in address calculation + * @tbl - table with file descriptions. Should be terminated with empty element. + * + * Creates files accordingly to the @tbl. 
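+ *
+ * A minimal usage sketch (hypothetical table named example_off, shown for
+ * illustration only; the real tables used by this patch follow below):
+ *
+ *	static const struct dbg_off example_off[] = {
+ *		{"recovery_count", S_IRUGO,
+ *		 offsetof(struct wil6210_priv, recovery_count), doff_u32},
+ *		{},
+ *	};
+ *	wil6210_debugfs_init_offset(wil, dbg, wil, example_off);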
+ */ +static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, + struct dentry *dbg, void *base, + const struct dbg_off * const tbl) +{ + int i; + + for (i = 0; tbl[i].name; i++) { + struct dentry *f; + + switch (tbl[i].type) { + case doff_u32: + f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; + case doff_x32: + f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; + case doff_ulong: + f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode, + dbg, base + tbl[i].off); + break; + case doff_io32: + f = wil_debugfs_create_iomem_x32(tbl[i].name, + tbl[i].mode, dbg, + base + tbl[i].off); + break; + default: + f = ERR_PTR(-EINVAL); + } + if (IS_ERR_OR_NULL(f)) + wil_err(wil, "Create file \"%s\": err %ld\n", + tbl[i].name, PTR_ERR(f)); + } +} + +static const struct dbg_off isr_off[] = { + {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32}, + {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32}, + {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32}, + {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32}, + {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32}, + {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32}, + {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32}, + {}, +}; + static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, const char *name, struct dentry *parent, u32 off) @@ -279,24 +357,19 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d, - wil->csr + off); - wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d, - wil->csr + off + 4); - wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d, - wil->csr + off + 8); - wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d, - wil->csr + off + 12); - wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d, - wil->csr + off + 16); - wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d, - wil->csr + off + 20); - wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d, - wil->csr + off + 24); + wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off, + isr_off); return 0; } +static const struct dbg_off pseudo_isr_off[] = { + {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, + {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, + {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, + {}, +}; + static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, struct dentry *parent) { @@ -305,16 +378,19 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE)); - wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); - wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW)); + wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr, + pseudo_isr_off); return 0; } +static const struct dbg_off itr_cnt_off[] = { + {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, + {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, + {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, + {}, +}; + static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, struct dentry *parent) { @@ -323,12 +399,8 @@ static int 
wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("TRSH", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); - wil_debugfs_create_iomem_x32("DATA", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_DATA)); - wil_debugfs_create_iomem_x32("CTL", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_CRL)); + wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr, + itr_cnt_off); return 0; } @@ -359,7 +431,7 @@ static const struct file_operations fops_memread = { }; static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { enum { max_count = 4096 }; struct debugfs_blob_wrapper *blob = file->private_data; @@ -411,6 +483,7 @@ struct dentry *wil_debugfs_create_ioblob(const char *name, { return debugfs_create_file(name, mode, parent, blob, &fops_ioblob); } + /*---reset---*/ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, size_t len, loff_t *ppos) @@ -436,6 +509,7 @@ static const struct file_operations fops_reset = { .write = wil_write_file_reset, .open = simple_open, }; + /*---write channel 1..4 to rxon for it, 0 to rxoff---*/ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, size_t len, loff_t *ppos) @@ -446,6 +520,7 @@ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, bool on; char *kbuf = kmalloc(len + 1, GFP_KERNEL); + if (!kbuf) return -ENOMEM; if (copy_from_user(kbuf, buf, len)) { @@ -482,6 +557,7 @@ static const struct file_operations fops_rxon = { .write = wil_write_file_rxon, .open = simple_open, }; + /*---tx_mgmt---*/ /* Write mgmt frame to this file to send it */ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, @@ -492,8 +568,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, struct wireless_dev *wdev = wil_to_wdev(wil); struct cfg80211_mgmt_tx_params params; int rc; - void *frame = kmalloc(len, GFP_KERNEL); + if (!frame) return -ENOMEM; @@ -562,8 +638,10 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len, { char printbuf[16 * 3 + 2]; int i = 0; + while (i < len) { int l = min(len - i, 16); + hex_dump_to_buffer(p + i, l, 16, 1, printbuf, sizeof(printbuf), false); seq_printf(s, "%s%s\n", prefix, printbuf); @@ -601,10 +679,8 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) struct wil6210_priv *wil = s->private; struct vring *vring; bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS); - if (tx) - vring = &(wil->vring_tx[dbg_vring_index]); - else - vring = &wil->vring_rx; + + vring = tx ? 
&wil->vring_tx[dbg_vring_index] : &wil->vring_rx; if (!vring->va) { if (tx) @@ -619,7 +695,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) * only field used, .dma.length, is the same */ volatile struct vring_tx_desc *d = - &(vring->va[dbg_txdesc_index].tx); + &vring->va[dbg_txdesc_index].tx; volatile u32 *u = (volatile u32 *)d; struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb; @@ -639,7 +715,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) wil_seq_print_skb(s, skb); kfree_skb(skb); } - seq_printf(s, "}\n"); + seq_puts(s, "}\n"); } else { if (tx) seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n", @@ -666,16 +742,79 @@ static const struct file_operations fops_txdesc = { }; /*---------beamforming------------*/ +static char *wil_bfstatus_str(u32 status) +{ + switch (status) { + case 0: + return "Failed"; + case 1: + return "OK"; + case 2: + return "Retrying"; + default: + return "??"; + } +} + +static bool is_all_zeros(void * const x_, size_t sz) +{ + /* if reply is all-0, ignore this CID */ + u32 *x = x_; + int n; + + for (n = 0; n < sz / sizeof(*x); n++) + if (x[n]) + return false; + + return true; +} + static int wil_bf_debugfs_show(struct seq_file *s, void *data) { + int rc; + int i; struct wil6210_priv *wil = s->private; - seq_printf(s, - "TSF : 0x%016llx\n" - "TxMCS : %d\n" - "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n", - wil->stats.tsf, wil->stats.bf_mcs, - wil->stats.my_rx_sector, wil->stats.my_tx_sector, - wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); + struct wmi_notify_req_cmd cmd = { + .interval_usec = 0, + }; + struct { + struct wil6210_mbox_hdr_wmi wmi; + struct wmi_notify_req_done_event evt; + } __packed reply; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + u32 status; + + cmd.cid = i; + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), + WMI_NOTIFY_REQ_DONE_EVENTID, &reply, + sizeof(reply), 20); + /* if reply is all-0, ignore this CID */ + if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt))) + continue; + + status = le32_to_cpu(reply.evt.status); + seq_printf(s, "CID %d {\n" + " TSF = 0x%016llx\n" + " TxMCS = %2d TxTpt = %4d\n" + " SQI = %4d\n" + " Status = 0x%08x %s\n" + " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" + " Goodput(rx:tx) %4d:%4d\n" + "}\n", + i, + le64_to_cpu(reply.evt.tsf), + le16_to_cpu(reply.evt.bf_mcs), + le32_to_cpu(reply.evt.tx_tpt), + reply.evt.sqi, + status, wil_bfstatus_str(status), + le16_to_cpu(reply.evt.my_rx_sector), + le16_to_cpu(reply.evt.my_tx_sector), + le16_to_cpu(reply.evt.other_rx_sector), + le16_to_cpu(reply.evt.other_tx_sector), + le32_to_cpu(reply.evt.rx_goodput), + le32_to_cpu(reply.evt.tx_goodput)); + } return 0; } @@ -690,6 +829,7 @@ static const struct file_operations fops_bf = { .read = seq_read, .llseek = seq_lseek, }; + /*---------SSID------------*/ static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -752,10 +892,10 @@ static int wil_temp_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; u32 t_m, t_r; - int rc = wmi_get_temperature(wil, &t_m, &t_r); + if (rc) { - seq_printf(s, "Failed\n"); + seq_puts(s, "Failed\n"); return 0; } @@ -811,6 +951,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + switch (p->status) { case wil_sta_unused: status = "unused "; @@ -871,7 +1012,6 @@ static int wil_info_debugfs_show(struct seq_file *s, void *data) 
rxf_old = rxf; txf_old = txf; - #define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \ " " __stringify(x) : "" @@ -901,11 +1041,77 @@ static const struct file_operations fops_info = { .llseek = seq_lseek, }; +/*---------recovery------------*/ +/* mode = [manual|auto] + * state = [idle|pending|running] + */ +static ssize_t wil_read_file_recovery(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + char buf[80]; + int n; + static const char * const sstate[] = {"idle", "pending", "running"}; + + n = snprintf(buf, sizeof(buf), "mode = %s\nstate = %s\n", + no_fw_recovery ? "manual" : "auto", + sstate[wil->recovery_state]); + + n = min_t(int, n, sizeof(buf)); + + return simple_read_from_buffer(user_buf, count, ppos, + buf, n); +} + +static ssize_t wil_write_file_recovery(struct file *file, + const char __user *buf_, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + static const char run_command[] = "run"; + char buf[sizeof(run_command) + 1]; /* to detect "runx" */ + ssize_t rc; + + if (wil->recovery_state != fw_recovery_pending) { + wil_err(wil, "No recovery pending\n"); + return -EINVAL; + } + + if (*ppos != 0) { + wil_err(wil, "Offset [%d]\n", (int)*ppos); + return -EINVAL; + } + + if (count > sizeof(buf)) { + wil_err(wil, "Input too long, len = %d\n", (int)count); + return -EINVAL; + } + + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, buf_, count); + if (rc < 0) + return rc; + + buf[rc] = '\0'; + if (0 == strcmp(buf, run_command)) + wil_set_recovery_state(wil, fw_recovery_running); + else + wil_err(wil, "Bad recovery command \"%s\"\n", buf); + + return rc; +} + +static const struct file_operations fops_recovery = { + .read = wil_read_file_recovery, + .write = wil_write_file_recovery, + .open = simple_open, +}; + /*---------Station matrix------------*/ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) { int i; u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; + seq_printf(s, "0x%03x [", r->head_seq_num); for (i = 0; i < r->buf_size; i++) { if (i == index) @@ -920,10 +1126,12 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; int i, tid; + unsigned long flags; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; + switch (p->status) { case wil_sta_unused: status = "unused "; @@ -939,13 +1147,16 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data) (p->data_port_open ? 
" data_port_open" : "")); if (p->status == wil_sta_connected) { + spin_lock_irqsave(&p->tid_rx_lock, flags); for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { struct wil_tid_ampdu_rx *r = p->tid_rx[tid]; + if (r) { seq_printf(s, "[%2d] ", tid); wil_print_rxtid(s, r); } } + spin_unlock_irqrestore(&p->tid_rx_lock, flags); } } @@ -985,6 +1196,89 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, } } +/* misc files */ +static const struct { + const char *name; + umode_t mode; + const struct file_operations *fops; +} dbg_files[] = { + {"mbox", S_IRUGO, &fops_mbox}, + {"vrings", S_IRUGO, &fops_vring}, + {"stations", S_IRUGO, &fops_sta}, + {"desc", S_IRUGO, &fops_txdesc}, + {"bf", S_IRUGO, &fops_bf}, + {"ssid", S_IRUGO | S_IWUSR, &fops_ssid}, + {"mem_val", S_IRUGO, &fops_memread}, + {"reset", S_IWUSR, &fops_reset}, + {"rxon", S_IWUSR, &fops_rxon}, + {"tx_mgmt", S_IWUSR, &fops_txmgmt}, + {"wmi_send", S_IWUSR, &fops_wmi}, + {"temp", S_IRUGO, &fops_temp}, + {"freq", S_IRUGO, &fops_freq}, + {"link", S_IRUGO, &fops_link}, + {"info", S_IRUGO, &fops_info}, + {"recovery", S_IRUGO | S_IWUSR, &fops_recovery}, +}; + +static void wil6210_debugfs_init_files(struct wil6210_priv *wil, + struct dentry *dbg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dbg_files); i++) + debugfs_create_file(dbg_files[i].name, dbg_files[i].mode, dbg, + wil, dbg_files[i].fops); +} + +/* interrupt control blocks */ +static const struct { + const char *name; + u32 icr_off; +} dbg_icr[] = { + {"USER_ICR", HOSTADDR(RGF_USER_USER_ICR)}, + {"DMA_EP_TX_ICR", HOSTADDR(RGF_DMA_EP_TX_ICR)}, + {"DMA_EP_RX_ICR", HOSTADDR(RGF_DMA_EP_RX_ICR)}, + {"DMA_EP_MISC_ICR", HOSTADDR(RGF_DMA_EP_MISC_ICR)}, +}; + +static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, + struct dentry *dbg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dbg_icr); i++) + wil6210_debugfs_create_ISR(wil, dbg_icr[i].name, dbg, + dbg_icr[i].icr_off); +} + +#define WIL_FIELD(name, mode, type) { __stringify(name), mode, \ + offsetof(struct wil6210_priv, name), type} + +/* fields in struct wil6210_priv */ +static const struct dbg_off dbg_wil_off[] = { + WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32), + WIL_FIELD(status, S_IRUGO | S_IWUSR, doff_ulong), + WIL_FIELD(fw_version, S_IRUGO, doff_u32), + WIL_FIELD(hw_version, S_IRUGO, doff_x32), + WIL_FIELD(recovery_count, S_IRUGO, doff_u32), + {}, +}; + +static const struct dbg_off dbg_wil_regs[] = { + {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), + doff_io32}, + {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, + {}, +}; + +/* static parameters */ +static const struct dbg_off dbg_statics[] = { + {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, + {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, + {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, + {}, +}; + int wil6210_debugfs_init(struct wil6210_priv *wil) { struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME, @@ -993,51 +1287,17 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) if (IS_ERR_OR_NULL(dbg)) return -ENODEV; - debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox); - debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring); - debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta); - debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc); - debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg, - &dbg_txdesc_index); - debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg, - &dbg_vring_index); - - debugfs_create_file("bf", 
S_IRUGO, dbg, wil, &fops_bf); - debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid); - debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg, - &wil->secure_pcp); - wil_debugfs_create_ulong("status", S_IRUGO | S_IWUSR, dbg, - &wil->status); - debugfs_create_u32("fw_version", S_IRUGO, dbg, &wil->fw_version); - debugfs_create_x32("hw_version", S_IRUGO, dbg, &wil->hw_version); - - wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg, - HOSTADDR(RGF_USER_USER_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg, - HOSTADDR(RGF_DMA_EP_TX_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg, - HOSTADDR(RGF_DMA_EP_RX_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg, - HOSTADDR(RGF_DMA_EP_MISC_ICR)); - wil6210_debugfs_create_pseudo_ISR(wil, dbg); - wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil6210_debugfs_init_files(wil, dbg); + wil6210_debugfs_init_isr(wil, dbg); + wil6210_debugfs_init_blobs(wil, dbg); + wil6210_debugfs_init_offset(wil, dbg, wil, dbg_wil_off); + wil6210_debugfs_init_offset(wil, dbg, (void * __force)wil->csr, + dbg_wil_regs); + wil6210_debugfs_init_offset(wil, dbg, NULL, dbg_statics); - wil_debugfs_create_iomem_x32("RGF_USER_USAGE_1", S_IRUGO, dbg, - wil->csr + - HOSTADDR(RGF_USER_USAGE_1)); - debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr); - debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread); - - debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset); - debugfs_create_file("rxon", S_IWUSR, dbg, wil, &fops_rxon); - debugfs_create_file("tx_mgmt", S_IWUSR, dbg, wil, &fops_txmgmt); - debugfs_create_file("wmi_send", S_IWUSR, dbg, wil, &fops_wmi); - debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp); - debugfs_create_file("freq", S_IRUGO, dbg, wil, &fops_freq); - debugfs_create_file("link", S_IRUGO, dbg, wil, &fops_link); - debugfs_create_file("info", S_IRUGO, dbg, wil, &fops_info); + wil6210_debugfs_create_pseudo_ISR(wil, dbg); - wil6210_debugfs_init_blobs(wil, dbg); + wil6210_debugfs_create_ITR_CNT(wil, dbg); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..d686638972bee238852dd1b7ae97715d6a533a2f --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include "wil6210.h" + +static int wil_ethtoolops_begin(struct net_device *ndev) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + + mutex_lock(&wil->mutex); + + wil_dbg_misc(wil, "%s()\n", __func__); + + return 0; +} + +static void wil_ethtoolops_complete(struct net_device *ndev) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + + wil_dbg_misc(wil, "%s()\n", __func__); + + mutex_unlock(&wil->mutex); +} + +static int wil_ethtoolops_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *cp) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + u32 itr_en, itr_val = 0; + + wil_dbg_misc(wil, "%s()\n", __func__); + + itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL)); + if (itr_en & BIT_DMA_ITR_CNT_CRL_EN) + itr_val = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); + + cp->rx_coalesce_usecs = itr_val; + + return 0; +} + +static int wil_ethtoolops_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *cp) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + + wil_dbg_misc(wil, "%s(%d usec)\n", __func__, cp->rx_coalesce_usecs); + + if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { + wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n"); + return -EINVAL; + } + + /* only @rx_coalesce_usecs supported, ignore + * other parameters + */ + + if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX) + goto out_bad; + + wil->itr_trsh = cp->rx_coalesce_usecs; + wil_set_itr_trsh(wil); + + return 0; + +out_bad: + wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n"); + print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4, + cp, sizeof(*cp), false); + return -EINVAL; +} + +static const struct ethtool_ops wil_ethtool_ops = { + .begin = wil_ethtoolops_begin, + .complete = wil_ethtoolops_complete, + .get_drvinfo = cfg80211_get_drvinfo, + .get_coalesce = wil_ethtoolops_get_coalesce, + .set_coalesce = wil_ethtoolops_set_coalesce, +}; + +void wil_set_ethtoolops(struct net_device *ndev) +{ + ndev->ethtool_ops = &wil_ethtool_ops; +} diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..8c6f3b041f7773991c57b056229583f6e326d6c8 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include "wil6210.h" +#include "fw.h" + +MODULE_FIRMWARE(WIL_FW_NAME); + +/* target operations */ +/* register read */ +#define R(a) ioread32(wil->csr + HOSTADDR(a)) +/* register write. 
wmb() to make sure it is completed */ +#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) +/* register set = read, OR, write */ +#define S(a, v) W(a, R(a) | v) +/* register clear = read, AND with inverted, write */ +#define C(a, v) W(a, R(a) & ~v) + +static +void wil_memset_toio_32(volatile void __iomem *dst, u32 val, + size_t count) +{ + volatile u32 __iomem *d = dst; + + for (count += 4; count > 4; count -= 4) + __raw_writel(val, d++); +} + +#include "fw_inc.c" diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h new file mode 100644 index 0000000000000000000000000000000000000000..7a2c6c129ad5b72527c8db05d904aa03ea91adbe --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define WIL_FW_SIGNATURE (0x36323130) /* '0126' */ +#define WIL_FW_FMT_VERSION (1) /* format version driver supports */ + +enum wil_fw_record_type { + wil_fw_type_comment = 1, + wil_fw_type_data = 2, + wil_fw_type_fill = 3, + wil_fw_type_action = 4, + wil_fw_type_verify = 5, + wil_fw_type_file_header = 6, + wil_fw_type_direct_write = 7, + wil_fw_type_gateway_data = 8, + wil_fw_type_gateway_data4 = 9, +}; + +struct wil_fw_record_head { + __le16 type; /* enum wil_fw_record_type */ + __le16 flags; /* to be defined */ + __le32 size; /* whole record, bytes after head */ +} __packed; + +/* data block. write starting from @addr + * data_size inferred from the @head.size. For this case, + * data_size = @head.size - offsetof(struct wil_fw_record_data, data) + */ +struct wil_fw_record_data { /* type == wil_fw_type_data */ + __le32 addr; + __le32 data[0]; /* [data_size], see above */ +} __packed; + +/* fill with constant @value, @size bytes starting from @addr */ +struct wil_fw_record_fill { /* type == wil_fw_type_fill */ + __le32 addr; + __le32 value; + __le32 size; +} __packed; + +/* free-form comment + * for informational purpose, data_size is @head.size from record header + */ +struct wil_fw_record_comment { /* type == wil_fw_type_comment */ + u8 data[0]; /* free-form data [data_size], see above */ +} __packed; + +/* perform action + * data_size = @head.size - offsetof(struct wil_fw_record_action, data) + */ +struct wil_fw_record_action { /* type == wil_fw_type_action */ + __le32 action; /* action to perform: reset, wait for fw ready etc. 
*/ + __le32 data[0]; /* action specific, [data_size], see above */ +} __packed; + +/* data block for struct wil_fw_record_direct_write */ +struct wil_fw_data_dwrite { + __le32 addr; + __le32 value; + __le32 mask; +} __packed; + +/* write @value to the @addr, + * preserve original bits accordingly to the @mask + * data_size is @head.size where @head is record header + */ +struct wil_fw_record_direct_write { /* type == wil_fw_type_direct_write */ + struct wil_fw_data_dwrite data[0]; +} __packed; + +/* verify condition: [@addr] & @mask == @value + * if condition not met, firmware download fails + */ +struct wil_fw_record_verify { /* type == wil_fw_verify */ + __le32 addr; /* read from this address */ + __le32 value; /* reference value */ + __le32 mask; /* mask for verification */ +} __packed; + +/* file header + * First record of every file + */ +struct wil_fw_record_file_header { + __le32 signature ; /* Wilocity signature */ + __le32 reserved; + __le32 crc; /* crc32 of the following data */ + __le32 version; /* format version */ + __le32 data_len; /* total data in file, including this record */ + u8 comment[32]; /* short description */ +} __packed; + +/* 1-dword gateway */ +/* data block for the struct wil_fw_record_gateway_data */ +struct wil_fw_data_gw { + __le32 addr; + __le32 value; +} __packed; + +/* gateway write block. + * write starting address and values from the data buffer + * through the gateway + * data_size inferred from the @head.size. For this case, + * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data, data) + */ +struct wil_fw_record_gateway_data { /* type == wil_fw_type_gateway_data */ + __le32 gateway_addr_addr; + __le32 gateway_value_addr; + __le32 gateway_cmd_addr; + __le32 gateway_ctrl_address; +#define WIL_FW_GW_CTL_BUSY BIT(29) /* gateway busy performing operation */ +#define WIL_FW_GW_CTL_RUN BIT(30) /* start gateway operation */ + __le32 command; + struct wil_fw_data_gw data[0]; /* total size [data_size], see above */ +} __packed; + +/* 4-dword gateway */ +/* data block for the struct wil_fw_record_gateway_data4 */ +struct wil_fw_data_gw4 { + __le32 addr; + __le32 value[4]; +} __packed; + +/* gateway write block. + * write starting address and values from the data buffer + * through the gateway + * data_size inferred from the @head.size. For this case, + * data_size = @head.size - offsetof(struct wil_fw_record_gateway_data4, data) + */ +struct wil_fw_record_gateway_data4 { /* type == wil_fw_type_gateway_data4 */ + __le32 gateway_addr_addr; + __le32 gateway_value_addr[4]; + __le32 gateway_cmd_addr; + __le32 gateway_ctrl_address; /* same logic as for 1-dword gw */ + __le32 command; + struct wil_fw_data_gw4 data[0]; /* total size [data_size], see above */ +} __packed; diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c new file mode 100644 index 0000000000000000000000000000000000000000..44cb71f5ea5b76a0ccb3c9f9db6029aa7b1b6ec8 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Algorithmic part of the firmware download. + * To be included in the container file providing framework + */ + +#define wil_err_fw(wil, fmt, arg...) wil_err(wil, "ERR[ FW ]" fmt, ##arg) +#define wil_dbg_fw(wil, fmt, arg...) wil_dbg(wil, "DBG[ FW ]" fmt, ##arg) +#define wil_hex_dump_fw(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump_debug("DBG[ FW ]" prefix_str, \ + prefix_type, rowsize, \ + groupsize, buf, len, ascii) + +#define FW_ADDR_CHECK(ioaddr, val, msg) do { \ + ioaddr = wmi_buffer(wil, val); \ + if (!ioaddr) { \ + wil_err_fw(wil, "bad " msg ": 0x%08x\n", \ + le32_to_cpu(val)); \ + return -EINVAL; \ + } \ + } while (0) + +/** + * wil_fw_verify - verify firmware file validity + * + * perform various checks for the firmware file header. + * records are not validated. + * + * Return file size or negative error + */ +static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size) +{ + const struct wil_fw_record_head *hdr = (const void *)data; + struct wil_fw_record_file_header fh; + const struct wil_fw_record_file_header *fh_; + u32 crc; + u32 dlen; + + if (size % 4) { + wil_err_fw(wil, "image size not aligned: %zu\n", size); + return -EINVAL; + } + /* have enough data for the file header? */ + if (size < sizeof(*hdr) + sizeof(fh)) { + wil_err_fw(wil, "file too short: %zu bytes\n", size); + return -EINVAL; + } + + /* start with the file header? */ + if (le16_to_cpu(hdr->type) != wil_fw_type_file_header) { + wil_err_fw(wil, "no file header\n"); + return -EINVAL; + } + + /* data_len */ + fh_ = (struct wil_fw_record_file_header *)&hdr[1]; + dlen = le32_to_cpu(fh_->data_len); + if (dlen % 4) { + wil_err_fw(wil, "data length not aligned: %lu\n", (ulong)dlen); + return -EINVAL; + } + if (size < dlen) { + wil_err_fw(wil, "file truncated at %zu/%lu\n", + size, (ulong)dlen); + return -EINVAL; + } + if (dlen < sizeof(*hdr) + sizeof(fh)) { + wil_err_fw(wil, "data length too short: %lu\n", (ulong)dlen); + return -EINVAL; + } + + /* signature */ + if (le32_to_cpu(fh_->signature) != WIL_FW_SIGNATURE) { + wil_err_fw(wil, "bad header signature: 0x%08x\n", + le32_to_cpu(fh_->signature)); + return -EINVAL; + } + + /* version */ + if (le32_to_cpu(fh_->version) > WIL_FW_FMT_VERSION) { + wil_err_fw(wil, "unsupported header version: %d\n", + le32_to_cpu(fh_->version)); + return -EINVAL; + } + + /* checksum. 
~crc32(~0, data, size) when fh.crc set to 0*/ + fh = *fh_; + fh.crc = 0; + + crc = crc32_le(~0, (unsigned char const *)hdr, sizeof(*hdr)); + crc = crc32_le(crc, (unsigned char const *)&fh, sizeof(fh)); + crc = crc32_le(crc, (unsigned char const *)&fh_[1], + dlen - sizeof(*hdr) - sizeof(fh)); + crc = ~crc; + + if (crc != le32_to_cpu(fh_->crc)) { + wil_err_fw(wil, "checksum mismatch:" + " calculated for %lu bytes 0x%08x != 0x%08x\n", + (ulong)dlen, crc, le32_to_cpu(fh_->crc)); + return -EINVAL; + } + + return (int)dlen; +} + +static int fw_handle_comment(struct wil6210_priv *wil, const void *data, + size_t size) +{ + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true); + + return 0; +} + +static int fw_handle_data(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_data *d = data; + void __iomem *dst; + size_t s = size - sizeof(*d); + + if (size < sizeof(*d) + sizeof(u32)) { + wil_err_fw(wil, "data record too short: %zu\n", size); + return -EINVAL; + } + + FW_ADDR_CHECK(dst, d->addr, "address"); + wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr), + s); + wil_memcpy_toio_32(dst, d->data, s); + wmb(); /* finish before processing next record */ + + return 0; +} + +static int fw_handle_fill(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_fill *d = data; + void __iomem *dst; + u32 v; + size_t s = (size_t)le32_to_cpu(d->size); + + if (size != sizeof(*d)) { + wil_err_fw(wil, "bad size for fill record: %zu\n", size); + return -EINVAL; + } + + if (s < sizeof(u32)) { + wil_err_fw(wil, "fill size too short: %zu\n", s); + return -EINVAL; + } + + if (s % sizeof(u32)) { + wil_err_fw(wil, "fill size not aligned: %zu\n", s); + return -EINVAL; + } + + FW_ADDR_CHECK(dst, d->addr, "address"); + + v = le32_to_cpu(d->value); + wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n", + le32_to_cpu(d->addr), v, s); + wil_memset_toio_32(dst, v, s); + wmb(); /* finish before processing next record */ + + return 0; +} + +static int fw_handle_file_header(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_file_header *d = data; + + if (size != sizeof(*d)) { + wil_err_fw(wil, "file header length incorrect: %zu\n", size); + return -EINVAL; + } + + wil_dbg_fw(wil, "new file, ver. 
%d, %i bytes\n", + d->version, d->data_len); + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment, + sizeof(d->comment), true); + + return 0; +} + +static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_direct_write *d = data; + const struct wil_fw_data_dwrite *block = d->data; + int n, i; + + if (size % sizeof(*block)) { + wil_err_fw(wil, "record size not aligned on %zu: %zu\n", + sizeof(*block), size); + return -EINVAL; + } + n = size / sizeof(*block); + + for (i = 0; i < n; i++) { + void __iomem *dst; + u32 m = le32_to_cpu(block[i].mask); + u32 v = le32_to_cpu(block[i].value); + u32 x, y; + + FW_ADDR_CHECK(dst, block[i].addr, "address"); + + x = ioread32(dst); + y = (x & m) | (v & ~m); + wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x " + "(old 0x%08x val 0x%08x mask 0x%08x)\n", + le32_to_cpu(block[i].addr), y, x, v, m); + iowrite32(y, dst); + wmb(); /* finish before processing next record */ + } + + return 0; +} + +static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr, + void __iomem *gwa_cmd, void __iomem *gwa_ctl, u32 gw_cmd, + u32 a) +{ + unsigned delay = 0; + + iowrite32(a, gwa_addr); + iowrite32(gw_cmd, gwa_cmd); + wmb(); /* finish before activate gw */ + + iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */ + do { + udelay(1); /* typical time is few usec */ + if (delay++ > 100) { + wil_err_fw(wil, "gw timeout\n"); + return -EINVAL; + } + } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */ + + return 0; +} + +static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_gateway_data *d = data; + const struct wil_fw_data_gw *block = d->data; + void __iomem *gwa_addr; + void __iomem *gwa_val; + void __iomem *gwa_cmd; + void __iomem *gwa_ctl; + u32 gw_cmd; + int n, i; + + if (size < sizeof(*d) + sizeof(*block)) { + wil_err_fw(wil, "gateway record too short: %zu\n", size); + return -EINVAL; + } + + if ((size - sizeof(*d)) % sizeof(*block)) { + wil_err_fw(wil, "gateway record data size" + " not aligned on %zu: %zu\n", + sizeof(*block), size - sizeof(*d)); + return -EINVAL; + } + n = (size - sizeof(*d)) / sizeof(*block); + + gw_cmd = le32_to_cpu(d->command); + + wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n", + n, gw_cmd); + + FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); + FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr"); + FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); + FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + + wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x" + " cmd 0x%08x ctl 0x%08x\n", + le32_to_cpu(d->gateway_addr_addr), + le32_to_cpu(d->gateway_value_addr), + le32_to_cpu(d->gateway_cmd_addr), + le32_to_cpu(d->gateway_ctrl_address)); + + for (i = 0; i < n; i++) { + int rc; + u32 a = le32_to_cpu(block[i].addr); + u32 v = le32_to_cpu(block[i].value); + + wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n", + i, a, v); + + iowrite32(v, gwa_val); + rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); + if (rc) + return rc; + } + + return 0; +} + +static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, + size_t size) +{ + const struct wil_fw_record_gateway_data4 *d = data; + const struct wil_fw_data_gw4 *block = d->data; + void __iomem *gwa_addr; + void __iomem *gwa_val[ARRAY_SIZE(block->value)]; + void __iomem *gwa_cmd; + void __iomem *gwa_ctl; + u32 gw_cmd; + int n, i, k; + 
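/* Illustration (not from the driver): gw_write() above is a classic polled
 * handshake -- program the address, program the command, raise the RUN bit,
 * then spin until BUSY clears, giving up after roughly 100 us. The same
 * pattern reduced to a generic helper (hypothetical name):
 */
static int poll_busy_clear(void __iomem *reg, u32 busy_bit,
			   unsigned int timeout_us)
{
	while (ioread32(reg) & busy_bit) {
		if (!timeout_us--)
			return -ETIMEDOUT;
		udelay(1);
	}
	return 0;
}
/* gw_write() returns -EINVAL rather than -ETIMEDOUT on timeout, and relies
 * on the wmb() before the RUN write so the address/command writes land first.
 */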
+ if (size < sizeof(*d) + sizeof(*block)) { + wil_err_fw(wil, "gateway4 record too short: %zu\n", size); + return -EINVAL; + } + + if ((size - sizeof(*d)) % sizeof(*block)) { + wil_err_fw(wil, "gateway4 record data size" + " not aligned on %zu: %zu\n", + sizeof(*block), size - sizeof(*d)); + return -EINVAL; + } + n = (size - sizeof(*d)) / sizeof(*block); + + gw_cmd = le32_to_cpu(d->command); + + wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n", + n, gw_cmd); + + FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr"); + for (k = 0; k < ARRAY_SIZE(block->value); k++) + FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k], + "gateway_value_addr"); + FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr"); + FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address"); + + wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n", + le32_to_cpu(d->gateway_addr_addr), + le32_to_cpu(d->gateway_cmd_addr), + le32_to_cpu(d->gateway_ctrl_address)); + wil_hex_dump_fw("val addresses: ", DUMP_PREFIX_NONE, 16, 4, + d->gateway_value_addr, sizeof(d->gateway_value_addr), + false); + + for (i = 0; i < n; i++) { + int rc; + u32 a = le32_to_cpu(block[i].addr); + u32 v[ARRAY_SIZE(block->value)]; + + for (k = 0; k < ARRAY_SIZE(block->value); k++) + v[k] = le32_to_cpu(block[i].value[k]); + + wil_dbg_fw(wil, " gw4 write[%3d] [0x%08x] <==\n", i, a); + wil_hex_dump_fw(" val ", DUMP_PREFIX_NONE, 16, 4, v, + sizeof(v), false); + + for (k = 0; k < ARRAY_SIZE(block->value); k++) + iowrite32(v[k], gwa_val[k]); + rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); + if (rc) + return rc; + } + + return 0; +} + +static const struct { + int type; + int (*handler)(struct wil6210_priv *wil, const void *data, size_t size); +} wil_fw_handlers[] = { + {wil_fw_type_comment, fw_handle_comment}, + {wil_fw_type_data, fw_handle_data}, + {wil_fw_type_fill, fw_handle_fill}, + /* wil_fw_type_action */ + /* wil_fw_type_verify */ + {wil_fw_type_file_header, fw_handle_file_header}, + {wil_fw_type_direct_write, fw_handle_direct_write}, + {wil_fw_type_gateway_data, fw_handle_gateway_data}, + {wil_fw_type_gateway_data4, fw_handle_gateway_data4}, +}; + +static int wil_fw_handle_record(struct wil6210_priv *wil, int type, + const void *data, size_t size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) { + if (wil_fw_handlers[i].type == type) + return wil_fw_handlers[i].handler(wil, data, size); + } + + wil_err_fw(wil, "unknown record type: %d\n", type); + return -EINVAL; +} + +/** + * wil_fw_load - load FW into device + * + * Load the FW and uCode code and data to the corresponding device + * memory regions + * + * Return error code + */ +static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size) +{ + int rc = 0; + const struct wil_fw_record_head *hdr; + size_t s, hdr_sz; + + for (hdr = data;; hdr = (const void *)hdr + s, size -= s) { + if (size < sizeof(*hdr)) + break; + hdr_sz = le32_to_cpu(hdr->size); + s = sizeof(*hdr) + hdr_sz; + if (s > size) + break; + if (hdr_sz % 4) { + wil_err_fw(wil, "unaligned record size: %zu\n", + hdr_sz); + return -EINVAL; + } + rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type), + &hdr[1], hdr_sz); + if (rc) + return rc; + } + if (size) { + wil_err_fw(wil, "unprocessed bytes: %zu\n", size); + if (size >= sizeof(*hdr)) { + wil_err_fw(wil, "Stop at offset %ld" + " record type %d [%zd bytes]\n", + (const void *)hdr - data, + le16_to_cpu(hdr->type), hdr_sz); + } + return -EINVAL; + } + /* Mark FW as loaded from host 
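/* Illustration (not from the driver): as wil_fw_load() above shows, a .fw
 * image is a flat sequence of size-prefixed records -- each one a
 * struct wil_fw_record_head followed by head.size bytes of body -- applied
 * strictly in order, e.g. (record types and contents shown schematically):
 *
 *   [head: file_header ][signature, crc, version, data_len, comment]
 *   [head: comment     ][free-form text, only hex-dumped to the debug log]
 *   [head: data        ][addr][payload copied to addr via wil_memcpy_toio_32]
 *   [head: gateway_data][gw register addresses][command][{addr,value}...]
 *
 * wil_fw_verify() requires the first record to be the file header, and
 * wil_fw_handle_record() dispatches every record through wil_fw_handlers[];
 * an unknown type, a truncated record or an unaligned size aborts the
 * download with -EINVAL.
 */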
*/ + S(RGF_USER_USAGE_6, 1); + + return rc; +} + +/** + * wil_request_firmware - Request firmware and load to device + * + * Request firmware image from the file and load it to device + * + * Return error code + */ +int wil_request_firmware(struct wil6210_priv *wil, const char *name) +{ + int rc, rc1; + const struct firmware *fw; + size_t sz; + const void *d; + + rc = request_firmware(&fw, name, wil_to_pcie_dev(wil)); + if (rc) { + wil_err_fw(wil, "Failed to load firmware %s\n", name); + return rc; + } + wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size); + + for (sz = fw->size, d = fw->data; sz; sz -= rc1, d += rc1) { + rc1 = wil_fw_verify(wil, d, sz); + if (rc1 < 0) { + rc = rc1; + goto out; + } + rc = wil_fw_load(wil, d, rc1); + if (rc < 0) + goto out; + } + +out: + release_firmware(fw); + return rc; +} diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 67f1002a03a13a88d7224a3a8abea8f234aa9ae2..90f416f239bd1478132c7abc693e1b060ac3c63b 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -135,7 +135,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); } -void wil6210_disable_irq(struct wil6210_priv *wil) +void wil_mask_irq(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); @@ -145,7 +145,7 @@ void wil6210_disable_irq(struct wil6210_priv *wil) wil6210_mask_irq_pseudo(wil); } -void wil6210_enable_irq(struct wil6210_priv *wil) +void wil_unmask_irq(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); @@ -157,17 +157,7 @@ void wil6210_enable_irq(struct wil6210_priv *wil) offsetof(struct RGF_ICR, ICC)); /* interrupt moderation parameters */ - if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { - /* disable interrupt moderation for monitor - * to get better timestamp precision - */ - iowrite32(0, wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL)); - } else { - iowrite32(WIL6210_ITR_TRSH, - wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); - iowrite32(BIT_DMA_ITR_CNT_CRL_EN, - wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL)); - } + wil_set_itr_trsh(wil); wil6210_unmask_irq_pseudo(wil); wil6210_unmask_irq_tx(wil); @@ -196,8 +186,13 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil_dbg_irq(wil, "RX done\n"); isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; if (test_bit(wil_status_reset_done, &wil->status)) { - wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); - napi_schedule(&wil->napi_rx); + if (test_bit(wil_status_napi_en, &wil->status)) { + wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); + napi_schedule(&wil->napi_rx); + } else { + wil_err(wil, "Got Rx interrupt while " + "stopping interface\n"); + } } else { wil_err(wil, "Got Rx interrupt while in reset\n"); } @@ -506,7 +501,8 @@ static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) return rc; } -/* can't use wil_ioread32_and_clear because ICC value is not ser yet */ + +/* can't use wil_ioread32_and_clear because ICC value is not set yet */ static inline void wil_clear32(void __iomem *addr) { u32 x = ioread32(addr); @@ -522,11 +518,15 @@ void wil6210_clear_irq(struct wil6210_priv *wil) offsetof(struct RGF_ICR, ICR)); wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, 
ICR)); + wmb(); /* make sure write completed */ } int wil6210_init_irq(struct wil6210_priv *wil, int irq) { int rc; + + wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi); + if (wil->n_msi == 3) rc = wil6210_request_3msi(wil, irq); else @@ -534,17 +534,14 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq) wil6210_thread_irq, wil->n_msi ? 0 : IRQF_SHARED, WIL_NAME, wil); - if (rc) - return rc; - - wil6210_enable_irq(wil); - - return 0; + return rc; } void wil6210_fini_irq(struct wil6210_priv *wil, int irq) { - wil6210_disable_irq(wil); + wil_dbg_misc(wil, "%s()\n", __func__); + + wil_mask_irq(wil); free_irq(irq, wil); if (wil->n_msi == 3) { free_irq(irq + 1, wil); diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c new file mode 100644 index 0000000000000000000000000000000000000000..e9c0673819c624613ce25e5612f7886b592424d8 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "wil6210.h" +#include + +#define wil_hex_dump_ioctl(prefix_str, buf, len) \ + print_hex_dump_debug("DBG[IOC ]" prefix_str, \ + DUMP_PREFIX_OFFSET, 16, 1, buf, len, true) +#define wil_dbg_ioctl(wil, fmt, arg...) 
wil_dbg(wil, "DBG[IOC ]" fmt, ##arg) + +static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr, + uint32_t size, enum wil_memio_op op) +{ + void __iomem *a; + u32 off; + + switch (op & wil_mmio_addr_mask) { + case wil_mmio_addr_linker: + a = wmi_buffer(wil, cpu_to_le32(addr)); + break; + case wil_mmio_addr_ahb: + a = wmi_addr(wil, addr); + break; + case wil_mmio_addr_bar: + a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF); + break; + default: + wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op); + return NULL; + } + + off = a - wil->csr; + if (size >= WIL6210_MEM_SIZE - off) { + wil_err(wil, "Requested block does not fit into memory: " + "off = 0x%08x size = 0x%08x\n", off, size); + return NULL; + } + + return a; +} + +static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data) +{ + struct wil_memio io; + void __iomem *a; + bool need_copy = false; + + if (copy_from_user(&io, data, sizeof(io))) + return -EFAULT; + + wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n", + io.addr, io.val, io.op); + + a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op); + if (!a) { + wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, + io.op); + return -EINVAL; + } + /* operation */ + switch (io.op & wil_mmio_op_mask) { + case wil_mmio_read: + io.val = ioread32(a); + need_copy = true; + break; + case wil_mmio_write: + iowrite32(io.val, a); + wmb(); /* make sure write propagated to HW */ + break; + default: + wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); + return -EINVAL; + } + + if (need_copy) { + wil_dbg_ioctl(wil, "IO done: addr = 0x%08x" + " val = 0x%08x op = 0x%08x\n", + io.addr, io.val, io.op); + if (copy_to_user(data, &io, sizeof(io))) + return -EFAULT; + } + + return 0; +} + +static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data) +{ + struct wil_memio_block io; + void *block; + void __iomem *a; + int rc = 0; + + if (copy_from_user(&io, data, sizeof(io))) + return -EFAULT; + + wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n", + io.addr, io.size, io.op); + + /* size */ + if (io.size % 4) { + wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size); + return -EINVAL; + } + + a = wil_ioc_addr(wil, io.addr, io.size, io.op); + if (!a) { + wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, + io.op); + return -EINVAL; + } + + block = kmalloc(io.size, GFP_USER); + if (!block) + return -ENOMEM; + + /* operation */ + switch (io.op & wil_mmio_op_mask) { + case wil_mmio_read: + wil_memcpy_fromio_32(block, a, io.size); + wil_hex_dump_ioctl("Read ", block, io.size); + if (copy_to_user(io.block, block, io.size)) { + rc = -EFAULT; + goto out_free; + } + break; + case wil_mmio_write: + if (copy_from_user(block, io.block, io.size)) { + rc = -EFAULT; + goto out_free; + } + wil_memcpy_toio_32(a, block, io.size); + wmb(); /* make sure write propagated to HW */ + wil_hex_dump_ioctl("Write ", block, io.size); + break; + default: + wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); + rc = -EINVAL; + break; + } + +out_free: + kfree(block); + return rc; +} + +int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd) +{ + switch (cmd) { + case WIL_IOCTL_MEMIO: + return wil_ioc_memio_dword(wil, data); + case WIL_IOCTL_MEMIO_BLOCK: + return wil_ioc_memio_block(wil, data); + default: + wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd); + return -ENOIOCTLCMD; + } +} diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 
3704d2a434f340b4374f26e5bde083b9830e09a3..6500caf8d609ccd90fae94ce704635df1b50cb16 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,10 +20,26 @@ #include "wil6210.h" #include "txrx.h" +#include "wmi.h" -static bool no_fw_recovery; +#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 +#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 + +bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery"); +MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); + +static bool no_fw_load = true; +module_param(no_fw_load, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash."); + +static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT; + +module_param(itr_trsh, uint, S_IRUGO); +MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs."); + +#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ +#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */ /* * Due to a hardware issue, @@ -64,6 +80,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid) struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct wil_sta_info *sta = &wil->sta[cid]; + wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, sta->status); @@ -83,9 +100,16 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid) } for (i = 0; i < WIL_STA_TID_NUM; i++) { - struct wil_tid_ampdu_rx *r = sta->tid_rx[i]; + struct wil_tid_ampdu_rx *r; + unsigned long flags; + + spin_lock_irqsave(&sta->tid_rx_lock, flags); + + r = sta->tid_rx[i]; sta->tid_rx[i] = NULL; wil_tid_ampdu_rx_free(wil, r); + + spin_unlock_irqrestore(&sta->tid_rx_lock, flags); } for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { if (wil->vring2cid_tid[i][0] == cid) @@ -167,17 +191,38 @@ static void wil_scan_timer_fn(ulong x) schedule_work(&wil->fw_error_worker); } +static int wil_wait_for_recovery(struct wil6210_priv *wil) +{ + if (wait_event_interruptible(wil->wq, wil->recovery_state != + fw_recovery_pending)) { + wil_err(wil, "Interrupt, canceling recovery\n"); + return -ERESTARTSYS; + } + if (wil->recovery_state != fw_recovery_running) { + wil_info(wil, "Recovery cancelled\n"); + return -EINTR; + } + wil_info(wil, "Proceed with recovery\n"); + return 0; +} + +void wil_set_recovery_state(struct wil6210_priv *wil, int state) +{ + wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__, + wil->recovery_state, state); + + wil->recovery_state = state; + wake_up_interruptible(&wil->wq); +} + static void wil_fw_error_worker(struct work_struct *work) { - struct wil6210_priv *wil = container_of(work, - struct wil6210_priv, fw_error_worker); + struct wil6210_priv *wil = container_of(work, struct wil6210_priv, + fw_error_worker); struct wireless_dev *wdev = wil->wdev; wil_dbg_misc(wil, "fw error worker\n"); - if (no_fw_recovery) - return; - /* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO * passed since last recovery attempt */ @@ -200,12 +245,15 @@ static void wil_fw_error_worker(struct work_struct *work) case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_MONITOR: - wil_info(wil, "fw error recovery 
started (try %d)...\n", + wil_info(wil, "fw error recovery requested (try %d)...\n", wil->recovery_count); - wil_reset(wil); + if (!no_fw_recovery) + wil->recovery_state = fw_recovery_running; + if (0 != wil_wait_for_recovery(wil)) + break; - /* need to re-allocate Rx ring after reset */ - wil_rx_init(wil); + __wil_down(wil); + __wil_up(wil); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@ -220,6 +268,7 @@ static void wil_fw_error_worker(struct work_struct *work) static int wil_find_free_vring(struct wil6210_priv *wil) { int i; + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { if (!wil->vring_tx[i].va) return i; @@ -254,14 +303,19 @@ static void wil_connect_worker(struct work_struct *work) int wil_priv_init(struct wil6210_priv *wil) { + uint i; + wil_dbg_misc(wil, "%s()\n", __func__); memset(wil->sta, 0, sizeof(wil->sta)); + for (i = 0; i < WIL6210_MAX_CID; i++) + spin_lock_init(&wil->sta[i].tid_rx_lock); mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); init_completion(&wil->wmi_ready); + init_completion(&wil->wmi_call); wil->pending_connect_cid = -1; setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); @@ -274,6 +328,7 @@ int wil_priv_init(struct wil6210_priv *wil) INIT_LIST_HEAD(&wil->pending_wmi_ev); spin_lock_init(&wil->wmi_ev_lock); + init_waitqueue_head(&wil->wq); wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi"); if (!wil->wmi_wq) @@ -286,18 +341,24 @@ int wil_priv_init(struct wil6210_priv *wil) } wil->last_fw_recovery = jiffies; + wil->itr_trsh = itr_trsh; return 0; } void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid) { + wil_dbg_misc(wil, "%s()\n", __func__); + del_timer_sync(&wil->connect_timer); _wil6210_disconnect(wil, bssid); } void wil_priv_deinit(struct wil6210_priv *wil) { + wil_dbg_misc(wil, "%s()\n", __func__); + + wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); cancel_work_sync(&wil->disconnect_worker); cancel_work_sync(&wil->fw_error_worker); @@ -309,7 +370,29 @@ void wil_priv_deinit(struct wil6210_priv *wil) destroy_workqueue(wil->wmi_wq); } -static void wil_target_reset(struct wil6210_priv *wil) +/* target operations */ +/* register read */ +#define R(a) ioread32(wil->csr + HOSTADDR(a)) +/* register write. 
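/* Note (not part of the patch): the recovery flow added above is a small
 * three-state machine kept in wil->recovery_state (enum in wil6210.h below):
 *
 *   fw_recovery_idle    - nothing pending; also forced in wil_down()
 *   fw_recovery_pending - wil_fw_error_recovery() saw a FW error and
 *                         scheduled fw_error_worker
 *   fw_recovery_running - the worker may proceed with __wil_down()/__wil_up()
 *
 * With no_fw_recovery set, the worker does not advance the state itself; it
 * parks in wil_wait_for_recovery() until something else calls
 * wil_set_recovery_state() (that trigger is outside these hunks), so recovery
 * becomes a manual step rather than being skipped outright as before.
 */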
wmb() to make sure it is completed */ +#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) +/* register set = read, OR, write */ +#define S(a, v) W(a, R(a) | v) +/* register clear = read, AND with inverted, write */ +#define C(a, v) W(a, R(a) & ~v) + +static inline void wil_halt_cpu(struct wil6210_priv *wil) +{ + W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); + W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); +} + +static inline void wil_release_cpu(struct wil6210_priv *wil) +{ + /* Start CPU */ + W(RGF_USER_USER_CPU_0, 1); +} + +static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; u32 hw_state; @@ -318,57 +401,41 @@ static void wil_target_reset(struct wil6210_priv *wil) wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->board->name); - /* register read */ -#define R(a) ioread32(wil->csr + HOSTADDR(a)) - /* register write */ -#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a)) - /* register set = read, OR, write */ -#define S(a, v) W(a, R(a) | v) - /* register clear = read, AND with inverted, write */ -#define C(a, v) W(a, R(a) & ~v) - wil->hw_version = R(RGF_USER_FW_REV_ID); rev_id = wil->hw_version & 0xff; /* Clear MAC link up */ S(RGF_HP_CTRL, BIT(15)); - /* hpal_perst_from_pad_src_n_mask */ - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6)); - /* car_perst_rst_src_n_mask */ - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7)); - wmb(); /* order is important here */ + S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); + S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + + wil_halt_cpu(wil); + C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); /* 40 MHz */ if (is_sparrow) { W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); - wmb(); /* order is important here */ + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); } - W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ - W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ - wmb(); /* order is important here */ - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, is_sparrow ? 0x000000B0 : 0x00000170); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00); - wmb(); /* order is important here */ + W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, is_sparrow ? 0x000000f0 : 0x00000170); + W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); if (is_sparrow) { W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); - wmb(); /* order is important here */ + W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); } W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - wmb(); /* order is important here */ if (is_sparrow) { W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); /* reset A2 PCIE AHB */ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); - } else { W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001); if (rev_id == 1) { @@ -378,21 +445,19 @@ static void wil_target_reset(struct wil6210_priv *wil) W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8)); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); } - } /* TODO: check order here!!! Erez code is different */ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - wmb(); /* order is important here */ - /* wait until device ready */ + /* wait until device ready. 
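/* Note (not from the patch): with the helpers above, a line such as
 * S(RGF_HP_CTRL, BIT(15)) is just a read-modify-write with an explicit write
 * barrier; the expansion is roughly the sketch below (hypothetical wrapper
 * name). The wmb() folded into W() is what lets the old scattered
 * "order is important here" barriers go away.
 */
static inline void rgf_hp_ctrl_set_bit15(struct wil6210_priv *wil)
{
	void __iomem *a = wil->csr + HOSTADDR(RGF_HP_CTRL);

	iowrite32(ioread32(a) | BIT(15), a);
	wmb();	/* keep this write ordered before the next register access */
}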
typical time is 200..250 msec */ do { - msleep(1); + msleep(RST_DELAY); hw_state = R(RGF_USER_HW_MACHINE_STATE); - if (delay++ > 100) { + if (delay++ > RST_COUNT) { wil_err(wil, "Reset not completed, hw_state 0x%08x\n", hw_state); - return; + return -ETIME; } } while (hw_state != HW_MACHINE_BOOT_DONE); @@ -401,15 +466,35 @@ static void wil_target_reset(struct wil6210_priv *wil) W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8)); C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); - wmb(); /* order is important here */ - wil_dbg_misc(wil, "Reset completed in %d ms\n", delay); + wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); + return 0; +} + +/** + * wil_set_itr_trsh: - apply interrupt coalescing params + */ +void wil_set_itr_trsh(struct wil6210_priv *wil) +{ + /* disable, use usec resolution */ + W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EXT_TICK); + + /* disable interrupt moderation for monitor + * to get better timestamp precision + */ + if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) + return; + + wil_info(wil, "set ITR_TRSH = %d usec\n", wil->itr_trsh); + W(RGF_DMA_ITR_CNT_TRSH, wil->itr_trsh); + W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EN | + BIT_DMA_ITR_CNT_CRL_EXT_TICK); /* start it */ +} #undef R #undef W #undef S #undef C -} void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { @@ -424,6 +509,7 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); ulong left = wait_for_completion_timeout(&wil->wmi_ready, to); + if (0 == left) { wil_err(wil, "Firmware not ready\n"); return -ETIME; @@ -443,15 +529,15 @@ int wil_reset(struct wil6210_priv *wil) { int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + WARN_ON(!mutex_is_locked(&wil->mutex)); + WARN_ON(test_bit(wil_status_napi_en, &wil->status)); cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL); wil->status = 0; /* prevent NAPI from being scheduled */ - if (test_bit(wil_status_napi_en, &wil->status)) { - napi_synchronize(&wil->napi_rx); - } if (wil->scan_request) { wil_dbg_misc(wil, "Abort scan_request 0x%p\n", @@ -461,24 +547,50 @@ int wil_reset(struct wil6210_priv *wil) wil->scan_request = NULL; } - wil6210_disable_irq(wil); + wil_mask_irq(wil); wmi_event_flush(wil); flush_workqueue(wil->wmi_wq_conn); flush_workqueue(wil->wmi_wq); - /* TODO: put MAC in reset */ - wil_target_reset(wil); - + rc = wil_target_reset(wil); wil_rx_fini(wil); + if (rc) + return rc; + + if (!no_fw_load) { + wil_info(wil, "Use firmware <%s>\n", WIL_FW_NAME); + wil_halt_cpu(wil); + /* Loading f/w from the file */ + rc = wil_request_firmware(wil, WIL_FW_NAME); + if (rc) + return rc; + + /* clear any interrupts which on-card-firmware may have set */ + wil6210_clear_irq(wil); + { /* CAF_ICR - clear and mask */ + u32 a = HOSTADDR(RGF_CAF_ICR) + + offsetof(struct RGF_ICR, ICR); + u32 m = HOSTADDR(RGF_CAF_ICR) + + offsetof(struct RGF_ICR, IMV); + u32 icr = ioread32(wil->csr + a); + + iowrite32(icr, wil->csr + a); /* W1C */ + iowrite32(~0, wil->csr + m); + wmb(); /* wait for completion */ + } + wil_release_cpu(wil); + } else { + wil_info(wil, "Use firmware from on-card flash\n"); + } /* init after reset */ wil->pending_connect_cid = -1; reinit_completion(&wil->wmi_ready); + reinit_completion(&wil->wmi_call); - /* TODO: release MAC reset */ - wil6210_enable_irq(wil); + wil_unmask_irq(wil); /* we just started MAC, wait for FW ready */ rc = wil_wait_for_fw_ready(wil); @@ -489,6 +601,7 @@ int wil_reset(struct wil6210_priv *wil) void wil_fw_error_recovery(struct wil6210_priv *wil) { wil_dbg_misc(wil, 
"starting fw error recovery\n"); + wil->recovery_state = fw_recovery_pending; schedule_work(&wil->fw_error_worker); } @@ -514,7 +627,7 @@ void wil_link_off(struct wil6210_priv *wil) netif_carrier_off(ndev); } -static int __wil_up(struct wil6210_priv *wil) +int __wil_up(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; @@ -560,11 +673,15 @@ static int __wil_up(struct wil6210_priv *wil) /* MAC address - pre-requisite for other commands */ wmi_set_mac_address(wil, ndev->dev_addr); - + wil_dbg_misc(wil, "NAPI enable\n"); napi_enable(&wil->napi_rx); napi_enable(&wil->napi_tx); set_bit(wil_status_napi_en, &wil->status); + if (wil->platform_ops.bus_request) + wil->platform_ops.bus_request(wil->platform_handle, + WIL_MAX_BUS_REQUEST_KBPS); + return 0; } @@ -572,6 +689,8 @@ int wil_up(struct wil6210_priv *wil) { int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + mutex_lock(&wil->mutex); rc = __wil_up(wil); mutex_unlock(&wil->mutex); @@ -579,13 +698,23 @@ int wil_up(struct wil6210_priv *wil) return rc; } -static int __wil_down(struct wil6210_priv *wil) +int __wil_down(struct wil6210_priv *wil) { + int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS / + WAIT_FOR_DISCONNECT_INTERVAL_MS; + WARN_ON(!mutex_is_locked(&wil->mutex)); - clear_bit(wil_status_napi_en, &wil->status); - napi_disable(&wil->napi_rx); - napi_disable(&wil->napi_tx); + if (wil->platform_ops.bus_request) + wil->platform_ops.bus_request(wil->platform_handle, 0); + + wil_disable_irq(wil); + if (test_and_clear_bit(wil_status_napi_en, &wil->status)) { + napi_disable(&wil->napi_rx); + napi_disable(&wil->napi_tx); + wil_dbg_misc(wil, "NAPI disable\n"); + } + wil_enable_irq(wil); if (wil->scan_request) { wil_dbg_misc(wil, "Abort scan_request 0x%p\n", @@ -595,7 +724,24 @@ static int __wil_down(struct wil6210_priv *wil) wil->scan_request = NULL; } - wil6210_disconnect(wil, NULL); + if (test_bit(wil_status_fwconnected, &wil->status) || + test_bit(wil_status_fwconnecting, &wil->status)) + wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); + + /* make sure wil is idle (not connected) */ + mutex_unlock(&wil->mutex); + while (iter--) { + int idle = !test_bit(wil_status_fwconnected, &wil->status) && + !test_bit(wil_status_fwconnecting, &wil->status); + if (idle) + break; + msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS); + } + mutex_lock(&wil->mutex); + + if (!iter) + wil_err(wil, "timeout waiting for idle FW/HW\n"); + wil_rx_fini(wil); return 0; @@ -605,6 +751,9 @@ int wil_down(struct wil6210_priv *wil) { int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + + wil_set_recovery_state(wil, fw_recovery_idle); mutex_lock(&wil->mutex); rc = __wil_down(wil); mutex_unlock(&wil->mutex); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 7afce6e8c5078fb92257a64f6c1e3fabaa65298c..239965106c05d1f878f52cd8b552047d2743d998 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,11 +17,14 @@ #include #include "wil6210.h" +#include "txrx.h" static int wil_open(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); + wil_dbg_misc(wil, "%s()\n", __func__); + return wil_up(wil); } @@ -29,6 +32,8 @@ static int wil_stop(struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); + wil_dbg_misc(wil, "%s()\n", __func__); + return wil_down(wil); } @@ -36,8 +41,10 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu) { struct wil6210_priv *wil = ndev_to_wil(ndev); - if (new_mtu < 68 || new_mtu > IEEE80211_MAX_DATA_LEN_DMG) + if (new_mtu < 68 || new_mtu > (TX_BUF_LEN - ETH_HLEN)) { + wil_err(wil, "invalid MTU %d\n", new_mtu); return -EINVAL; + } wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu); ndev->mtu = new_mtu; @@ -45,6 +52,17 @@ static int wil_change_mtu(struct net_device *ndev, int new_mtu) return 0; } +static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct wil6210_priv *wil = ndev_to_wil(ndev); + + int ret = wil_ioctl(wil, ifr->ifr_data, cmd); + + wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret); + + return ret; +} + static const struct net_device_ops wil_netdev_ops = { .ndo_open = wil_open, .ndo_stop = wil_stop, @@ -52,6 +70,7 @@ static const struct net_device_ops wil_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = wil_change_mtu, + .ndo_do_ioctl = wil_do_ioctl, }; static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) @@ -121,6 +140,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) wil->csr = csr; wil->wdev = wdev; + wil_dbg_misc(wil, "%s()\n", __func__); + rc = wil_priv_init(wil); if (rc) { dev_err(dev, "wil_priv_init failed\n"); @@ -140,6 +161,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) } ndev->netdev_ops = &wil_netdev_ops; + wil_set_ethtoolops(ndev); ndev->ieee80211_ptr = wdev; ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GRO; @@ -168,11 +190,17 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) void wil_if_free(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_misc(wil, "%s()\n", __func__); + if (!ndev) return; - free_netdev(ndev); wil_priv_deinit(wil); + + wil_to_ndev(wil) = NULL; + free_netdev(ndev); + wil_wdev_free(wil); } @@ -181,6 +209,8 @@ int wil_if_add(struct wil6210_priv *wil) struct net_device *ndev = wil_to_ndev(wil); int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + rc = register_netdev(ndev); if (rc < 0) { dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc); @@ -196,5 +226,7 @@ void wil_if_remove(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + wil_dbg_misc(wil, "%s()\n", __func__); + unregister_netdev(ndev); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d3fbfa28db62c8af2c95f824f9801b516635abff..66626a8ee728de0d06016455d20a2bb4b201c4ac 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
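/* Note (not from the patch): wil_change_mtu() above now derives the upper
 * bound from the Tx buffer size instead of IEEE80211_MAX_DATA_LEN_DMG; with
 * TX_BUF_LEN raised to 2242 in the txrx.h hunk of this patch and
 * ETH_HLEN == 14, the largest MTU accepted becomes 2242 - 14 = 2228.
 */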
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -17,6 +17,7 @@ #include #include #include +#include #include "wil6210.h" @@ -30,6 +31,28 @@ static bool debug_fw; /* = false; */ module_param(debug_fw, bool, S_IRUGO); MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug"); +void wil_disable_irq(struct wil6210_priv *wil) +{ + int irq = wil->pdev->irq; + + disable_irq(irq); + if (wil->n_msi == 3) { + disable_irq(irq + 1); + disable_irq(irq + 2); + } +} + +void wil_enable_irq(struct wil6210_priv *wil) +{ + int irq = wil->pdev->irq; + + enable_irq(irq); + if (wil->n_msi == 3) { + enable_irq(irq + 1); + enable_irq(irq + 2); + } +} + /* Bus ops */ static int wil_if_pcie_enable(struct wil6210_priv *wil) { @@ -41,6 +64,8 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) */ int msi_only = pdev->msi_enabled; + wil_dbg_misc(wil, "%s()\n", __func__); + pdev->msi_enabled = 0; pci_set_master(pdev); @@ -107,6 +132,8 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil) { struct pci_dev *pdev = wil->pdev; + wil_dbg_misc(wil, "%s()\n", __func__); + pci_clear_master(pdev); /* disable and release IRQ */ wil6210_fini_irq(wil, pdev->irq); @@ -180,6 +207,10 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) wil->board = board; wil6210_clear_irq(wil); + + wil->platform_handle = + wil_platform_init(&pdev->dev, &wil->platform_ops); + /* FW should raise IRQ when ready */ rc = wil_if_pcie_enable(wil); if (rc) { @@ -204,6 +235,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus_disable: wil_if_pcie_disable(wil); if_free: + if (wil->platform_ops.uninit) + wil->platform_ops.uninit(wil->platform_handle); wil_if_free(wil); err_iounmap: pci_iounmap(pdev, csr); @@ -218,12 +251,17 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void wil_pcie_remove(struct pci_dev *pdev) { struct wil6210_priv *wil = pci_get_drvdata(pdev); + void __iomem *csr = wil->csr; + + wil_dbg_misc(wil, "%s()\n", __func__); wil6210_debugfs_remove(wil); - wil_if_pcie_disable(wil); wil_if_remove(wil); + wil_if_pcie_disable(wil); + if (wil->platform_ops.uninit) + wil->platform_ops.uninit(wil->platform_handle); wil_if_free(wil); - pci_iounmap(pdev, wil->csr); + pci_iounmap(pdev, csr); pci_release_region(pdev, 0); pci_disable_device(pdev); } @@ -243,6 +281,8 @@ static const struct pci_device_id wil6210_pcie_ids[] = { .driver_data = (kernel_ulong_t)&wil_board_marlon }, { PCI_DEVICE(0x1ae9, 0x0310), .driver_data = (kernel_ulong_t)&wil_board_sparrow }, + { PCI_DEVICE(0x1ae9, 0x0302), /* same as above, firmware broken */ + .driver_data = (kernel_ulong_t)&wil_board_sparrow }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 180ca4793904a37d7723198f5652b02e95142430..489cb73d139bd351b8cde09c6ae9e402c130c936 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + #include "wil6210.h" #include "txrx.h" @@ -82,22 +98,25 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb) int mid = wil_rxdesc_mid(d); u16 seq = wil_rxdesc_seq(d); struct wil_sta_info *sta = &wil->sta[cid]; - struct wil_tid_ampdu_rx *r = sta->tid_rx[tid]; + struct wil_tid_ampdu_rx *r; u16 hseq; int index; + unsigned long flags; wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n", mid, cid, tid, seq); + spin_lock_irqsave(&sta->tid_rx_lock, flags); + + r = sta->tid_rx[tid]; if (!r) { + spin_unlock_irqrestore(&sta->tid_rx_lock, flags); wil_netif_rx_any(skb, ndev); return; } hseq = r->head_seq_num; - spin_lock(&r->reorder_lock); - /** Due to the race between WMI events, where BACK establishment * reported, and data Rx, few packets may be pass up before reorder * buffer get allocated. Catch up by pretending SSN is what we @@ -160,13 +179,14 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb) wil_reorder_release(wil, r); out: - spin_unlock(&r->reorder_lock); + spin_unlock_irqrestore(&sta->tid_rx_lock, flags); } struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, int size, u16 ssn) { struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) return NULL; @@ -181,7 +201,6 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, return NULL; } - spin_lock_init(&r->reorder_lock); r->ssn = ssn; r->head_seq_num = ssn; r->buf_size = size; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index d3467943d39db60972bae0ce0413b4e5eaa04585..2936ef0c18cbc700003760e5a93a9f504b9d8840 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
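/* Note (not from the patch): the per-TID reorder_lock is gone; rx_reorder.c
 * above now serializes on the per-station sta->tid_rx_lock (added to
 * struct wil_sta_info in the wil6210.h hunk below) and takes it with
 * spin_lock_irqsave(), so the Rx path and the paths that free
 * sta->tid_rx[tid] (e.g. wil_disconnect_cid() in main.c above) always see a
 * consistent pointer. The shape every access now follows, sketched:
 */
	{
		struct wil_tid_ampdu_rx *r;
		unsigned long flags;

		spin_lock_irqsave(&sta->tid_rx_lock, flags);
		r = sta->tid_rx[tid];		/* (re)read under the lock */
		/* ... use, replace or free r ... */
		spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
	}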
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -52,6 +52,7 @@ static inline int wil_vring_is_full(struct vring *vring) { return wil_vring_next_tail(vring) == vring->swhead; } + /* * Available space in Tx Vring */ @@ -86,6 +87,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) size_t sz = vring->size * sizeof(vring->va[0]); uint i; + wil_dbg_misc(wil, "%s()\n", __func__); + BUILD_BUG_ON(sizeof(vring->va[0]) != 32); vring->swhead = 0; @@ -110,7 +113,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) * we can use any */ for (i = 0; i < vring->size; i++) { - volatile struct vring_tx_desc *_d = &(vring->va[i].tx); + volatile struct vring_tx_desc *_d = &vring->va[i].tx; + _d->dma.status = TX_DMA_STATUS_DU; } @@ -125,6 +129,7 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d, { dma_addr_t pa = wil_desc_addr(&d->dma.addr); u16 dmalen = le16_to_cpu(d->dma.length); + switch (ctx->mapped_as) { case wil_mapped_as_single: dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); @@ -143,6 +148,18 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, struct device *dev = wil_to_dev(wil); size_t sz = vring->size * sizeof(vring->va[0]); + if (tx) { + int vring_index = vring - wil->vring_tx; + + wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n", + vring_index, vring->size, vring->va, + &vring->pa, vring->ctx); + } else { + wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n", + vring->size, vring->va, + &vring->pa, vring->ctx); + } + while (!wil_vring_is_empty(vring)) { dma_addr_t pa; u16 dmalen; @@ -191,11 +208,12 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, struct device *dev = wil_to_dev(wil); unsigned int sz = RX_BUF_LEN; struct vring_rx_desc dd, *d = ⅆ - volatile struct vring_rx_desc *_d = &(vring->va[i].rx); + volatile struct vring_rx_desc *_d = &vring->va[i].rx; dma_addr_t pa; /* TODO align */ struct sk_buff *skb = dev_alloc_skb(sz + headroom); + if (unlikely(!skb)) return -ENOMEM; @@ -274,9 +292,11 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, */ int len = min_t(int, 8 + sizeof(phy_data), wil_rxdesc_phy_length(d)); + if (len > 8) { void *p = skb_tail_pointer(skb); void *pa = PTR_ALIGN(p, 8); + if (skb_tailroom(skb) >= len + (pa - p)) { phy_length = len - 8; memcpy(phy_data, pa, phy_length); @@ -372,13 +392,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, int cid; struct wil_net_stats *stats; - BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); if (wil_vring_is_empty(vring)) return NULL; - _d = &(vring->va[vring->swhead].rx); + _d = &vring->va[vring->swhead].rx; if (!(_d->dma.status & RX_DMA_STATUS_DU)) { /* it is not error, we just reached end of Rx done area */ return NULL; @@ -414,7 +433,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, cid = wil_rxdesc_cid(d); stats = &wil->sta[cid].stats; stats->last_mcs_rx = wil_rxdesc_mcs(d); - wil->stats.last_mcs_rx = stats->last_mcs_rx; /* use radiotap header only if required */ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) @@ -533,7 +551,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_NORMAL] = "GRO_NORMAL", [GRO_DROP] = "GRO_DROP", }; - wil_dbg_txrx(wil, "Rx complete %d bytes => %s,\n", + wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", len, gro_res_str[rc]); } } @@ -574,7 +592,6 @@ void 
wil_rx_handle(struct wil6210_priv *wil, int *quota) else wil_netif_rx_any(skb, ndev); } - } wil_rx_refill(wil, v->size); } @@ -584,6 +601,8 @@ int wil_rx_init(struct wil6210_priv *wil) struct vring *vring = &wil->vring_rx; int rc; + wil_dbg_misc(wil, "%s()\n", __func__); + if (vring->va) { wil_err(wil, "Rx ring already allocated\n"); return -EINVAL; @@ -613,6 +632,8 @@ void wil_rx_fini(struct wil6210_priv *wil) { struct vring *vring = &wil->vring_rx; + wil_dbg_misc(wil, "%s()\n", __func__); + if (vring->va) wil_vring_free(wil, vring, 0); } @@ -647,6 +668,9 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; + wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, + cmd.vring_cfg.tx_sw_ring.max_mpdu_size); + if (vring->va) { wil_err(wil, "Tx ring [%d] already allocated\n", id); rc = -EINVAL; @@ -696,6 +720,8 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) if (!vring->va) return; + wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); + /* make sure NAPI won't touch this vring */ wil->vring_tx_data[id].enabled = 0; if (test_bit(wil_status_napi_en, &wil->status)) @@ -722,6 +748,7 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { if (wil->vring2cid_tid[i][0] == cid) { struct vring *v = &wil->vring_tx[i]; + wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", __func__, eth->h_dest, i); if (v->va) { @@ -741,6 +768,7 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil, { struct ethhdr *eth = (void *)skb->data; int cid = wil->vring2cid_tid[vring_index][0]; + memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN); } @@ -751,7 +779,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, * duplicate skb and send it to other active vrings */ static struct vring *wil_tx_bcast(struct wil6210_priv *wil, - struct sk_buff *skb) + struct sk_buff *skb) { struct vring *v, *v2; struct sk_buff *skb2; @@ -834,8 +862,8 @@ void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) } static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, - struct vring_tx_desc *d, - struct sk_buff *skb) + struct vring_tx_desc *d, + struct sk_buff *skb) { int protocol; @@ -903,10 +931,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 1 + nr_frags); return -ENOMEM; } - _d = &(vring->va[i].tx); + _d = &vring->va[i].tx; - pa = dma_map_single(dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); + pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb), skb->data, &pa); @@ -935,10 +962,11 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); + i = (swhead + f + 1) % vring->size; - _d = &(vring->va[i].tx); + _d = &vring->va[i].tx; pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) goto dma_error; vring->ctx[i].mapped_as = wil_mapped_as_page; @@ -983,7 +1011,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, i = (swhead + f) % vring->size; ctx = &vring->ctx[i]; - _d = &(vring->va[i].tx); + _d = &vring->va[i].tx; *d = *_d; _d->dma.status = TX_DMA_STATUS_DU; wil_txdesc_unmap(dev, d, ctx); @@ -997,7 +1025,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, return 
-EINVAL; } - netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct wil6210_priv *wil = ndev_to_wil(ndev); @@ -1025,15 +1052,15 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) pr_once_fw = false; /* find vring */ - if (is_unicast_ether_addr(eth->h_dest)) { + if (is_unicast_ether_addr(eth->h_dest)) vring = wil_find_tx_vring(wil, skb); - } else { + else vring = wil_tx_bcast(wil, skb); - } if (!vring) { wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); goto drop; } + /* set up vring entry */ rc = wil_tx_vring(wil, vring, skb); diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index bc5706a4f007b11eadd4b53faece30b45e49266f..de046716d2b7e2de3c96e2f8aaff266796c78967 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -20,9 +20,9 @@ #define BUF_SW_OWNED (1) #define BUF_HW_OWNED (0) -/* size of max. Rx packet */ -#define RX_BUF_LEN (2048) -#define TX_BUF_LEN (2048) +/* size of max. Tx/Rx buffers, as supported by FW */ +#define RX_BUF_LEN (2242) +#define TX_BUF_LEN (2242) /* how many bytes to reserve for rtap header? */ #define WIL6210_RTAP_SIZE (128) @@ -237,7 +237,6 @@ struct vring_tx_mac { #define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2 #define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */ - #define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0 #define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7 #define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */ @@ -246,7 +245,6 @@ struct vring_tx_mac { #define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1 #define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */ - #define TX_DMA_STATUS_DU BIT(0) struct vring_tx_dma { @@ -347,7 +345,6 @@ struct vring_rx_mac { #define RX_DMA_ERROR_L3_ERR BIT(4) #define RX_DMA_ERROR_L4_ERR BIT(5) - /* Status field */ #define RX_DMA_STATUS_DU BIT(0) #define RX_DMA_STATUS_ERROR BIT(2) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 67e9624f7111b8842492d81d07665c5ec0973705..ce6488e42091cf8a935bfe87dd84baad34224656 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -21,8 +21,14 @@ #include #include #include +#include "wil_platform.h" + +extern bool no_fw_recovery; #define WIL_NAME "wil6210" +#define WIL_FW_NAME "wil6210.fw" + +#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ struct wil_board { int board; @@ -47,7 +53,9 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MAX_TX_RINGS (24) /* HW limit */ #define WIL6210_MAX_CID (8) /* HW limit */ #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ -#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */ +/* Max supported by wil6210 value for interrupt threshold is 5sec. 
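/* Note (not from the patch): the new itr_trsh module parameter is stored
 * into wil->itr_trsh in wil_priv_init() and programmed in usec by
 * wil_set_itr_trsh(); none of the hunks shown here range-check it against
 * WIL6210_ITR_TRSH_MAX. If clamping were wanted, a one-liner at init time
 * would do (sketch only):
 *
 *	wil->itr_trsh = min_t(u32, itr_trsh, WIL6210_ITR_TRSH_MAX);
 */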
*/ +#define WIL6210_ITR_TRSH_MAX (5000000) +#define WIL6210_ITR_TRSH_DEFAULT (300) /* usec */ #define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */ #define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000) #define WIL6210_SCAN_TO msecs_to_jiffies(10000) @@ -86,22 +94,29 @@ struct RGF_ICR { /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) +#define RGF_USER_USAGE_6 (0x880018) #define RGF_USER_HW_MACHINE_STATE (0x8801dc) #define HW_MACHINE_BOOT_DONE (0x3fffffd) #define RGF_USER_USER_CPU_0 (0x8801e0) + #define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */ #define RGF_USER_MAC_CPU_0 (0x8801fc) + #define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */ #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ #define RGF_USER_CLKS_CTL_0 (0x880abc) + #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */ #define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04) #define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08) #define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c) #define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10) #define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14) + #define BIT_HPAL_PERST_FROM_PAD BIT(6) + #define BIT_CAR_PERST_RST BIT(7) #define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18) #define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0 (0x880c18) +#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c) #define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0) @@ -133,6 +148,11 @@ struct RGF_ICR { #define RGF_HP_CTRL (0x88265c) #define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4) +/* MAC timer, usec, for packet lifetime */ +#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8) + +#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */ + /* popular locations */ #define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) #define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ @@ -151,6 +171,7 @@ struct fw_map { u32 host; /* PCI/Host address - BAR0 + 0x880000 */ const char *name; /* for debugfs */ }; + /* array size should be in sync with actual definition in the wmi.c */ extern const struct fw_map fw_mapping[7]; @@ -300,18 +321,12 @@ struct pci_dev; * @timeout: reset timer value (in TUs). * @dialog_token: dialog token for aggregation session * @rcu_head: RCU head used for freeing this struct - * @reorder_lock: serializes access to reorder buffer, see below. * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. * - * The @reorder_lock is used to protect the members of this - * struct, except for @timeout, @buf_size and @dialog_token, - * which are constant across the lifetime of the struct (the - * dialog token being used only for debugging). */ struct wil_tid_ampdu_rx { - spinlock_t reorder_lock; /* see above */ struct sk_buff **reorder_buf; unsigned long *reorder_time; struct timer_list session_timer; @@ -327,17 +342,6 @@ struct wil_tid_ampdu_rx { bool first_time; /* is it 1-st time this buffer used? 
*/ }; -struct wil6210_stats { - u64 tsf; - u32 snr; - u16 last_mcs_rx; - u16 bf_mcs; /* last BF, used for Tx */ - u16 my_rx_sector; - u16 my_tx_sector; - u16 peer_rx_sector; - u16 peer_tx_sector; -}; - enum wil_sta_status { wil_sta_unused = 0, wil_sta_conn_pending = 1, @@ -371,10 +375,17 @@ struct wil_sta_info { bool data_port_open; /* can send any data, not only EAPOL */ /* Rx BACK */ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM]; + spinlock_t tid_rx_lock; /* guarding tid_rx array */ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)]; unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)]; }; +enum { + fw_recovery_idle = 0, + fw_recovery_pending = 1, + fw_recovery_running = 2, +}; + struct wil6210_priv { struct pci_dev *pdev; int n_msi; @@ -385,18 +396,22 @@ struct wil6210_priv { u32 hw_version; struct wil_board *board; u8 n_mids; /* number of additional MIDs as reported by FW */ - int recovery_count; /* num of FW recovery attempts in a short time */ + u32 recovery_count; /* num of FW recovery attempts in a short time */ + u32 recovery_state; /* FW recovery state machine */ unsigned long last_fw_recovery; /* jiffies of last fw recovery */ + wait_queue_head_t wq; /* for all wait_event() use */ /* profile */ u32 monitor_flags; u32 secure_pcp; /* create secure PCP? */ int sinfo_gen; + u32 itr_trsh; /* cached ISR registers */ u32 isr_misc; /* mailbox related */ struct mutex wmi_mutex; struct wil6210_mbox_ctl mbox_ctl; struct completion wmi_ready; + struct completion wmi_call; u16 wmi_seq; u16 reply_id; /**< wait for this WMI event */ void *reply_buf; @@ -430,11 +445,13 @@ struct wil6210_priv { struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ /* statistics */ - struct wil6210_stats stats; atomic_t isr_count_rx, isr_count_tx; /* debugfs */ struct dentry *debug; struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)]; + + void *platform_handle; + struct wil_platform_ops platform_ops; }; #define wil_to_wiphy(i) (i->wdev->wiphy) @@ -444,10 +461,11 @@ struct wil6210_priv { #define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w)) #define wil_to_ndev(i) (wil_to_wdev(i)->netdev) #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) +#define wil_to_pcie_dev(i) (&i->pdev->dev) -int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); -int wil_err(struct wil6210_priv *wil, const char *fmt, ...); -int wil_info(struct wil6210_priv *wil, const char *fmt, ...); +void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); +void wil_err(struct wil6210_priv *wil, const char *fmt, ...); +void wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg(wil, fmt, arg...) do { \ netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ wil_dbg_trace(wil, fmt, ##arg); \ @@ -458,6 +476,7 @@ int wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) #define wil_dbg_misc(wil, fmt, arg...) 
wil_dbg(wil, "DBG[MISC]" fmt, ##arg) +#if defined(CONFIG_DYNAMIC_DEBUG) #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ print_hex_dump_debug("DBG[TXRX]" prefix_str,\ @@ -469,6 +488,19 @@ int wil_info(struct wil6210_priv *wil, const char *fmt, ...); print_hex_dump_debug("DBG[ WMI]" prefix_str,\ prefix_type, rowsize, \ groupsize, buf, len, ascii) +#else /* defined(CONFIG_DYNAMIC_DEBUG) */ +static inline +void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize, + int groupsize, const void *buf, size_t len, bool ascii) +{ +} + +static inline +void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize, + int groupsize, const void *buf, size_t len, bool ascii) +{ +} +#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, size_t count); @@ -482,13 +514,18 @@ void wil_if_remove(struct wil6210_priv *wil); int wil_priv_init(struct wil6210_priv *wil); void wil_priv_deinit(struct wil6210_priv *wil); int wil_reset(struct wil6210_priv *wil); +void wil_set_itr_trsh(struct wil6210_priv *wil); void wil_fw_error_recovery(struct wil6210_priv *wil); +void wil_set_recovery_state(struct wil6210_priv *wil, int state); void wil_link_on(struct wil6210_priv *wil); void wil_link_off(struct wil6210_priv *wil); int wil_up(struct wil6210_priv *wil); +int __wil_up(struct wil6210_priv *wil); int wil_down(struct wil6210_priv *wil); +int __wil_down(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); +void wil_set_ethtoolops(struct net_device *ndev); void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr); void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); @@ -519,8 +556,10 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason); void wil6210_clear_irq(struct wil6210_priv *wil); int wil6210_init_irq(struct wil6210_priv *wil, int irq); void wil6210_fini_irq(struct wil6210_priv *wil, int irq); -void wil6210_disable_irq(struct wil6210_priv *wil); -void wil6210_enable_irq(struct wil6210_priv *wil); +void wil_mask_irq(struct wil6210_priv *wil); +void wil_unmask_irq(struct wil6210_priv *wil); +void wil_disable_irq(struct wil6210_priv *wil); +void wil_enable_irq(struct wil6210_priv *wil); int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); @@ -556,4 +595,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); +int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); +int wil_request_firmware(struct wil6210_priv *wil, const char *name); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..8f1d78f8a74d5dfb052b1b818cb3a5c8313a8861 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/wil_platform.c @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "linux/device.h" +#include "wil_platform.h" + +#ifdef CONFIG_WIL6210_PLATFORM_MSM +#include "wil_platform_msm.h" +#endif + +/** + * wil_platform_init() - wil6210 platform module init + * + * The function must be called before all other functions in this module. + * It returns a handle which is used with the rest of the API + * + */ +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops) +{ + void *handle = NULL; + + if (!ops) { + dev_err(dev, "Invalid parameter. Cannot init platform module\n"); + return NULL; + } + +#ifdef CONFIG_WIL6210_PLATFORM_MSM + handle = wil_platform_msm_init(dev, ops); + if (handle) + return handle; +#endif + + /* other platform specific init functions should be called here */ + + return handle; +} diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..158c73b049a9b583602b54763d322d9bfd27bffa --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL_PLATFORM_H__ +#define __WIL_PLATFORM_H__ + +struct device; + +/** + * struct wil_platform_ops - wil platform module callbacks + */ +struct wil_platform_ops { + int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */); + int (*suspend)(void *handle); + int (*resume)(void *handle); + void (*uninit)(void *handle); +}; + +void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops); + +#endif /* __WIL_PLATFORM_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.c b/drivers/net/wireless/ath/wil6210/wil_platform_msm.c new file mode 100644 index 0000000000000000000000000000000000000000..b354a743240d0d0e658f55bb77bfc416edb94478 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/wil_platform_msm.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include + +#include "wil_platform.h" +#include "wil_platform_msm.h" + +/** + * struct wil_platform_msm - wil6210 msm platform module info + * + * @dev: device object + * @msm_bus_handle: handle for using msm_bus API + * @pdata: bus scale info retrieved from DT + */ +struct wil_platform_msm { + struct device *dev; + uint32_t msm_bus_handle; + struct msm_bus_scale_pdata *pdata; +}; + +#define KBTOB(a) (a * 1000ULL) + +/** + * wil_platform_get_pdata() - Generate bus client data from device tree + * provided by clients. + * + * dev: device object + * of_node: Device tree node to extract information from + * + * The function returns a valid pointer to the allocated bus-scale-pdata + * if the vectors were correctly read from the client's device node. + * Any error in reading or parsing the device node will return NULL + * to the caller. + */ +static struct msm_bus_scale_pdata *wil_platform_get_pdata( + struct device *dev, + struct device_node *of_node) +{ + struct msm_bus_scale_pdata *pdata; + struct msm_bus_paths *usecase; + int i, j, ret, len; + unsigned int num_usecases, num_paths, mem_size; + const uint32_t *vec_arr; + struct msm_bus_vectors *vectors; + + /* first read num_usecases and num_paths so we can calculate + * amount of memory to allocate + */ + ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases", + &num_usecases); + if (ret) { + dev_err(dev, "Error: num-usecases not found\n"); + return NULL; + } + + ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths", + &num_paths); + if (ret) { + dev_err(dev, "Error: num_paths not found\n"); + return NULL; + } + + /* pdata memory layout: + * msm_bus_scale_pdata + * msm_bus_paths[num_usecases] + * msm_bus_vectors[num_usecases][num_paths] + */ + mem_size = sizeof(struct msm_bus_scale_pdata) + + sizeof(struct msm_bus_paths) * num_usecases + + sizeof(struct msm_bus_vectors) * num_usecases * num_paths; + + pdata = kzalloc(mem_size, GFP_KERNEL); + if (!pdata) + return NULL; + + ret = of_property_read_string(of_node, "qcom,msm-bus,name", + &pdata->name); + if (ret) { + dev_err(dev, "Error: Client name not found\n"); + goto err; + } + + if (of_property_read_bool(of_node, "qcom,msm-bus,active-only")) { + pdata->active_only = 1; + } else { + dev_info(dev, "active_only flag absent.\n"); + dev_info(dev, "Using dual context by default\n"); + } + + pdata->num_usecases = num_usecases; + pdata->usecase = (struct msm_bus_paths *)(pdata + 1); + + vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len); + if (vec_arr == NULL) { + dev_err(dev, "Error: Vector array not found\n"); + goto err; + } + + if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) { + dev_err(dev, "Error: Length-error on getting vectors\n"); + goto err; + } + + vectors = (struct msm_bus_vectors *)(pdata->usecase + num_usecases); + for (i = 0; i < num_usecases; i++) { + usecase = &pdata->usecase[i]; + usecase->num_paths = num_paths; + usecase->vectors = &vectors[i]; + + for (j = 0; j < num_paths; j++) { + int index = ((i * num_paths) + j) * 4; + + usecase->vectors[j].src = be32_to_cpu(vec_arr[index]); + usecase->vectors[j].dst = + be32_to_cpu(vec_arr[index + 1]); + usecase->vectors[j].ab = (uint64_t) + 
KBTOB(be32_to_cpu(vec_arr[index + 2])); + usecase->vectors[j].ib = (uint64_t) + KBTOB(be32_to_cpu(vec_arr[index + 3])); + } + } + + return pdata; + +err: + kfree(pdata); + + return NULL; +} + +/* wil_platform API (callbacks) */ + +static int wil_platform_bus_request(void *handle, + uint32_t kbps /* KBytes/Sec */) +{ + int rc, i; + struct wil_platform_msm *msm = (struct wil_platform_msm *)handle; + int vote = 0; /* vote 0 in case requested kbps cannot be satisfied */ + struct msm_bus_paths *usecase; + uint32_t usecase_kbps; + uint32_t min_kbps = ~0; + + /* find the lowest usecase that is bigger than requested kbps */ + for (i = 0; i < msm->pdata->num_usecases; i++) { + usecase = &msm->pdata->usecase[i]; + /* assume we have single path (vectors[0]). If we ever + * have multiple paths, need to define the behavior */ + usecase_kbps = div64_u64(usecase->vectors[0].ib, 1000); + if (usecase_kbps >= kbps && usecase_kbps < min_kbps) { + min_kbps = usecase_kbps; + vote = i; + } + } + + rc = msm_bus_scale_client_update_request(msm->msm_bus_handle, vote); + if (rc) + dev_err(msm->dev, "Failed msm_bus voting. kbps=%d vote=%d, rc=%d\n", + kbps, vote, rc); + else + /* TOOD: remove */ + dev_info(msm->dev, "msm_bus_scale_client_update_request succeeded. kbps=%d vote=%d\n", + kbps, vote); + + return rc; +} + +static void wil_platform_uninit(void *handle) +{ + struct wil_platform_msm *msm = (struct wil_platform_msm *)handle; + + dev_info(msm->dev, "wil_platform_uninit\n"); + + if (msm->msm_bus_handle) + msm_bus_scale_unregister_client(msm->msm_bus_handle); + + kfree(msm->pdata); + kfree(msm); +} + +static int wil_platform_msm_bus_register(struct wil_platform_msm *msm, + struct device_node *node) +{ + msm->pdata = wil_platform_get_pdata(msm->dev, node); + if (!msm->pdata) { + dev_err(msm->dev, "Failed getting DT info\n"); + return -EINVAL; + } + + msm->msm_bus_handle = msm_bus_scale_register_client(msm->pdata); + if (!msm->msm_bus_handle) { + dev_err(msm->dev, "Failed msm_bus registration\n"); + return -EINVAL; + } + + dev_info(msm->dev, "msm_bus registration succeeded! handle 0x%x\n", + msm->msm_bus_handle); + + return 0; +} + +/** + * wil_platform_msm_init() - wil6210 msm platform module init + * + * The function must be called before all other functions in this module. + * It returns a handle which is used with the rest of the API + * + */ +void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops) +{ + struct device_node *of_node; + struct wil_platform_msm *msm; + int rc; + + of_node = of_find_compatible_node(NULL, NULL, "qcom,wil6210"); + if (!of_node) { + /* this could mean non-msm platform */ + dev_err(dev, "DT node not found\n"); + return NULL; + } + + msm = kzalloc(sizeof(*msm), GFP_KERNEL); + if (!msm) + return NULL; + + msm->dev = dev; + + /* register with msm_bus module for scaling requests */ + rc = wil_platform_msm_bus_register(msm, of_node); + if (rc) + goto cleanup; + + memset(ops, 0, sizeof(*ops)); + ops->bus_request = wil_platform_bus_request; + ops->uninit = wil_platform_uninit; + + return (void *)msm; + +cleanup: + kfree(msm); + return NULL; +} diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.h b/drivers/net/wireless/ath/wil6210/wil_platform_msm.h new file mode 100644 index 0000000000000000000000000000000000000000..2f2229edb498db08bd172647062277764819a031 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/wil_platform_msm.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. 
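As a quick orientation for the two new platform files above: wil_platform_init() probes the available platform back-ends (currently only the MSM bus-scaling one) and, on success, fills the caller's struct wil_platform_ops with callbacks and returns an opaque handle. A minimal usage sketch follows; the caller name example_platform_attach() is illustrative and not part of the patch, and it reuses the WIL_MAX_BUS_REQUEST_KBPS value defined earlier in wil6210.h. The real driver wires this up in its probe path, which is outside these hunks.

    /* Sketch only -- example_platform_attach() is not in the patch.
     * wil_platform_init() returns an opaque handle (or NULL when no
     * platform module claims the device) and fills *ops; callbacks may
     * be left NULL, so check each one before invoking it.
     */
    static void example_platform_attach(struct device *dev)
    {
            struct wil_platform_ops ops = {};
            void *handle = wil_platform_init(dev, &ops);

            if (!handle)
                    return; /* no platform support, e.g. non-MSM board */

            if (ops.bus_request)    /* argument is in KBytes/sec */
                    ops.bus_request(handle, WIL_MAX_BUS_REQUEST_KBPS);

            if (ops.uninit)
                    ops.uninit(handle);
    }

On the MSM side, wil_platform_bus_request() then turns the requested KBytes/sec into a bus-scaling vote by picking the lowest device-tree use case whose instantaneous bandwidth still covers the request, and falls back to vote 0 when no use case is large enough.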
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL_PLATFORM__MSM_H__ +#define __WIL_PLATFORM_MSM_H__ + +#include "wil_platform.h" + +void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops); + +#endif /* __WIL_PLATFORM__MSM_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 1d1d0afdd2e195c6856372299e3fc635b213df23..4311df982c6074cf54e2fe43a8971f4aa355725e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,6 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include #include #include @@ -22,6 +23,10 @@ #include "wmi.h" #include "trace.h" +static uint max_assoc_sta = 1; +module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); + /** * WMI event receiving - theory of operations * @@ -152,6 +157,7 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, struct wil6210_mbox_hdr *hdr) { void __iomem *src = wmi_buffer(wil, ptr); + if (!src) return -EINVAL; @@ -273,6 +279,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct wmi_ready_event *evt = d; + wil->fw_version = le32_to_cpu(evt->sw_version); wil->n_mids = evt->numof_additional_mids; @@ -292,8 +299,9 @@ static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, { wil_dbg_wmi(wil, "WMI: got FW ready event\n"); + wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, &wil->status); - /* reuse wmi_ready for the firmware ready indication */ + /* let the reset sequence continue */ complete(&wil->wmi_ready); } @@ -346,11 +354,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) rx_mgmt_frame->bssid); cfg80211_put_bss(wiphy, bss); } else { - wil_err(wil, "cfg80211_inform_bss() failed\n"); + wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); } } else { cfg80211_rx_mgmt(wil->wdev, freq, signal, - (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL); + (void *)rx_mgmt_frame, d_len, 0); } } @@ -482,33 +490,6 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, mutex_unlock(&wil->mutex); } -static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct wmi_notify_req_done_event *evt = d; - - if (len < sizeof(*evt)) { - wil_err(wil, "Short NOTIFY event\n"); - return; - } - - wil->stats.tsf = le64_to_cpu(evt->tsf); - wil->stats.snr = 
le32_to_cpu(evt->snr_val); - wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs); - wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector); - wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector); - wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); - wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); - wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n" - "BF status 0x%08x SNR 0x%08x SQI %d%%\n" - "Tx Tpt %d goodput %d Rx goodput %d\n" - "Sectors(rx:tx) my %d:%d peer %d:%d\n", - wil->stats.bf_mcs, wil->stats.tsf, evt->status, - wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt), - le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput), - wil->stats.my_rx_sector, wil->stats.my_tx_sector, - wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); -} - /* * Firmware reports EAPOL frame using WME event. * Reconstruct Ethernet frame and deliver it via normal Rx @@ -617,27 +598,40 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, return; } + mutex_lock(&wil->mutex); + cid = wil->vring2cid_tid[evt->ringid][0]; if (cid >= WIL6210_MAX_CID) { wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid); - return; + goto out; } sta = &wil->sta[cid]; if (sta->status == wil_sta_unused) { wil_err(wil, "CID %d unused\n", cid); - return; + goto out; } wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr); for (i = 0; i < WIL_STA_TID_NUM; i++) { - struct wil_tid_ampdu_rx *r = sta->tid_rx[i]; + struct wil_tid_ampdu_rx *r; + unsigned long flags; + + spin_lock_irqsave(&sta->tid_rx_lock, flags); + + r = sta->tid_rx[i]; sta->tid_rx[i] = NULL; wil_tid_ampdu_rx_free(wil, r); + + spin_unlock_irqrestore(&sta->tid_rx_lock, flags); + if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize) sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil, evt->agg_wsize, 0); } + +out: + mutex_unlock(&wil->mutex); } static const struct { @@ -651,7 +645,6 @@ static const struct { {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, - {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, @@ -676,7 +669,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) unsigned n; if (!test_bit(wil_status_reset_done, &wil->status)) { - wil_err(wil, "Reset not completed\n"); + wil_err(wil, "Reset in progress. 
Cannot handle WMI event\n"); return; } @@ -731,6 +724,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; u16 id = le16_to_cpu(wmi->id); u32 tstamp = le32_to_cpu(wmi->timestamp); + wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", id, wmi->mid, tstamp); trace_wil6210_wmi_event(wmi, &wmi[1], @@ -771,8 +765,8 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, wil->reply_id = reply_id; wil->reply_buf = reply; wil->reply_size = reply_size; - remain = wait_for_completion_timeout(&wil->wmi_ready, - msecs_to_jiffies(to_msec)); + remain = wait_for_completion_timeout(&wil->wmi_call, + msecs_to_jiffies(to_msec)); if (0 == remain) { wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n", cmdid, reply_id, to_msec); @@ -822,7 +816,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) .network_type = wmi_nettype, .disable_sec_offload = 1, .channel = chan - 1, - .pcp_max_assoc_sta = WIL6210_MAX_CID, + .pcp_max_assoc_sta = max_assoc_sta, }; struct { struct wil6210_mbox_hdr_wmi wmi; @@ -832,6 +826,14 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) if (!wil->secure_pcp) cmd.disable_sec = 1; + if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || + (cmd.pcp_max_assoc_sta <= 0)) { + wil_info(wil, + "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n", + max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID); + cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; + } + /* * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP @@ -968,8 +970,11 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); + if (!cmd) return -ENOMEM; + if (!ie) + ie_len = 0; cmd->mgmt_frm_type = type; /* BUG: FW API define ieLen as u8. Will fix FW */ @@ -1143,6 +1148,9 @@ static void wmi_event_handle(struct wil6210_priv *wil, struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); void *evt_data = (void *)(&wmi[1]); u16 id = le16_to_cpu(wmi->id); + + wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", + id, wil->reply_id); /* check if someone waits for this event */ if (wil->reply_id && wil->reply_id == id) { if (wil->reply_buf) { @@ -1153,7 +1161,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, len - sizeof(*wmi)); } wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); - complete(&wil->wmi_ready); + complete(&wil->wmi_call); return; } /* unsolicited event */ @@ -1199,9 +1207,11 @@ void wmi_event_worker(struct work_struct *work) struct pending_wmi_event *evt; struct list_head *lh; + wil_dbg_wmi(wil, "Start %s\n", __func__); while ((lh = next_wmi_ev(wil)) != NULL) { evt = list_entry(lh, struct pending_wmi_event, list); wmi_event_handle(wil, &evt->event.hdr); kfree(evt); } + wil_dbg_wmi(wil, "Finished %s\n", __func__); } diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 17334c852866c3cc8a061eecb829052ebd9415cf..27b97432d1c2e70b3b709a0e0d34c0439e4b0a3a 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity . 
* * Permission to use, copy, modify, and/or distribute this software for any @@ -179,7 +179,6 @@ enum wmi_crypto_type { WMI_CRYPT_AES_GCMP = 0x20, }; - enum wmi_connect_ctrl_flag_bits { WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, WMI_CONNECT_SEND_REASSOC = 0x0002, @@ -219,7 +218,6 @@ struct wmi_disconnect_sta_cmd { __le16 disconnect_reason; } __packed; - /* * WMI_SET_PMK_CMDID */ @@ -234,7 +232,6 @@ struct wmi_set_pmk_cmd { u8 pmk[WMI_PMK_LEN]; } __packed; - /* * WMI_SET_PASSPHRASE_CMDID */ @@ -273,7 +270,6 @@ struct wmi_delete_cipher_key_cmd { u8 mac[WMI_MAC_LEN]; } __packed; - /* * WMI_START_SCAN_CMDID * @@ -325,7 +321,6 @@ struct wmi_probed_ssid_cmd { u8 ssid[WMI_MAX_SSID_LEN]; } __packed; - /* * WMI_SET_APPIE_CMDID * Add Application specified IE to a management frame @@ -351,7 +346,6 @@ struct wmi_set_appie_cmd { u8 ie_info[0]; } __packed; - /* * WMI_PXMT_RANGE_CFG_CMDID */ @@ -380,7 +374,6 @@ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } __packed; - /* * WMI_RF_RX_TEST_CMDID */ @@ -426,7 +419,6 @@ struct wmi_bcon_ctrl_cmd { u8 disable_sec; } __packed; - /******* P2P ***********/ /* @@ -797,7 +789,6 @@ struct wmi_temp_sense_cmd { __le32 measure_marlon_r_en; } __packed; - /* * WMI Events */ @@ -887,7 +878,6 @@ enum wmi_event_id { * Events data structures */ - enum wmi_fw_status { WMI_FW_STATUS_SUCCESS, WMI_FW_STATUS_FAILURE, @@ -980,7 +970,7 @@ struct wmi_ready_event { * WMI_NOTIFY_REQ_DONE_EVENTID */ struct wmi_notify_req_done_event { - __le32 status; + __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */ __le64 tsf; __le32 snr_val; __le32 tx_tpt; @@ -1038,8 +1028,8 @@ struct wmi_disconnect_event { __le16 protocol_reason_status; /* reason code, see 802.11 spec. */ u8 bssid[WMI_MAC_LEN]; /* set if known */ u8 disconnect_reason; /* see wmi_disconnect_reason */ - u8 assoc_resp_len; /* not in use */ - u8 assoc_info[0]; /* not in use */ + u8 assoc_resp_len; /* not used */ + u8 assoc_info[0]; /* not used */ } __packed; /* @@ -1081,7 +1071,6 @@ struct wmi_delba_event { __le16 reason; } __packed; - /* * WMI_VRING_CFG_DONE_EVENTID */ @@ -1147,7 +1136,6 @@ struct wmi_data_port_open_event { u8 reserved[3]; } __packed; - /* * WMI_GET_PCP_CHANNEL_EVENTID */ @@ -1156,7 +1144,6 @@ struct wmi_get_pcp_channel_event { u8 reserved[3]; } __packed; - /* * WMI_PORT_ALLOCATED_EVENTID */ @@ -1260,7 +1247,6 @@ struct wmi_rx_mgmt_info { u8 channel; /* From Radio MNGR */ } __packed; - /* * WMI_TX_MGMT_PACKET_EVENTID */ diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index 4cfb4d99ced0c3ceb22c05814008d5d21250ef04..7afc9c5329fb1b307a83773cf19e3d9d1b2b3cba 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c @@ -66,18 +66,18 @@ static void atmel_release(struct pcmcia_device *link); static void atmel_detach(struct pcmcia_device *p_dev); -typedef struct local_info_t { +struct local_info { struct net_device *eth_dev; -} local_info_t; +}; static int atmel_probe(struct pcmcia_device *p_dev) { - local_info_t *local; + struct local_info *local; dev_dbg(&p_dev->dev, "atmel_attach()\n"); /* Allocate space for private device-specific data */ - local = kzalloc(sizeof(local_info_t), GFP_KERNEL); + local = kzalloc(sizeof(*local), GFP_KERNEL); if (!local) return -ENOMEM; @@ -117,7 +117,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data) static int atmel_config(struct pcmcia_device *link) { - local_info_t *dev; + struct local_info *dev; int ret; const struct pcmcia_device_id *did; @@ -141,14 +141,14 @@ static int 
atmel_config(struct pcmcia_device *link) if (ret) goto failed; - ((local_info_t*)link->priv)->eth_dev = + ((struct local_info *)link->priv)->eth_dev = init_atmel_card(link->irq, link->resource[0]->start, did ? did->driver_info : ATMEL_FW_TYPE_NONE, &link->dev, card_present, link); - if (!((local_info_t*)link->priv)->eth_dev) + if (!((struct local_info *)link->priv)->eth_dev) goto failed; @@ -161,20 +161,20 @@ static int atmel_config(struct pcmcia_device *link) static void atmel_release(struct pcmcia_device *link) { - struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; + struct net_device *dev = ((struct local_info *)link->priv)->eth_dev; dev_dbg(&link->dev, "atmel_release\n"); if (dev) stop_atmel_card(dev); - ((local_info_t*)link->priv)->eth_dev = NULL; + ((struct local_info *)link->priv)->eth_dev = NULL; pcmcia_disable_device(link); } static int atmel_suspend(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; netif_device_detach(local->eth_dev); @@ -183,7 +183,7 @@ static int atmel_suspend(struct pcmcia_device *link) static int atmel_resume(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; atmel_open(local->eth_dev); netif_device_attach(local->eth_dev); diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile index 6e00b8804ada1f2a1274a5af33aef5540aa85ecc..9f7965aae93dc0b1d73de69f5c66c67ee0beb368 100644 --- a/drivers/net/wireless/b43/Makefile +++ b/drivers/net/wireless/b43/Makefile @@ -18,6 +18,7 @@ b43-y += xmit.o b43-y += dma.o b43-y += pio.o b43-y += rfkill.o +b43-y += ppr.o b43-$(CONFIG_B43_LEDS) += leds.o b43-$(CONFIG_B43_PCMCIA) += pcmcia.o b43-$(CONFIG_B43_SDIO) += sdio.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 4113b69347640359aa22020eb0dcfd7efe0b7c14..bb12586cd7cdfb05b0ac1e909a37a15d5bdc985e 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -45,6 +45,7 @@ #define B43_MMIO_RAM_DATA 0x134 #define B43_MMIO_PS_STATUS 0x140 #define B43_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43_MMIO_MAC_HW_CAP 0x15C /* MAC capabilities (corerev >= 13) */ #define B43_MMIO_SHM_CONTROL 0x160 #define B43_MMIO_SHM_DATA 0x164 #define B43_MMIO_SHM_DATA_UNALIGNED 0x166 @@ -253,6 +254,8 @@ enum { #define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ #define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */ #define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */ +#define B43_SHM_SH_MACHW_L 0x00C0 /* Location where the ucode expects the MAC capabilities */ +#define B43_SHM_SH_MACHW_H 0x00C2 /* Location where the ucode expects the MAC capabilities */ #define B43_SHM_SH_HOSTF5 0x00D4 /* Hostflags 5 for ucode options */ #define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ /* TSSI information */ @@ -297,6 +300,7 @@ enum { #define B43_SHM_SH_LFFBLIM 0x0046 /* Long frame fallback retry limit */ #define B43_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word (see PHY TX control) */ #define B43_SHM_SH_EXTNPHYCTL 0x00B0 /* Extended bytes for beacon PHY control (N) */ +#define B43_SHM_SH_BCN_LI 0x00B6 /* beacon listen interval */ /* SHM_SHARED ACK/CTS control */ #define B43_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word (see PHY TX control) */ /* SHM_SHARED probe response variables */ @@ -457,6 +461,7 @@ enum { #define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ #define B43_MACCTL_BEACPROMISC 0x00100000 /* 
Beacon Promiscuous */ #define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP */ +#define B43_MACCTL_PHY_LOCK 0x00200000 #define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ #define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ #define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ @@ -475,6 +480,11 @@ enum { #define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */ #define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */ +/* B43_MMIO_PSM_PHY_HDR bits */ +#define B43_PSM_HDR_MAC_PHY_RESET 0x00000001 +#define B43_PSM_HDR_MAC_PHY_CLOCK_EN 0x00000002 +#define B43_PSM_HDR_MAC_PHY_FORCE_CLK 0x00000004 + /* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */ #define B43_BCMA_CLKCTLST_80211_PLL_REQ 0x00000100 #define B43_BCMA_CLKCTLST_PHY_PLL_REQ 0x00000200 @@ -791,6 +801,13 @@ struct b43_firmware { bool pcm_request_failed; }; +enum b43_band { + B43_BAND_2G = 0, + B43_BAND_5G_LO = 1, + B43_BAND_5G_MI = 2, + B43_BAND_5G_HI = 3, +}; + /* Device (802.11 core) initialization status. */ enum { B43_STAT_UNINIT = 0, /* Uninitialized. */ @@ -1012,6 +1029,16 @@ static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) dev->dev->write16(dev->dev, offset, value); } +/* To optimize this check for flush_writes on BCM47XX_BCMA only. */ +static inline void b43_write16f(struct b43_wldev *dev, u16 offset, u16 value) +{ + b43_write16(dev, offset, value); +#if defined(CONFIG_BCM47XX_BCMA) + if (dev->dev->flush_writes) + b43_read16(dev, offset); +#endif +} + static inline void b43_maskset16(struct b43_wldev *dev, u16 offset, u16 mask, u16 set) { diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c index 565fdbdd6915f33e36cdbea616db209cf633bc4e..17d16a391fe6556dc66ab9a6bd5e4d0bd0c954a3 100644 --- a/drivers/net/wireless/b43/bus.c +++ b/drivers/net/wireless/b43/bus.c @@ -22,6 +22,10 @@ */ +#ifdef CONFIG_BCM47XX_BCMA +#include +#endif + #include "b43.h" #include "bus.h" @@ -102,6 +106,12 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core) dev->write32 = b43_bus_bcma_write32; dev->block_read = b43_bus_bcma_block_read; dev->block_write = b43_bus_bcma_block_write; +#ifdef CONFIG_BCM47XX_BCMA + if (b43_bus_host_is_pci(dev) && + bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA && + bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4716) + dev->flush_writes = true; +#endif dev->dev = &core->dev; dev->dma_dev = core->dma_dev; diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h index f3205c6988bc4354a14a5226011f762868253936..256c2c17939afe7720a643bae1c32187a9850bc5 100644 --- a/drivers/net/wireless/b43/bus.h +++ b/drivers/net/wireless/b43/bus.h @@ -33,6 +33,7 @@ struct b43_bus_dev { size_t count, u16 offset, u8 reg_width); void (*block_write)(struct b43_bus_dev *dev, const void *buffer, size_t count, u16 offset, u8 reg_width); + bool flush_writes; struct device *dev; struct device *dma_dev; @@ -60,7 +61,21 @@ static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev) #else return false; #endif +}; + +static inline bool b43_bus_host_is_pci(struct b43_bus_dev *dev) +{ +#ifdef CONFIG_B43_BCMA + if (dev->bus_type == B43_BUS_BCMA) + return (dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI); +#endif +#ifdef CONFIG_B43_SSB + if (dev->bus_type == B43_BUS_SSB) + return (dev->sdev->bus->bustype == SSB_BUSTYPE_PCI); +#endif + return false; } + static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev) { #ifdef CONFIG_B43_SSB diff --git a/drivers/net/wireless/b43/main.c 
b/drivers/net/wireless/b43/main.c index 2af1ac396eb451897371f6ff576f565843781e69..5d4173ee55bcc4aaa32cbab54390fe3c3c242502 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1204,6 +1204,36 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) } } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */ +void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev) +{ + struct bcma_drv_cc *bcma_cc __maybe_unused; + struct ssb_chipcommon *ssb_cc __maybe_unused; + + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + bcma_cc = &dev->dev->bdev->bus->drv_cc; + + bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0); + bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); + bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4); + bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); + break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + ssb_cc = &dev->dev->sdev->bus->chipco; + + chipco_write32(ssb_cc, SSB_CHIPCO_CHIPCTL_ADDR, 0); + chipco_mask32(ssb_cc, SSB_CHIPCO_CHIPCTL_DATA, ~0x4); + chipco_set32(ssb_cc, SSB_CHIPCO_CHIPCTL_DATA, 0x4); + chipco_mask32(ssb_cc, SSB_CHIPCO_CHIPCTL_DATA, ~0x4); + break; +#endif + } +} + #ifdef CONFIG_B43_BCMA static void b43_bcma_phy_reset(struct b43_wldev *dev) { @@ -2985,7 +3015,22 @@ void b43_mac_switch_freq(struct b43_wldev *dev, u8 spurmode) { u16 chip_id = dev->dev->chip_id; - if (chip_id == BCMA_CHIP_ID_BCM43131 || + if (chip_id == BCMA_CHIP_ID_BCM4331) { + switch (spurmode) { + case 2: /* 168 Mhz: 2^26/168 = 0x61862 */ + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x1862); + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x6); + break; + case 1: /* 164 Mhz: 2^26/164 = 0x63e70 */ + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x3e70); + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x6); + break; + default: /* 160 Mhz: 2^26/160 = 0x66666 */ + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x6666); + b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x6); + break; + } + } else if (chip_id == BCMA_CHIP_ID_BCM43131 || chip_id == BCMA_CHIP_ID_BCM43217 || chip_id == BCMA_CHIP_ID_BCM43222 || chip_id == BCMA_CHIP_ID_BCM43224 || @@ -3106,6 +3151,7 @@ static void b43_rate_memory_init(struct b43_wldev *dev) case B43_PHYTYPE_HT: case B43_PHYTYPE_LCN: b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_9MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1); @@ -3884,6 +3930,12 @@ static int b43_switch_band(struct b43_wldev *dev, return 0; } +static void b43_set_beacon_listen_interval(struct b43_wldev *dev, u16 interval) +{ + interval = min_t(u16, interval, (u16)0xFF); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BCN_LI, interval); +} + /* Write the short and long frame retry limit values. 
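The magic pairs written to B43_MMIO_TSF_CLK_FRAC_LOW/HIGH in the new BCM4331 branch of b43_mac_switch_freq() above are simply 2^26 divided by the MAC clock in MHz, split across the two 16-bit registers (the in-code comments spell out 168 MHz -> 0x61862, and so on). A sketch of the equivalent computation; the helper name is illustrative and not part of the patch:

    /* Sketch only: derive the TSF clock fraction pair for a MAC clock of
     * "mhz" MHz.  2^26 / 168 ~= 0x61862, which the patch writes as
     * LOW = 0x1862, HIGH = 0x6; rounding to nearest reproduces all three
     * hard-coded cases (160, 164 and 168 MHz).
     */
    static void example_write_tsf_clk_frac(struct b43_wldev *dev,
                                           unsigned int mhz)
    {
            u32 frac = DIV_ROUND_CLOSEST(1 << 26, mhz);

            b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, frac & 0xFFFF);
            b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, frac >> 16);
    }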
*/ static void b43_set_retry_limits(struct b43_wldev *dev, unsigned int short_retry, @@ -3912,6 +3964,9 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&wl->mutex); b43_mac_suspend(dev); + if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) + b43_set_beacon_listen_interval(dev, conf->listen_interval); + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { phy->chandef = &conf->chandef; phy->channel = conf->chandef.chan->hw_value; @@ -4466,10 +4521,10 @@ static int b43_phy_versioning(struct b43_wldev *dev) if (core_rev == 40 || core_rev == 42) { radio_manuf = 0x17F; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 0); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, 0); radio_rev = b43_read16(dev, B43_MMIO_RADIO24_DATA); - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 1); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, 1); radio_id = b43_read16(dev, B43_MMIO_RADIO24_DATA); radio_ver = 0; /* Is there version somewhere? */ @@ -4477,7 +4532,7 @@ static int b43_phy_versioning(struct b43_wldev *dev) u16 radio24[3]; for (tmp = 0; tmp < 3; tmp++) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, tmp); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, tmp); radio24[tmp] = b43_read16(dev, B43_MMIO_RADIO24_DATA); } @@ -4494,13 +4549,12 @@ static int b43_phy_versioning(struct b43_wldev *dev) else tmp = 0x5205017F; } else { - b43_write16(dev, B43_MMIO_RADIO_CONTROL, - B43_RADIOCTL_ID); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, + B43_RADIOCTL_ID); tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, - B43_RADIOCTL_ID); - tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) - << 16; + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, + B43_RADIOCTL_ID); + tmp |= b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16; } radio_manuf = (tmp & 0x00000FFF); radio_id = (tmp & 0x0FFFF000) >> 12; @@ -4813,6 +4867,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev) hf &= ~B43_HF_SKCFPUP; b43_hf_write(dev, hf); + /* tell the ucode MAC capabilities */ + if (dev->dev->core_rev >= 13) { + u32 mac_hw_cap = b43_read32(dev, B43_MMIO_MAC_HW_CAP); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_MACHW_L, + mac_hw_cap & 0xffff); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_MACHW_H, + (mac_hw_cap >> 16) & 0xffff); + } + b43_set_retry_limits(dev, B43_DEFAULT_SHORT_RETRY_LIMIT, B43_DEFAULT_LONG_RETRY_LIMIT); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); @@ -4835,6 +4899,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev) /* Maximum Contention Window */ b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); + /* write phytype and phyvers */ + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PHYTYPE, phy->type); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PHYVER, phy->rev); + if (b43_bus_host_is_pcmcia(dev->dev) || b43_bus_host_is_sdio(dev->dev)) { dev->__using_pio_transfers = true; diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h index 9f22e4b4c13297207c309d7ea6a1fd884d061655..c46430cc725c168cec55bcc32eb8a9c26ed2f8f6 100644 --- a/drivers/net/wireless/b43/main.h +++ b/drivers/net/wireless/b43/main.h @@ -96,6 +96,8 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason); #define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); +void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev); + void b43_mac_suspend(struct b43_wldev *dev); void b43_mac_enable(struct b43_wldev *dev); void b43_mac_phy_clock_set(struct 
b43_wldev *dev, bool on); diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c index 25e40432d68b9c0c9dcc25ff44f2386b56e6ca68..99c036f5ecb7b51319d4c2b26d9b3719af028947 100644 --- a/drivers/net/wireless/b43/phy_a.c +++ b/drivers/net/wireless/b43/phy_a.c @@ -444,14 +444,14 @@ static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset) static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg) { reg = adjust_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { reg = adjust_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 3cbef21b4726dc106c5746f79689a10d0db6c4e0..1dfc682a805513f5e5d160f6198f6d17bec644fc 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -222,12 +222,18 @@ static inline void assert_mac_suspended(struct b43_wldev *dev) u16 b43_radio_read(struct b43_wldev *dev, u16 reg) { assert_mac_suspended(dev); + dev->phy.writes_counter = 0; return dev->phy.ops->radio_read(dev, reg); } void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { assert_mac_suspended(dev); + if (b43_bus_host_is_pci(dev->dev) && + ++dev->phy.writes_counter > B43_MAX_WRITES_IN_ROW) { + b43_read32(dev, B43_MMIO_MACCTL); + dev->phy.writes_counter = 1; + } dev->phy.ops->radio_write(dev, reg, value); } @@ -268,17 +274,28 @@ u16 b43_phy_read(struct b43_wldev *dev, u16 reg) { assert_mac_suspended(dev); dev->phy.writes_counter = 0; - return dev->phy.ops->phy_read(dev, reg); + + if (dev->phy.ops->phy_read) + return dev->phy.ops->phy_read(dev, reg); + + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); } void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) { assert_mac_suspended(dev); - dev->phy.ops->phy_write(dev, reg, value); - if (++dev->phy.writes_counter == B43_MAX_WRITES_IN_ROW) { + if (b43_bus_host_is_pci(dev->dev) && + ++dev->phy.writes_counter > B43_MAX_WRITES_IN_ROW) { b43_read16(dev, B43_MMIO_PHY_VER); - dev->phy.writes_counter = 0; + dev->phy.writes_counter = 1; } + + if (dev->phy.ops->phy_write) + return dev->phy.ops->phy_write(dev, reg, value); + + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); } void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 8f5c14bc10e6fedcab2d7b88a0009470772dad8a..727ce6edb4b3831faf6d254ad789b4a9a1a7c336 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -2555,13 +2555,13 @@ static void b43_gphy_op_exit(struct b43_wldev *dev) static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } @@ -2572,7 +2572,7 @@ static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg) /* G-PHY needs 0x80 for read access. 
*/ reg |= 0x80; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -2581,7 +2581,7 @@ static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c index f2974c6b1c01fa3ddd11753a74badac91bc9b43e..bd68945965d6967ba75f2a4a25d5328d3d649d12 100644 --- a/drivers/net/wireless/b43/phy_ht.c +++ b/drivers/net/wireless/b43/phy_ht.c @@ -81,80 +81,104 @@ static void b43_radio_2059_channel_setup(struct b43_wldev *dev, udelay(50); /* Calibration */ - b43_radio_mask(dev, 0x2b, ~0x1); - b43_radio_mask(dev, 0x2e, ~0x4); - b43_radio_set(dev, 0x2e, 0x4); - b43_radio_set(dev, 0x2b, 0x1); + b43_radio_mask(dev, R2059_RFPLL_MISC_EN, ~0x1); + b43_radio_mask(dev, R2059_RFPLL_MISC_CAL_RESETN, ~0x4); + b43_radio_set(dev, R2059_RFPLL_MISC_CAL_RESETN, 0x4); + b43_radio_set(dev, R2059_RFPLL_MISC_EN, 0x1); udelay(300); } -static void b43_radio_2059_init(struct b43_wldev *dev) +/* Calibrate resistors in LPF of PLL? */ +static void b43_radio_2059_rcal(struct b43_wldev *dev) +{ + /* Enable */ + b43_radio_set(dev, R2059_C3 | R2059_RCAL_CONFIG, 0x1); + usleep_range(10, 20); + + b43_radio_set(dev, R2059_C3 | 0x0BF, 0x1); + b43_radio_maskset(dev, R2059_C3 | 0x19B, 0x3, 0x2); + + /* Start */ + b43_radio_set(dev, R2059_C3 | R2059_RCAL_CONFIG, 0x2); + usleep_range(100, 200); + + /* Stop */ + b43_radio_mask(dev, R2059_C3 | R2059_RCAL_CONFIG, ~0x2); + + if (!b43_radio_wait_value(dev, R2059_C3 | R2059_RCAL_STATUS, 1, 1, 100, + 1000000)) + b43err(dev->wl, "Radio 0x2059 rcal timeout\n"); + + /* Disable */ + b43_radio_mask(dev, R2059_C3 | R2059_RCAL_CONFIG, ~0x1); + + b43_radio_set(dev, 0xa, 0x60); +} + +/* Calibrate the internal RC oscillator? 
*/ +static void b43_radio_2057_rccal(struct b43_wldev *dev) { - const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3 }; const u16 radio_values[3][2] = { { 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 }, }; - u16 i, j; + int i; - b43_radio_write(dev, R2059_ALL | 0x51, 0x0070); - b43_radio_write(dev, R2059_ALL | 0x5a, 0x0003); + for (i = 0; i < 3; i++) { + b43_radio_write(dev, R2059_RCCAL_MASTER, radio_values[i][0]); + b43_radio_write(dev, R2059_RCCAL_X1, 0x6E); + b43_radio_write(dev, R2059_RCCAL_TRC0, radio_values[i][1]); - for (i = 0; i < ARRAY_SIZE(routing); i++) - b43_radio_set(dev, routing[i] | 0x146, 0x3); + /* Start */ + b43_radio_write(dev, R2059_RCCAL_START_R1_Q1_P1, 0x55); - b43_radio_set(dev, 0x2e, 0x0078); - b43_radio_set(dev, 0xc0, 0x0080); - msleep(2); - b43_radio_mask(dev, 0x2e, ~0x0078); - b43_radio_mask(dev, 0xc0, ~0x0080); + /* Wait */ + if (!b43_radio_wait_value(dev, R2059_RCCAL_DONE_OSCCAP, 2, 2, + 500, 5000000)) + b43err(dev->wl, "Radio 0x2059 rccal timeout\n"); - if (1) { /* FIXME */ - b43_radio_set(dev, R2059_C3 | 0x4, 0x1); - udelay(10); - b43_radio_set(dev, R2059_C3 | 0x0BF, 0x1); - b43_radio_maskset(dev, R2059_C3 | 0x19B, 0x3, 0x2); + /* Stop */ + b43_radio_write(dev, R2059_RCCAL_START_R1_Q1_P1, 0x15); + } - b43_radio_set(dev, R2059_C3 | 0x4, 0x2); - udelay(100); - b43_radio_mask(dev, R2059_C3 | 0x4, ~0x2); + b43_radio_mask(dev, R2059_RCCAL_MASTER, ~0x1); +} - for (i = 0; i < 10000; i++) { - if (b43_radio_read(dev, R2059_C3 | 0x145) & 1) { - i = 0; - break; - } - udelay(100); - } - if (i) - b43err(dev->wl, "radio 0x945 timeout\n"); - - b43_radio_mask(dev, R2059_C3 | 0x4, ~0x1); - b43_radio_set(dev, 0xa, 0x60); - - for (i = 0; i < 3; i++) { - b43_radio_write(dev, 0x17F, radio_values[i][0]); - b43_radio_write(dev, 0x13D, 0x6E); - b43_radio_write(dev, 0x13E, radio_values[i][1]); - b43_radio_write(dev, 0x13C, 0x55); - - for (j = 0; j < 10000; j++) { - if (b43_radio_read(dev, 0x140) & 2) { - j = 0; - break; - } - udelay(500); - } - if (j) - b43err(dev->wl, "radio 0x140 timeout\n"); +static void b43_radio_2059_init_pre(struct b43_wldev *dev) +{ + b43_phy_mask(dev, B43_PHY_HT_RF_CTL_CMD, ~B43_PHY_HT_RF_CTL_CMD_CHIP0_PU); + b43_phy_set(dev, B43_PHY_HT_RF_CTL_CMD, B43_PHY_HT_RF_CTL_CMD_FORCE); + b43_phy_mask(dev, B43_PHY_HT_RF_CTL_CMD, ~B43_PHY_HT_RF_CTL_CMD_FORCE); + b43_phy_set(dev, B43_PHY_HT_RF_CTL_CMD, B43_PHY_HT_RF_CTL_CMD_CHIP0_PU); +} - b43_radio_write(dev, 0x13C, 0x15); - } +static void b43_radio_2059_init(struct b43_wldev *dev) +{ + const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3 }; + int i; - b43_radio_mask(dev, 0x17F, ~0x1); + /* Prepare (reset?) 
radio */ + b43_radio_2059_init_pre(dev); + + r2059_upload_inittabs(dev); + + for (i = 0; i < ARRAY_SIZE(routing); i++) + b43_radio_set(dev, routing[i] | 0x146, 0x3); + + /* Post init starts below */ + + b43_radio_set(dev, R2059_RFPLL_MISC_CAL_RESETN, 0x0078); + b43_radio_set(dev, R2059_XTAL_CONFIG2, 0x0080); + msleep(2); + b43_radio_mask(dev, R2059_RFPLL_MISC_CAL_RESETN, ~0x0078); + b43_radio_mask(dev, R2059_XTAL_CONFIG2, ~0x0080); + + if (1) { /* FIXME */ + b43_radio_2059_rcal(dev); + b43_radio_2057_rccal(dev); } - b43_radio_mask(dev, 0x11, ~0x0008); + b43_radio_mask(dev, R2059_RFPLL_MASTER, ~0x0008); } /************************************************** @@ -297,6 +321,26 @@ static void b43_phy_ht_bphy_init(struct b43_wldev *dev) b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); } +static void b43_phy_ht_bphy_reset(struct b43_wldev *dev, bool reset) +{ + u16 tmp; + + tmp = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, + tmp | B43_PSM_HDR_MAC_PHY_FORCE_CLK); + + /* Put BPHY in or take it out of the reset */ + if (reset) + b43_phy_set(dev, B43_PHY_B_BBCFG, + B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX); + else + b43_phy_mask(dev, B43_PHY_B_BBCFG, + (u16)~(B43_PHY_B_BBCFG_RSTCCA | + B43_PHY_B_BBCFG_RSTRX)); + + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp); +} + /************************************************** * Samples **************************************************/ @@ -704,7 +748,6 @@ static void b43_phy_ht_spur_avoid(struct b43_wldev *dev, { struct bcma_device *core = dev->dev->bdev; int spuravoid = 0; - u16 tmp; /* Check for 13 and 14 is just a guess, we don't have enough logs. */ if (new_channel->hw_value == 13 || new_channel->hw_value == 14) @@ -717,22 +760,9 @@ static void b43_phy_ht_spur_avoid(struct b43_wldev *dev, B43_BCMA_CLKCTLST_80211_PLL_ST | B43_BCMA_CLKCTLST_PHY_PLL_ST, false); - /* Values has been taken from wlc_bmac_switch_macfreq comments */ - switch (spuravoid) { - case 2: /* 126MHz */ - tmp = 0x2082; - break; - case 1: /* 123MHz */ - tmp = 0x5341; - break; - default: /* 120MHz */ - tmp = 0x8889; - } - - b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, tmp); - b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8); + b43_mac_switch_freq(dev, spuravoid); - /* TODO: reset PLL */ + b43_wireless_core_phy_pll_reset(dev); if (spuravoid) b43_phy_set(dev, B43_PHY_HT_BBCFG, B43_PHY_HT_BBCFG_RSTRX); @@ -747,13 +777,19 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev, const struct b43_phy_ht_channeltab_e_phy *e, struct ieee80211_channel *new_channel) { - bool old_band_5ghz; + if (new_channel->band == IEEE80211_BAND_5GHZ) { + /* Switch to 2 GHz for a moment to access B-PHY regs */ + b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ); + + b43_phy_ht_bphy_reset(dev, true); + + /* Switch to 5 GHz */ + b43_phy_set(dev, B43_PHY_HT_BANDCTL, B43_PHY_HT_BANDCTL_5GHZ); + } else { + /* Switch to 2 GHz */ + b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ); - old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */ - if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { - /* TODO */ - } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { - /* TODO */ + b43_phy_ht_bphy_reset(dev, false); } b43_phy_write(dev, B43_PHY_HT_BW1, e->bw1); @@ -1002,19 +1038,10 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev, if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED) b43err(dev->wl, "MAC not suspended\n"); - /* In the following PHY ops we copy wl's dummy behaviour. 
- * TODO: Find out if reads (currently hidden in masks/masksets) are - * needed and replace following ops with just writes or w&r. - * Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can - * cause delayed (!) machine lock up. */ if (blocked) { - b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0); + b43_phy_mask(dev, B43_PHY_HT_RF_CTL_CMD, + ~B43_PHY_HT_RF_CTL_CMD_CHIP0_PU); } else { - b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0); - b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1); - b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0); - b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2); - if (dev->phy.radio_ver == 0x2059) b43_radio_2059_init(dev); else @@ -1071,22 +1098,10 @@ static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev) * R/W ops. **************************************************/ -static u16 b43_phy_ht_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_phy_ht_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_phy_ht_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -1096,14 +1111,14 @@ static u16 b43_phy_ht_op_radio_read(struct b43_wldev *dev, u16 reg) /* HT-PHY needs 0x200 for read access */ reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO24_DATA); } static void b43_phy_ht_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO24_DATA, value); } @@ -1126,8 +1141,6 @@ const struct b43_phy_operations b43_phyops_ht = { .free = b43_phy_ht_op_free, .prepare_structs = b43_phy_ht_op_prepare_structs, .init = b43_phy_ht_op_init, - .phy_read = b43_phy_ht_op_read, - .phy_write = b43_phy_ht_op_write, .phy_maskset = b43_phy_ht_op_maskset, .radio_read = b43_phy_ht_op_radio_read, .radio_write = b43_phy_ht_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h index 6cae370d10184653440495f03cd69b71822a359f..c086f56ce4785e9c1758995375ea400b23795ac3 100644 --- a/drivers/net/wireless/b43/phy_ht.h +++ b/drivers/net/wireless/b43/phy_ht.h @@ -81,7 +81,9 @@ #define B43_PHY_HT_RF_SEQ_STATUS B43_PHY_EXTG(0x004) /* Values for the status are the same as for the trigger */ -#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010) +#define B43_PHY_HT_RF_CTL_CMD 0x810 +#define B43_PHY_HT_RF_CTL_CMD_FORCE 0x0001 +#define B43_PHY_HT_RF_CTL_CMD_CHIP0_PU 0x0002 #define B43_PHY_HT_RF_CTL_INT_C1 B43_PHY_EXTG(0x04c) #define B43_PHY_HT_RF_CTL_INT_C2 B43_PHY_EXTG(0x06c) @@ -104,6 +106,9 @@ #define B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT 0 #define B43_PHY_HT_TX_PCTL_STATUS_C3 B43_PHY_EXTG(0x169) +#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) +#define B43_PHY_B_BBCFG_RSTCCA 0x4000 /* Reset CCA */ +#define B43_PHY_B_BBCFG_RSTRX 0x8000 /* Reset RX */ #define B43_PHY_HT_TEST B43_PHY_N_BMODE(0x00A) diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c index e76bbdf3247e9b095af94bf907b2439f3f25f5f3..97461ccf3e1e58ba4033b97756c0dd50491b13bf 100644 --- a/drivers/net/wireless/b43/phy_lcn.c +++ 
b/drivers/net/wireless/b43/phy_lcn.c @@ -810,22 +810,10 @@ static void b43_phy_lcn_op_adjust_txpower(struct b43_wldev *dev) * R/W ops. **************************************************/ -static u16 b43_phy_lcn_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_phy_lcn_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_phy_lcn_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -835,14 +823,14 @@ static u16 b43_phy_lcn_op_radio_read(struct b43_wldev *dev, u16 reg) /* LCN-PHY needs 0x200 for read access */ reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO24_DATA); } static void b43_phy_lcn_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO24_DATA, value); } @@ -855,8 +843,6 @@ const struct b43_phy_operations b43_phyops_lcn = { .free = b43_phy_lcn_op_free, .prepare_structs = b43_phy_lcn_op_prepare_structs, .init = b43_phy_lcn_op_init, - .phy_read = b43_phy_lcn_op_read, - .phy_write = b43_phy_lcn_op_write, .phy_maskset = b43_phy_lcn_op_maskset, .radio_read = b43_phy_lcn_op_radio_read, .radio_write = b43_phy_lcn_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index 92190dacf6895c31cfbd9748930465e4dc5a4f59..058a9f2320503ef623748ed595ccbfdb8dbd9414 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -1985,22 +1985,10 @@ static void lpphy_calibration(struct b43_wldev *dev) b43_mac_enable(dev); } -static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_lpphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -2016,7 +2004,7 @@ static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg) } else reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -2025,7 +2013,7 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. 
*/ B43_WARN_ON(reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } @@ -2713,8 +2701,6 @@ const struct b43_phy_operations b43_phyops_lp = { .free = b43_lpphy_op_free, .prepare_structs = b43_lpphy_op_prepare_structs, .init = b43_lpphy_op_init, - .phy_read = b43_lpphy_op_read, - .phy_write = b43_lpphy_op_write, .phy_maskset = b43_lpphy_op_maskset, .radio_read = b43_lpphy_op_radio_read, .radio_write = b43_lpphy_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index e2a3f0d5bcc27cc5a2f960989ad866fb11a644b9..9f0bcf3b8414c7cb11713158f4f0fc485df07618 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -34,6 +34,7 @@ #include "radio_2056.h" #include "radio_2057.h" #include "main.h" +#include "ppr.h" struct nphy_txgains { u16 tx_lpf[2]; @@ -3606,16 +3607,6 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, * Tx and Rx **************************************************/ -static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) -{//TODO -} - -static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, - bool ignore_tssi) -{//TODO - return B43_TXPWR_RES_DONE; -} - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) { @@ -4069,6 +4060,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) s16 a1[2], b0[2], b1[2]; u8 idle[2]; + u8 ppr_max; s8 target[2]; s32 num, den, pwr; u32 regval[64]; @@ -4147,7 +4139,12 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) b1[0] = b1[1] = -1393; } } - /* target[0] = target[1] = nphy->tx_power_max; */ + + ppr_max = b43_ppr_get_max(dev, &nphy->tx_pwr_max_ppr); + if (ppr_max) { + target[0] = ppr_max; + target[1] = ppr_max; + } if (dev->phy.rev >= 3) { if (sprom->fem.ghz2.tssipos) @@ -4235,8 +4232,9 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) const u32 *table = NULL; u32 rfpwr_offset; - u8 pga_gain; + u8 pga_gain, pad_gain; int i; + const s16 *uninitialized_var(rf_pwr_offset_table); table = b43_nphy_get_tx_gain_table(dev); if (!table) @@ -4252,13 +4250,27 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) nphy->gmval = (table[0] >> 16) & 0x7000; #endif + if (phy->rev >= 19) { + return; + } else if (phy->rev >= 7) { + rf_pwr_offset_table = b43_ntab_get_rf_pwr_offset_table(dev); + if (!rf_pwr_offset_table) + return; + /* TODO: Enable this once we have gains configured */ + return; + } + for (i = 0; i < 128; i++) { if (phy->rev >= 19) { /* TODO */ return; } else if (phy->rev >= 7) { - /* TODO */ - return; + pga_gain = (table[i] >> 24) & 0xf; + pad_gain = (table[i] >> 19) & 0x1f; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + rfpwr_offset = rf_pwr_offset_table[pad_gain]; + else + rfpwr_offset = rf_pwr_offset_table[pga_gain]; } else { pga_gain = (table[i] >> 24) & 0xF; if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) @@ -5874,6 +5886,69 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) b43_mac_enable(dev); } +static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = dev->phy.n; + struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan; + struct b43_ppr *ppr = &nphy->tx_pwr_max_ppr; + u8 max; /* qdBm */ + bool tx_pwr_state; + + if 
(nphy->tx_pwr_last_recalc_freq == channel->center_freq && + nphy->tx_pwr_last_recalc_limit == phy->desired_txpower) + return B43_TXPWR_RES_DONE; + + /* Make sure we have a clean PPR */ + b43_ppr_clear(dev, ppr); + + /* HW limitations */ + b43_ppr_load_max_from_sprom(dev, ppr, B43_BAND_2G); + + /* Regulatory & user settings */ + max = INT_TO_Q52(phy->chandef->chan->max_power); + if (phy->desired_txpower) + max = min_t(u8, max, INT_TO_Q52(phy->desired_txpower)); + b43_ppr_apply_max(dev, ppr, max); + if (b43_debug(dev, B43_DBG_XMITPOWER)) + b43dbg(dev->wl, "Calculated TX power: " Q52_FMT "\n", + Q52_ARG(b43_ppr_get_max(dev, ppr))); + + /* TODO: Enable this once we get gains working */ +#if 0 + /* Some extra gains */ + hw_gain = 6; /* N-PHY specific */ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + hw_gain += sprom->antenna_gain.a0; + else + hw_gain += sprom->antenna_gain.a1; + b43_ppr_add(dev, ppr, -hw_gain); +#endif + + /* Make sure we didn't go too low */ + b43_ppr_apply_min(dev, ppr, INT_TO_Q52(8)); + + /* Apply */ + tx_pwr_state = nphy->txpwrctrl; + b43_mac_suspend(dev); + b43_nphy_tx_power_ctl_setup(dev); + if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) { + b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_PHY_LOCK); + b43_read32(dev, B43_MMIO_MACCTL); + udelay(1); + } + b43_nphy_tx_power_ctrl(dev, nphy->txpwrctrl); + if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) + b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PHY_LOCK, 0); + b43_mac_enable(dev); + + nphy->tx_pwr_last_recalc_freq = channel->center_freq; + nphy->tx_pwr_last_recalc_limit = phy->desired_txpower; + + return B43_TXPWR_RES_DONE; +} + /************************************************** * N-PHY init **************************************************/ @@ -6294,7 +6369,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, b43_mac_switch_freq(dev, spuravoid); if (dev->phy.rev == 3 || dev->phy.rev == 4) - ; /* TODO: reset PLL */ + b43_wireless_core_phy_pll_reset(dev); if (spuravoid) b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX); @@ -6407,6 +6482,7 @@ static int b43_nphy_op_allocate(struct b43_wldev *dev) nphy = kzalloc(sizeof(*nphy), GFP_KERNEL); if (!nphy) return -ENOMEM; + dev->phy.n = nphy; return 0; @@ -6497,26 +6573,13 @@ static inline void check_phyreg(struct b43_wldev *dev, u16 offset) #endif /* B43_DEBUG */ } -static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg) -{ - check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_maskset16(dev, B43_MMIO_PHY_DATA, mask, set); + dev->phy.writes_counter = 1; } static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) @@ -6529,7 +6592,7 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) else reg |= 0x100; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -6538,7 +6601,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. 
*/ B43_WARN_ON(dev->phy.rev < 7 && reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } @@ -6652,8 +6715,6 @@ const struct b43_phy_operations b43_phyops_n = { .free = b43_nphy_op_free, .prepare_structs = b43_nphy_op_prepare_structs, .init = b43_nphy_op_init, - .phy_read = b43_nphy_op_read, - .phy_write = b43_nphy_op_write, .phy_maskset = b43_nphy_op_maskset, .radio_read = b43_nphy_op_radio_read, .radio_write = b43_nphy_op_radio_write, @@ -6662,5 +6723,4 @@ const struct b43_phy_operations b43_phyops_n = { .switch_channel = b43_nphy_op_switch_channel, .get_default_chan = b43_nphy_op_get_default_chan, .recalc_txpower = b43_nphy_op_recalc_txpower, - .adjust_txpower = b43_nphy_op_adjust_txpower, }; diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h index 30bec815b969651ee4ad1cca2c6c74e607cc834f..a6da2c31a99cf7c2dcc646b674eb2b35d6133ef8 100644 --- a/drivers/net/wireless/b43/phy_n.h +++ b/drivers/net/wireless/b43/phy_n.h @@ -2,6 +2,7 @@ #define B43_NPHY_H_ #include "phy_common.h" +#include "ppr.h" /* N-PHY registers. */ @@ -967,6 +968,9 @@ struct b43_phy_n { struct b43_phy_n_txpwrindex txpwrindex[2]; struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2]; struct b43_chanspec txiqlocal_chanspec; + struct b43_ppr tx_pwr_max_ppr; + u16 tx_pwr_last_recalc_freq; + int tx_pwr_last_recalc_limit; u8 txrx_chain; u16 tx_rx_cal_phy_saveregs[11]; diff --git a/drivers/net/wireless/b43/ppr.c b/drivers/net/wireless/b43/ppr.c new file mode 100644 index 0000000000000000000000000000000000000000..9a770279c4159f6c7729644c9b731eb7effbe408 --- /dev/null +++ b/drivers/net/wireless/b43/ppr.c @@ -0,0 +1,199 @@ +/* + * Broadcom B43 wireless driver + * PPR (Power Per Rate) management + * + * Copyright (c) 2014 RafaÅ‚ MiÅ‚ecki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "ppr.h" +#include "b43.h" + +#define ppr_for_each_entry(ppr, i, entry) \ + for (i = 0, entry = &(ppr)->__all_rates[i]; \ + i < B43_PPR_RATES_NUM; \ + i++, entry++) + +void b43_ppr_clear(struct b43_wldev *dev, struct b43_ppr *ppr) +{ + memset(ppr, 0, sizeof(*ppr)); + + /* Compile-time PPR check */ + BUILD_BUG_ON(sizeof(struct b43_ppr) != B43_PPR_RATES_NUM * sizeof(u8)); +} + +void b43_ppr_add(struct b43_wldev *dev, struct b43_ppr *ppr, int diff) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = clamp_val(*rate + diff, 0, 127); + } +} + +void b43_ppr_apply_max(struct b43_wldev *dev, struct b43_ppr *ppr, u8 max) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = min(*rate, max); + } +} + +void b43_ppr_apply_min(struct b43_wldev *dev, struct b43_ppr *ppr, u8 min) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = max(*rate, min); + } +} + +u8 b43_ppr_get_max(struct b43_wldev *dev, struct b43_ppr *ppr) +{ + u8 res = 0; + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + res = max(*rate, res); + } + + return res; +} + +bool b43_ppr_load_max_from_sprom(struct b43_wldev *dev, struct b43_ppr *ppr, + enum b43_band band) +{ + struct b43_ppr_rates *rates = &ppr->rates; + struct ssb_sprom *sprom = dev->dev->bus_sprom; + struct b43_phy *phy = &dev->phy; + u8 maxpwr, off; + u32 sprom_ofdm_po; + u16 *sprom_mcs_po; + u8 extra_cdd_po, extra_stbc_po; + int i; + + switch (band) { + case B43_BAND_2G: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_2g, + sprom->core_pwr_info[1].maxpwr_2g); + sprom_ofdm_po = sprom->ofdm2gpo; + sprom_mcs_po = sprom->mcs2gpo; + extra_cdd_po = (sprom->cddpo >> 0) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 0) & 0xf; + break; + case B43_BAND_5G_LO: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5gl, + sprom->core_pwr_info[1].maxpwr_5gl); + sprom_ofdm_po = sprom->ofdm5glpo; + sprom_mcs_po = sprom->mcs5glpo; + extra_cdd_po = (sprom->cddpo >> 8) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 8) & 0xf; + break; + case B43_BAND_5G_MI: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5g, + sprom->core_pwr_info[1].maxpwr_5g); + sprom_ofdm_po = sprom->ofdm5gpo; + sprom_mcs_po = sprom->mcs5gpo; + extra_cdd_po = (sprom->cddpo >> 4) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 4) & 0xf; + break; + case B43_BAND_5G_HI: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5gh, + sprom->core_pwr_info[1].maxpwr_5gh); + sprom_ofdm_po = sprom->ofdm5ghpo; + sprom_mcs_po = sprom->mcs5ghpo; + extra_cdd_po = (sprom->cddpo >> 12) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 12) & 0xf; + break; + default: + WARN_ON_ONCE(1); + return false; + } + + if (band == B43_BAND_2G) { + for (i = 0; i < 4; i++) { + off = ((sprom->cck2gpo >> (i * 4)) & 0xf) * 2; + rates->cck[i] = maxpwr - off; + } + } + + /* OFDM */ + for (i = 0; i < 8; i++) { + off = ((sprom_ofdm_po >> (i * 4)) & 0xf) * 2; + rates->ofdm[i] = maxpwr - off; + } + + /* MCS 20 SISO */ + rates->mcs_20[0] = rates->ofdm[0]; + rates->mcs_20[1] = rates->ofdm[2]; + rates->mcs_20[2] = rates->ofdm[3]; + rates->mcs_20[3] = rates->ofdm[4]; + rates->mcs_20[4] = rates->ofdm[5]; + rates->mcs_20[5] = rates->ofdm[6]; + rates->mcs_20[6] = rates->ofdm[7]; + rates->mcs_20[7] = rates->ofdm[7]; + + /* MCS 20 CDD */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[0] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_cdd[i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_cdd[i] -= extra_cdd_po; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[1] >> (i * 4)) & 0xf) * 2; + 
rates->mcs_20_cdd[4 + i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_cdd[4 + i] -= extra_cdd_po; + } + + /* OFDM 20 CDD */ + rates->ofdm_20_cdd[0] = rates->mcs_20_cdd[0]; + rates->ofdm_20_cdd[1] = rates->mcs_20_cdd[0]; + rates->ofdm_20_cdd[2] = rates->mcs_20_cdd[1]; + rates->ofdm_20_cdd[3] = rates->mcs_20_cdd[2]; + rates->ofdm_20_cdd[4] = rates->mcs_20_cdd[3]; + rates->ofdm_20_cdd[5] = rates->mcs_20_cdd[4]; + rates->ofdm_20_cdd[6] = rates->mcs_20_cdd[5]; + rates->ofdm_20_cdd[7] = rates->mcs_20_cdd[6]; + + /* MCS 20 STBC */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[0] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_stbc[i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_stbc[i] -= extra_stbc_po; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[1] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_stbc[4 + i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_stbc[4 + i] -= extra_stbc_po; + } + + /* MCS 20 SDM */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[2] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_sdm[i] = maxpwr - off; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[3] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_sdm[4 + i] = maxpwr - off; + } + + return true; +} diff --git a/drivers/net/wireless/b43/ppr.h b/drivers/net/wireless/b43/ppr.h new file mode 100644 index 0000000000000000000000000000000000000000..24d7447e9f01e51e4ad69c935f5c58cc28567851 --- /dev/null +++ b/drivers/net/wireless/b43/ppr.h @@ -0,0 +1,45 @@ +#ifndef LINUX_B43_PPR_H_ +#define LINUX_B43_PPR_H_ + +#include + +#define B43_PPR_CCK_RATES_NUM 4 +#define B43_PPR_OFDM_RATES_NUM 8 +#define B43_PPR_MCS_RATES_NUM 8 + +#define B43_PPR_RATES_NUM (B43_PPR_CCK_RATES_NUM + \ + B43_PPR_OFDM_RATES_NUM * 2 + \ + B43_PPR_MCS_RATES_NUM * 4) + +struct b43_ppr_rates { + u8 cck[B43_PPR_CCK_RATES_NUM]; + u8 ofdm[B43_PPR_OFDM_RATES_NUM]; + u8 ofdm_20_cdd[B43_PPR_OFDM_RATES_NUM]; + u8 mcs_20[B43_PPR_MCS_RATES_NUM]; /* SISO */ + u8 mcs_20_cdd[B43_PPR_MCS_RATES_NUM]; + u8 mcs_20_stbc[B43_PPR_MCS_RATES_NUM]; + u8 mcs_20_sdm[B43_PPR_MCS_RATES_NUM]; +}; + +struct b43_ppr { + /* All powers are in qdbm (Q5.2) */ + union { + u8 __all_rates[B43_PPR_RATES_NUM]; + struct b43_ppr_rates rates; + }; +}; + +struct b43_wldev; +enum b43_band; + +void b43_ppr_clear(struct b43_wldev *dev, struct b43_ppr *ppr); + +void b43_ppr_add(struct b43_wldev *dev, struct b43_ppr *ppr, int diff); +void b43_ppr_apply_max(struct b43_wldev *dev, struct b43_ppr *ppr, u8 max); +void b43_ppr_apply_min(struct b43_wldev *dev, struct b43_ppr *ppr, u8 min); +u8 b43_ppr_get_max(struct b43_wldev *dev, struct b43_ppr *ppr); + +bool b43_ppr_load_max_from_sprom(struct b43_wldev *dev, struct b43_ppr *ppr, + enum b43_band band); + +#endif /* LINUX_B43_PPR_H_ */ diff --git a/drivers/net/wireless/b43/radio_2059.c b/drivers/net/wireless/b43/radio_2059.c index 38e31d857e3e8bb1eb1c0875931cbd538ac0a379..a3cf9efd7e216cef5cbeb59e266d765bcbf07e05 100644 --- a/drivers/net/wireless/b43/radio_2059.c +++ b/drivers/net/wireless/b43/radio_2059.c @@ -25,6 +25,13 @@ #include "b43.h" #include "radio_2059.h" +/* Extracted from MMIO dump of 6.30.223.141 */ +static u16 r2059_phy_rev1_init[][2] = { + { 0x051, 0x70 }, { 0x05a, 0x03 }, { 0x079, 0x01 }, { 0x082, 0x70 }, + { 0x083, 0x00 }, { 0x084, 0x70 }, { 0x09a, 0x7f }, { 0x0b6, 0x10 }, + { 0x188, 0x05 }, +}; + #define RADIOREGS(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \ r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \ r20) \ @@ -58,73 +65,87 
@@ .phy_regs.bw5 = r4, \ .phy_regs.bw6 = r5 +/* Extracted from MMIO dump of 6.30.223.141 + * TODO: Values for channels 12 & 13 are outdated (from some old 5.x driver)! + */ static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radio2059[] = { - { .freq = 2412, - RADIOREGS(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, - 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443), - }, - { .freq = 2417, - RADIOREGS(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71, - 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441), - }, - { .freq = 2422, - RADIOREGS(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76, - 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f), - }, - { .freq = 2427, - RADIOREGS(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b, - 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d), - }, - { .freq = 2432, - RADIOREGS(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80, - 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a), - }, - { .freq = 2437, - RADIOREGS(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85, - 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438), - }, - { .freq = 2442, - RADIOREGS(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a, - 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436), - }, - { .freq = 2447, - RADIOREGS(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f, - 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434), - }, - { .freq = 2452, - RADIOREGS(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94, - 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431), - }, - { .freq = 2457, - RADIOREGS(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99, - 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f), - }, - { .freq = 2462, - RADIOREGS(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e, - 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03, - 0x00, 0x00, 0x00, 0xf0, 0x00), - PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d), - }, + { + .freq = 2412, + RADIOREGS(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, + 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0xd0, 0x00), + PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443), + }, + { + .freq = 2417, + RADIOREGS(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71, + 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0xd0, 0x00), + PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441), + }, + { + .freq = 2422, + RADIOREGS(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76, + 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0xd0, 0x00), + PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f), + }, + { + .freq = 2427, + RADIOREGS(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b, + 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x73, + 
0x00, 0x00, 0x00, 0xa0, 0x00), + PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d), + }, + { + .freq = 2432, + RADIOREGS(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80, + 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0xa0, 0x00), + PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a), + }, + { + .freq = 2437, + RADIOREGS(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85, + 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0xa0, 0x00), + PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438), + }, + { + .freq = 2442, + RADIOREGS(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a, + 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0x80, 0x00), + PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436), + }, + { + .freq = 2447, + RADIOREGS(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f, + 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0x80, 0x00), + PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434), + }, + { + .freq = 2452, + RADIOREGS(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94, + 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0x80, 0x00), + PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431), + }, + { + .freq = 2457, + RADIOREGS(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99, + 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0x60, 0x00), + PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f), + }, + { + .freq = 2462, + RADIOREGS(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e, + 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x73, + 0x00, 0x00, 0x00, 0x60, 0x00), + PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d), + }, { .freq = 2467, RADIOREGS(0x6c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa3, 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03, @@ -137,8 +158,196 @@ static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radi 0x00, 0x00, 0x00, 0xf0, 0x00), PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429), }, + { + .freq = 5180, + RADIOREGS(0xbe, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x06, + 0x02, 0x0c, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, + 0x0f, 0x4f, 0xa3, 0x00, 0xfc), + PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb), + }, + { + .freq = 5200, + RADIOREGS(0xc5, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x08, + 0x02, 0x0c, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, + 0x0f, 0x4f, 0x93, 0x00, 0xfb), + PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9), + }, + { + .freq = 5220, + RADIOREGS(0xcc, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x0a, + 0x02, 0x0c, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, + 0x0f, 0x4f, 0x93, 0x00, 0xea), + PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7), + }, + { + .freq = 5240, + RADIOREGS(0xd2, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x0c, + 0x02, 0x0c, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, + 0x0f, 0x4f, 0x93, 0x00, 0xda), + PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5), + }, + { + .freq = 5260, + RADIOREGS(0xd9, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x0e, + 0x02, 0x0b, 0x00, 0x0b, 0x00, 0x0b, 0x00, 0x00, + 0x0f, 0x4f, 0x93, 0x00, 0xca), + PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3), + }, + { + .freq = 5280, + RADIOREGS(0xe0, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x10, + 0x02, 0x0b, 0x00, 0x0b, 0x00, 0x0b, 0x00, 0x00, + 0x0f, 0x4f, 0x93, 0x00, 0xb9), + PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1), + }, + { + .freq = 5300, + RADIOREGS(0xe6, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x12, + 0x02, 0x0b, 0x00, 0x0b, 0x00, 0x0b, 0x00, 0x00, + 0x0f, 0x4c, 0x83, 0x00, 0xb8), + PHYREGS(0x084c, 0x0848, 
0x0844, 0x01ee, 0x01ef, 0x01f0), + }, + { + .freq = 5320, + RADIOREGS(0xed, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x14, + 0x02, 0x0b, 0x00, 0x0b, 0x00, 0x0b, 0x00, 0x00, + 0x0f, 0x4c, 0x83, 0x00, 0xa8), + PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee), + }, + { + .freq = 5500, + RADIOREGS(0x29, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x26, + 0x02, 0x09, 0x00, 0x09, 0x00, 0x09, 0x00, 0x00, + 0x0a, 0x46, 0x43, 0x00, 0x75), + PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd), + }, + { + .freq = 5520, + RADIOREGS(0x30, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x28, + 0x02, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, + 0x0a, 0x46, 0x43, 0x00, 0x75), + PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc), + }, + { + .freq = 5540, + RADIOREGS(0x36, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x2a, + 0x02, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, + 0x0a, 0x46, 0x43, 0x00, 0x75), + PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da), + }, + { + .freq = 5560, + RADIOREGS(0x3d, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x2c, + 0x02, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, + 0x0a, 0x46, 0x43, 0x00, 0x75), + PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8), + }, + { + .freq = 5580, + RADIOREGS(0x44, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x2e, + 0x02, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, + 0x0a, 0x46, 0x43, 0x00, 0x74), + PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7), + }, + { + .freq = 5600, + RADIOREGS(0x4a, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x30, + 0x02, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, + 0x09, 0x44, 0x23, 0x00, 0x54), + PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5), + }, + { + .freq = 5620, + RADIOREGS(0x51, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x32, + 0x02, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, + 0x09, 0x44, 0x23, 0x00, 0x54), + PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3), + }, + { + .freq = 5640, + RADIOREGS(0x58, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x34, + 0x02, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, + 0x09, 0x44, 0x23, 0x00, 0x43), + PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2), + }, + { + .freq = 5660, + RADIOREGS(0x5e, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x36, + 0x02, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, + 0x09, 0x43, 0x23, 0x00, 0x43), + PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0), + }, + { + .freq = 5680, + RADIOREGS(0x65, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x38, + 0x02, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, + 0x09, 0x42, 0x23, 0x00, 0x43), + PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce), + }, + { + .freq = 5700, + RADIOREGS(0x6c, 0x17, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x3a, + 0x02, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, + 0x08, 0x42, 0x13, 0x00, 0x32), + PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd), + }, + { + .freq = 5745, + RADIOREGS(0x7b, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x7d, + 0x04, 0x06, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, + 0x08, 0x42, 0x13, 0x00, 0x21), + PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9), + }, + { + .freq = 5765, + RADIOREGS(0x81, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x81, + 0x04, 0x06, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, + 0x08, 0x42, 0x13, 0x00, 0x11), + PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8), + }, + { + .freq = 5785, + RADIOREGS(0x88, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x85, + 0x04, 0x05, 0x00, 0x05, 0x00, 0x05, 0x00, 0x00, + 0x08, 0x42, 0x13, 0x00, 0x00), + PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6), + }, + { + .freq = 5805, + RADIOREGS(0x8f, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x89, + 0x04, 0x05, 0x00, 0x05, 0x00, 
0x05, 0x00, 0x00, + 0x06, 0x41, 0x03, 0x00, 0x00), + PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4), + }, + { + .freq = 5825, + RADIOREGS(0x95, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x8d, + 0x04, 0x05, 0x00, 0x05, 0x00, 0x05, 0x00, 0x00, + 0x06, 0x41, 0x03, 0x00, 0x00), + PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3), + }, }; +void r2059_upload_inittabs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 *table = NULL; + u16 size, i; + + switch (phy->rev) { + case 1: + table = r2059_phy_rev1_init[0]; + size = ARRAY_SIZE(r2059_phy_rev1_init); + break; + default: + B43_WARN_ON(1); + return; + } + + for (i = 0; i < size; i++, table += 2) + b43_radio_write(dev, R2059_ALL | table[0], table[1]); +} + const struct b43_phy_ht_channeltab_e_radio2059 *b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq) { diff --git a/drivers/net/wireless/b43/radio_2059.h b/drivers/net/wireless/b43/radio_2059.h index 40a82d7f510c93b1219a98547d3042ba95d97303..9e22fb60588b2204f58cf3506498cc80342eb43d 100644 --- a/drivers/net/wireless/b43/radio_2059.h +++ b/drivers/net/wireless/b43/radio_2059.h @@ -10,6 +10,18 @@ #define R2059_C3 0x800 #define R2059_ALL 0xC00 +#define R2059_RCAL_CONFIG 0x004 +#define R2059_RFPLL_MASTER 0x011 +#define R2059_RFPLL_MISC_EN 0x02b +#define R2059_RFPLL_MISC_CAL_RESETN 0x02e +#define R2059_XTAL_CONFIG2 0x0c0 +#define R2059_RCCAL_START_R1_Q1_P1 0x13c +#define R2059_RCCAL_X1 0x13d +#define R2059_RCCAL_TRC0 0x13e +#define R2059_RCCAL_DONE_OSCCAP 0x140 +#define R2059_RCAL_STATUS 0x145 +#define R2059_RCCAL_MASTER 0x17f + /* Values for various registers uploaded on channel switching */ struct b43_phy_ht_channeltab_e_radio2059 { /* The channel frequency in MHz */ @@ -40,6 +52,8 @@ struct b43_phy_ht_channeltab_e_radio2059 { struct b43_phy_ht_channeltab_e_phy phy_regs; }; +void r2059_upload_inittabs(struct b43_wldev *dev); + const struct b43_phy_ht_channeltab_e_radio2059 *b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq); diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 4b5885077b01bd5b223a3d45868adfc52a744882..25d1cbd34306e03ea4827e09509c345d11b5a1df 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -2878,6 +2878,40 @@ const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[] = { -54, -46, -39, -31, -23, -15, -8, 0 }; +/* Extracted from MMIO dump of 6.30.223.248 + * Entries: 0, 15, 17, 21, 24, 26, 27, 29, 30 were guessed + */ +static const s16 b43_ntab_rf_pwr_offset_2057_rev9_2g[] = { + -133, -133, -107, -92, -81, + -73, -66, -61, -56, -52, + -48, -44, -41, -37, -34, + -31, -28, -25, -22, -19, + -17, -14, -12, -10, -9, + -7, -5, -4, -3, -2, + -1, 0, +}; + +/* Extracted from MMIO dump of 6.30.223.248 */ +static const s16 b43_ntab_rf_pwr_offset_2057_rev9_5g[] = { + -101, -94, -86, -79, -72, + -65, -57, -50, -42, -35, + -28, -21, -16, -9, -4, + 0, +}; + +/* Extracted from MMIO dump of 6.30.223.248 + * Entries: 0, 26, 28, 29, 30, 31 were guessed + */ +static const s16 b43_ntab_rf_pwr_offset_2057_rev14_2g[] = { + -111, -111, -111, -84, -70, + -59, -52, -45, -40, -36, + -32, -29, -26, -23, -21, + -18, -16, -15, -13, -11, + -10, -8, -7, -6, -5, + -4, -4, -3, -3, -2, + -2, -1, +}; + const u16 tbl_iqcal_gainparams[2][9][8] = { { { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 }, @@ -3197,7 +3231,7 @@ static struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x527E, /* invalid for external LNA! 
*/ { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */ - 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ + 0x007E, 0x0066, 0x0000, /* low is invalid (the last one) */ 0x18, 0x18, 0x18, 0x01D0, 0x5, }, @@ -3708,9 +3742,43 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) } } +const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + switch (phy->rev) { + case 17: + if (phy->radio_rev == 14) + return b43_ntab_rf_pwr_offset_2057_rev14_2g; + break; + case 16: + if (phy->radio_rev == 9) + return b43_ntab_rf_pwr_offset_2057_rev9_2g; + break; + } + + b43err(dev->wl, + "No 2GHz RF power table available for this device\n"); + return NULL; + } else { + switch (phy->rev) { + case 16: + if (phy->radio_rev == 9) + return b43_ntab_rf_pwr_offset_2057_rev9_5g; + break; + } + + b43err(dev->wl, + "No 5GHz RF power table available for this device\n"); + return NULL; + } +} + struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( struct b43_wldev *dev, bool ghz5, bool ext_lna) { + struct b43_phy *phy = &dev->phy; struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; @@ -3729,37 +3797,49 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; /* Some workarounds to the workarounds... */ - if (ghz5 && dev->phy.rev >= 6) { - if (dev->phy.radio_rev == 11 && - !b43_is_40mhz(dev)) - e->cliplo_gain = 0x2d; - } else if (!ghz5 && dev->phy.rev >= 5) { - static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a, - 0x106c, 0x1074, 0x107c, 0x207c}; + if (!ghz5) { u8 tr_iso = dev->dev->bus_sprom->fem.ghz2.tr_iso; - if (ext_lna) { + if (tr_iso > 7) + tr_iso = 3; + + if (phy->rev >= 6) { + static const int gain_data[] = { 0x106a, 0x106c, 0x1074, + 0x107c, 0x007e, 0x107e, + 0x207e, 0x307e, }; + + e->cliplo_gain = gain_data[tr_iso]; + } else if (phy->rev == 5) { + static const int gain_data[] = { 0x0062, 0x0064, 0x006a, + 0x106a, 0x106c, 0x1074, + 0x107c, 0x207c, }; + + e->cliplo_gain = gain_data[tr_iso]; + } + + if (phy->rev >= 5 && ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; e->rfseq_init[2] &= ~0x4000; e->rfseq_init[3] &= ~0x4000; e->init_gain &= ~0x4000; } - if (tr_iso > 7) - tr_iso = 3; - e->cliplo_gain = gain_data[tr_iso]; - - } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { - e->rfseq_init[0] &= ~0x4000; - e->rfseq_init[1] &= ~0x4000; - e->rfseq_init[2] &= ~0x4000; - e->rfseq_init[3] &= ~0x4000; - e->init_gain &= ~0x4000; - e->rfseq_init[0] |= 0x1000; - e->rfseq_init[1] |= 0x1000; - e->rfseq_init[2] |= 0x1000; - e->rfseq_init[3] |= 0x1000; - e->init_gain |= 0x1000; + } else { + if (phy->rev >= 6) { + if (phy->radio_rev == 11 && !b43_is_40mhz(dev)) + e->crsminu = 0x2d; + } else if (phy->rev == 4 && ext_lna) { + e->rfseq_init[0] &= ~0x4000; + e->rfseq_init[1] &= ~0x4000; + e->rfseq_init[2] &= ~0x4000; + e->rfseq_init[3] &= ~0x4000; + e->init_gain &= ~0x4000; + e->rfseq_init[0] |= 0x1000; + e->rfseq_init[1] |= 0x1000; + e->rfseq_init[2] |= 0x1000; + e->rfseq_init[3] |= 0x1000; + e->init_gain |= 0x1000; + } } return e; diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h index 3ce2e6f3a2781d168cf0955f6246b24a1d5d1181..b51f386db02fa8faf40a98481110b251d2295fe7 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -191,6 +191,8 @@ void b43_nphy_tables_init(struct b43_wldev 
*dev); const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev); +const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev); + extern const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[]; extern const u16 tbl_iqcal_gainparams[2][9][8]; diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h index 98d90747836a482fa836f6e30c8fc173e1c935e5..ba611530806812f9359beabd2149a9ff10ea8f22 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/b43/xmit.h @@ -97,9 +97,13 @@ struct b43_tx_legacy_rate_phy_ctl_entry { }; /* MAC TX control */ +#define B43_TXH_MAC_RTS_FB_SHORTPRMBL 0x80000000 /* RTS fallback preamble */ +#define B43_TXH_MAC_RTS_SHORTPRMBL 0x40000000 /* RTS main rate preamble */ +#define B43_TXH_MAC_FB_SHORTPRMBL 0x20000000 /* Main fallback preamble */ #define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ #define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */ #define B43_TXH_MAC_KEYIDX_SHIFT 20 +#define B43_TXH_MAC_ALT_TXPWR 0x00080000 /* Use alternate txpwr defined at loc. M_ALT_TXPWR_IDX */ #define B43_TXH_MAC_KEYALG 0x00070000 /* Security key algorithm */ #define B43_TXH_MAC_KEYALG_SHIFT 16 #define B43_TXH_MAC_AMIC 0x00008000 /* AMIC */ @@ -126,25 +130,25 @@ struct b43_tx_legacy_rate_phy_ctl_entry { #define B43_TXH_EFT_FB 0x03 /* Data frame fallback encoding */ #define B43_TXH_EFT_FB_CCK 0x00 /* CCK */ #define B43_TXH_EFT_FB_OFDM 0x01 /* OFDM */ -#define B43_TXH_EFT_FB_EWC 0x02 /* EWC */ -#define B43_TXH_EFT_FB_N 0x03 /* N */ +#define B43_TXH_EFT_FB_HT 0x02 /* HT */ +#define B43_TXH_EFT_FB_VHT 0x03 /* VHT */ #define B43_TXH_EFT_RTS 0x0C /* RTS/CTS encoding */ #define B43_TXH_EFT_RTS_CCK 0x00 /* CCK */ #define B43_TXH_EFT_RTS_OFDM 0x04 /* OFDM */ -#define B43_TXH_EFT_RTS_EWC 0x08 /* EWC */ -#define B43_TXH_EFT_RTS_N 0x0C /* N */ +#define B43_TXH_EFT_RTS_HT 0x08 /* HT */ +#define B43_TXH_EFT_RTS_VHT 0x0C /* VHT */ #define B43_TXH_EFT_RTSFB 0x30 /* RTS/CTS fallback encoding */ #define B43_TXH_EFT_RTSFB_CCK 0x00 /* CCK */ #define B43_TXH_EFT_RTSFB_OFDM 0x10 /* OFDM */ -#define B43_TXH_EFT_RTSFB_EWC 0x20 /* EWC */ -#define B43_TXH_EFT_RTSFB_N 0x30 /* N */ +#define B43_TXH_EFT_RTSFB_HT 0x20 /* HT */ +#define B43_TXH_EFT_RTSFB_VHT 0x30 /* VHT */ /* PHY TX control word */ #define B43_TXH_PHY_ENC 0x0003 /* Data frame encoding */ #define B43_TXH_PHY_ENC_CCK 0x0000 /* CCK */ #define B43_TXH_PHY_ENC_OFDM 0x0001 /* OFDM */ -#define B43_TXH_PHY_ENC_EWC 0x0002 /* EWC */ -#define B43_TXH_PHY_ENC_N 0x0003 /* N */ +#define B43_TXH_PHY_ENC_HT 0x0002 /* HT */ +#define B43_TXH_PHY_ENC_VHT 0x0003 /* VHT */ #define B43_TXH_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ #define B43_TXH_PHY_ANT 0x03C0 /* Antenna selection */ #define B43_TXH_PHY_ANT0 0x0000 /* Use antenna 0 */ @@ -162,7 +166,7 @@ struct b43_tx_legacy_rate_phy_ctl_entry { #define B43_TXH_PHY1_BW_20 0x0002 /* 20 MHz */ #define B43_TXH_PHY1_BW_20U 0x0003 /* 20 MHz upper */ #define B43_TXH_PHY1_BW_40 0x0004 /* 40 MHz */ -#define B43_TXH_PHY1_BW_40DUP 0x0005 /* 50 MHz duplicate */ +#define B43_TXH_PHY1_BW_40DUP 0x0005 /* 40 MHz duplicate */ #define B43_TXH_PHY1_MODE 0x0038 /* Mode */ #define B43_TXH_PHY1_MODE_SISO 0x0000 /* SISO */ #define B43_TXH_PHY1_MODE_CDD 0x0008 /* CDD */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index 3122b86050a1a6adddf37dbda41fdd1bcc5bd8f1..80e73a1262be81b6d143f4a8967e960def536fa7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ 
b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h @@ -67,6 +67,7 @@ struct brcmf_bus_dcmd { * @txctl: transmit a control request message to dongle. * @rxctl: receive a control response message from dongle. * @gettxq: obtain a reference of bus transmit queue (optional). + * @wowl_config: specify if dongle is configured for wowl when going to suspend * * This structure provides an abstract interface towards the * bus specific driver. For control messages to common driver @@ -80,6 +81,7 @@ struct brcmf_bus_ops { int (*txctl)(struct device *dev, unsigned char *msg, uint len); int (*rxctl)(struct device *dev, unsigned char *msg, uint len); struct pktq * (*gettxq)(struct device *dev); + void (*wowl_config)(struct device *dev, bool enabled); }; @@ -114,6 +116,7 @@ struct brcmf_bus_msgbuf { * @dstats: dongle-based statistical data. * @dcmd_list: bus/device specific dongle initialization commands. * @chip: device identifier of the dongle chip. + * @wowl_supported: is wowl supported by bus driver. * @chiprev: revision of the dongle chip. */ struct brcmf_bus { @@ -131,6 +134,7 @@ struct brcmf_bus { u32 chip; u32 chiprev; bool always_use_fws_queue; + bool wowl_supported; struct brcmf_bus_ops *ops; struct brcmf_bus_msgbuf *msgbuf; @@ -177,6 +181,13 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus) return bus->ops->gettxq(bus->dev); } +static inline +void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled) +{ + if (bus->ops->wowl_config) + bus->ops->wowl_config(bus->dev, enabled); +} + static inline bool brcmf_bus_ready(struct brcmf_bus *bus) { return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index 50877e3c5d2fdbe91b49bb6ab6878e23a168e686..aed53acef456a4404e2f26c9273c2ea966ea5605 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c @@ -107,6 +107,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) struct brcmf_if *ifp = drvr->iflist[0]; brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); + if (drvr->bus_if->wowl_supported) + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); /* set chip related quirks */ switch (drvr->bus_if->chip) { diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h index 961d175f8afb0d5bb062b7a2e51530d10d9483a8..b9a796d0a44d9d1f3b8b01092d76937998dccd09 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h @@ -22,7 +22,8 @@ * MCHAN: multi-channel for concurrent P2P. 
*/ #define BRCMF_FEAT_LIST \ - BRCMF_FEAT_DEF(MCHAN) + BRCMF_FEAT_DEF(MCHAN) \ + BRCMF_FEAT_DEF(WOWL) /* * Quirks: * diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index a1016b811284cd748af4537fa95fe3f9297fb003..1faa929f5fff2c5fcfe667d7b585ca8621448d5a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -354,7 +354,7 @@ struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings) struct brcmf_flowring *flow; u32 i; - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (flow) { flow->dev = dev; flow->nrofrings = nrofrings; @@ -364,7 +364,7 @@ struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings) for (i = 0; i < ARRAY_SIZE(flow->hash); i++) flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; flow->rings = kcalloc(nrofrings, sizeof(*flow->rings), - GFP_ATOMIC); + GFP_KERNEL); if (!flow->rings) { kfree(flow); flow = NULL; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h index 2bc68a2137fccb535f5a34fb34e6394db85ec645..5ff5cd0bb0325f0d23c2ad1359b113b8f69b2cc8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h @@ -53,6 +53,62 @@ #define BRCMF_OBSS_COEX_OFF 0 #define BRCMF_OBSS_COEX_ON 1 +/* WOWL bits */ +/* Wakeup on Magic packet: */ +#define WL_WOWL_MAGIC (1 << 0) +/* Wakeup on Netpattern */ +#define WL_WOWL_NET (1 << 1) +/* Wakeup on loss-of-link due to Disassoc/Deauth: */ +#define WL_WOWL_DIS (1 << 2) +/* Wakeup on retrograde TSF: */ +#define WL_WOWL_RETR (1 << 3) +/* Wakeup on loss of beacon: */ +#define WL_WOWL_BCN (1 << 4) +/* Wakeup after test: */ +#define WL_WOWL_TST (1 << 5) +/* Wakeup after PTK refresh: */ +#define WL_WOWL_M1 (1 << 6) +/* Wakeup after receipt of EAP-Identity Req: */ +#define WL_WOWL_EAPID (1 << 7) +/* Wakeind via PME(0) or GPIO(1): */ +#define WL_WOWL_PME_GPIO (1 << 8) +/* need tkip phase 1 key to be updated by the driver: */ +#define WL_WOWL_NEEDTKIP1 (1 << 9) +/* enable wakeup if GTK fails: */ +#define WL_WOWL_GTK_FAILURE (1 << 10) +/* support extended magic packets: */ +#define WL_WOWL_EXTMAGPAT (1 << 11) +/* support ARP/NS/keepalive offloading: */ +#define WL_WOWL_ARPOFFLOAD (1 << 12) +/* read protocol version for EAPOL frames: */ +#define WL_WOWL_WPA2 (1 << 13) +/* If the bit is set, use key rotaton: */ +#define WL_WOWL_KEYROT (1 << 14) +/* If the bit is set, frm received was bcast frame: */ +#define WL_WOWL_BCAST (1 << 15) +/* If the bit is set, scan offload is enabled: */ +#define WL_WOWL_SCANOL (1 << 16) +/* Wakeup on tcpkeep alive timeout: */ +#define WL_WOWL_TCPKEEP_TIME (1 << 17) +/* Wakeup on mDNS Conflict Resolution: */ +#define WL_WOWL_MDNS_CONFLICT (1 << 18) +/* Wakeup on mDNS Service Connect: */ +#define WL_WOWL_MDNS_SERVICE (1 << 19) +/* tcp keepalive got data: */ +#define WL_WOWL_TCPKEEP_DATA (1 << 20) +/* Firmware died in wowl mode: */ +#define WL_WOWL_FW_HALT (1 << 21) +/* Enable detection of radio button changes: */ +#define WL_WOWL_ENAB_HWRADIO (1 << 22) +/* Offloads detected MIC failure(s): */ +#define WL_WOWL_MIC_FAIL (1 << 23) +/* Wakeup in Unassociated state (Net/Magic Pattern): */ +#define WL_WOWL_UNASSOC (1 << 24) +/* Wakeup if received matched secured pattern: */ +#define WL_WOWL_SECURE (1 << 25) +/* Link Down indication in WoWL mode: */ +#define WL_WOWL_LINKDOWN (1 << 31) + 
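The WL_WOWL_* flags added above are plain bit definitions; this hunk does not show them being combined. A minimal sketch of the intended use, assuming the firmware accepts the composite mask through a "wowl" integer iovar via the existing brcmf_fil_iovar_int_set() helper -- the iovar name and the helper function below are illustrative and not part of this patch:

/* Illustrative only: compose a wakeup mask from the new WL_WOWL_* bits
 * and hand it to firmware. The "wowl" iovar name is an assumption. */
static int brcmf_wowl_arm_sketch(struct brcmf_if *ifp, bool wake_on_link_loss)
{
	u32 wowl_mask = WL_WOWL_MAGIC | WL_WOWL_NET;

	if (wake_on_link_loss)
		wowl_mask |= WL_WOWL_DIS | WL_WOWL_BCN;

	return brcmf_fil_iovar_int_set(ifp, "wowl", wowl_mask);
}
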
/* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index d42f7d04b65f115a6a370f2015015b6b03718b2a..183f08d7fc8c8e1ece4a6965fd604313385580f4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -1636,7 +1636,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, if (!signal_len) return 0; /* if flow control disabled, skip to packet data and leave */ - if (!fws->fw_signals) { + if ((!fws) || (!fws->fw_signals)) { skb_pull(skb, signal_len); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 8f8b9373de95b6b2de89c017c2983d3abb247e98..11cc051f97cd42152ed6530257e1c1fcfbcf46e4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -208,6 +208,14 @@ struct msgbuf_flowring_flush_resp { __le32 rsvd0[3]; }; +struct brcmf_msgbuf_work_item { + struct list_head queue; + u32 flowid; + int ifidx; + u8 sa[ETH_ALEN]; + u8 da[ETH_ALEN]; +}; + struct brcmf_msgbuf { struct brcmf_pub *drvr; @@ -230,7 +238,7 @@ struct brcmf_msgbuf { dma_addr_t ioctbuf_handle; u32 ioctbuf_phys_hi; u32 ioctbuf_phys_lo; - u32 ioctl_resp_status; + int ioctl_resp_status; u32 ioctl_resp_ret_len; u32 ioctl_resp_pktid; @@ -248,6 +256,10 @@ struct brcmf_msgbuf { struct work_struct txflow_work; unsigned long *flow_map; unsigned long *txstatus_done_map; + + struct work_struct flowring_work; + spinlock_t flowring_work_lock; + struct list_head work_queue; }; struct brcmf_msgbuf_pktid { @@ -284,11 +296,11 @@ brcmf_msgbuf_init_pktids(u32 nr_array_entries, struct brcmf_msgbuf_pktid *array; struct brcmf_msgbuf_pktids *pktids; - array = kcalloc(nr_array_entries, sizeof(*array), GFP_ATOMIC); + array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL); if (!array) return NULL; - pktids = kzalloc(sizeof(*pktids), GFP_ATOMIC); + pktids = kzalloc(sizeof(*pktids), GFP_KERNEL); if (!pktids) { kfree(array); return NULL; @@ -544,11 +556,29 @@ brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid) } -static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, - struct sk_buff *skb) +static struct brcmf_msgbuf_work_item * +brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf) +{ + struct brcmf_msgbuf_work_item *work = NULL; + ulong flags; + + spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); + if (!list_empty(&msgbuf->work_queue)) { + work = list_first_entry(&msgbuf->work_queue, + struct brcmf_msgbuf_work_item, queue); + list_del(&work->queue); + } + spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); + + return work; +} + + +static u32 +brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, + struct brcmf_msgbuf_work_item *work) { struct msgbuf_tx_flowring_create_req *create; - struct ethhdr *eh = (struct ethhdr *)(skb->data); struct brcmf_commonring *commonring; void *ret_ptr; u32 flowid; @@ -557,16 +587,11 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, long long address; int err; - flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest, - skb->priority, ifidx); - if (flowid == BRCMF_FLOWRING_INVALID_ID) - return flowid; - + flowid = work->flowid; dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; - dma_buf = 
dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz, &msgbuf->flowring_dma_handle[flowid], - GFP_ATOMIC); + GFP_KERNEL); if (!dma_buf) { brcmf_err("dma_alloc_coherent failed\n"); brcmf_flowring_delete(msgbuf->flow, flowid); @@ -589,13 +614,13 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, create = (struct msgbuf_tx_flowring_create_req *)ret_ptr; create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE; - create->msg.ifidx = ifidx; + create->msg.ifidx = work->ifidx; create->msg.request_id = 0; create->tid = brcmf_flowring_tid(msgbuf->flow, flowid); create->flow_ring_id = cpu_to_le16(flowid + BRCMF_NROF_H2D_COMMON_MSGRINGS); - memcpy(create->sa, eh->h_source, ETH_ALEN); - memcpy(create->da, eh->h_dest, ETH_ALEN); + memcpy(create->sa, work->sa, ETH_ALEN); + memcpy(create->da, work->da, ETH_ALEN); address = (long long)(long)msgbuf->flowring_dma_handle[flowid]; create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32); create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff); @@ -603,7 +628,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE); brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n", - flowid, eh->h_dest, create->tid, ifidx); + flowid, work->da, create->tid, work->ifidx); err = brcmf_commonring_write_complete(commonring); brcmf_commonring_unlock(commonring); @@ -617,6 +642,53 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, } +static void brcmf_msgbuf_flowring_worker(struct work_struct *work) +{ + struct brcmf_msgbuf *msgbuf; + struct brcmf_msgbuf_work_item *create; + + msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work); + + while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) { + brcmf_msgbuf_flowring_create_worker(msgbuf, create); + kfree(create); + } +} + + +static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, + struct sk_buff *skb) +{ + struct brcmf_msgbuf_work_item *create; + struct ethhdr *eh = (struct ethhdr *)(skb->data); + u32 flowid; + ulong flags; + + create = kzalloc(sizeof(*create), GFP_ATOMIC); + if (create == NULL) + return BRCMF_FLOWRING_INVALID_ID; + + flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest, + skb->priority, ifidx); + if (flowid == BRCMF_FLOWRING_INVALID_ID) { + kfree(create); + return flowid; + } + + create->flowid = flowid; + create->ifidx = ifidx; + memcpy(create->sa, eh->h_source, ETH_ALEN); + memcpy(create->da, eh->h_dest, ETH_ALEN); + + spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); + list_add_tail(&create->queue, &msgbuf->work_queue); + spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); + schedule_work(&msgbuf->flowring_work); + + return flowid; +} + + static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid) { struct brcmf_flowring *flow = msgbuf->flow; @@ -767,7 +839,8 @@ brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf) ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf; - msgbuf->ioctl_resp_status = le16_to_cpu(ioctl_resp->compl_hdr.status); + msgbuf->ioctl_resp_status = + (s16)le16_to_cpu(ioctl_resp->compl_hdr.status); msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len); msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id); @@ -1271,7 +1344,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) u32 count; if_msgbuf = drvr->bus_if->msgbuf; - msgbuf = kzalloc(sizeof(*msgbuf), GFP_ATOMIC); + msgbuf = 
kzalloc(sizeof(*msgbuf), GFP_KERNEL); if (!msgbuf) goto fail; @@ -1282,11 +1355,11 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) } INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker); count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings); - msgbuf->flow_map = kzalloc(count, GFP_ATOMIC); + msgbuf->flow_map = kzalloc(count, GFP_KERNEL); if (!msgbuf->flow_map) goto fail; - msgbuf->txstatus_done_map = kzalloc(count, GFP_ATOMIC); + msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL); if (!msgbuf->txstatus_done_map) goto fail; @@ -1294,7 +1367,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev, BRCMF_TX_IOCTL_MAX_MSG_SIZE, &msgbuf->ioctbuf_handle, - GFP_ATOMIC); + GFP_KERNEL); if (!msgbuf->ioctbuf) goto fail; address = (long long)(long)msgbuf->ioctbuf_handle; @@ -1317,7 +1390,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings; msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings; msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings * - sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC); + sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL); if (!msgbuf->flowring_dma_handle) goto fail; @@ -1357,6 +1430,10 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) brcmf_msgbuf_rxbuf_event_post(msgbuf); brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); + INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker); + spin_lock_init(&msgbuf->flowring_work_lock); + INIT_LIST_HEAD(&msgbuf->work_queue); + return 0; fail: @@ -1379,11 +1456,19 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) { struct brcmf_msgbuf *msgbuf; + struct brcmf_msgbuf_work_item *work; brcmf_dbg(TRACE, "Enter\n"); if (drvr->proto->pd) { msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; - + cancel_work_sync(&msgbuf->flowring_work); + while (!list_empty(&msgbuf->work_queue)) { + work = list_first_entry(&msgbuf->work_queue, + struct brcmf_msgbuf_work_item, + queue); + list_del(&work->queue); + kfree(work); + } kfree(msgbuf->flow_map); kfree(msgbuf->txstatus_done_map); if (msgbuf->txflow_wq) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 057b982ea8b37f90792fc53ca9337a9f13897e32..d54c58a32faa52bf9b6dc2cb36edd35c07993302 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -440,8 +440,11 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) /* In case of COB type, firmware has default mac address * After Initializing firmware, we have to set current mac address to - * firmware for P2P device address + * firmware for P2P device address. This must be done with discovery + * disabled. 
*/ + brcmf_fil_iovar_int_set(ifp, "p2p_disc", 0); + ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac, ETH_ALEN); if (ret) @@ -1431,8 +1434,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, IEEE80211_BAND_5GHZ); wdev = &ifp->vif->wdev; - cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0); kfree(mgmt_frame); return 0; @@ -1896,8 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", mgmt_frame_len, e->datalen, chanspec, freq); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index e5101b287e4eebd944e516d6b9a83a09ba2fa7e5..8c0632ec9f7a6041e72a0a3c6e6243a00955d97f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -165,6 +165,8 @@ enum brcmf_pcie_state { #define BRCMF_H2D_HOST_D3_INFORM 0x00000001 #define BRCMF_H2D_HOST_DS_ACK 0x00000002 +#define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define BRCMF_H2D_HOST_D0_INFORM 0x00000010 #define BRCMF_PCIE_MBDATA_TIMEOUT 2000 @@ -243,6 +245,7 @@ struct brcmf_pciedev_info { wait_queue_head_t mbdata_resp_wait; bool mbdata_completed; bool irq_allocated; + bool wowl_enabled; }; struct brcmf_pcie_ringbuf { @@ -537,7 +540,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, } -static void +static int brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) { struct brcmf_pcie_shared_info *shared; @@ -558,13 +561,15 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data) msleep(10); i++; if (i > 100) - break; + return -EIO; cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr); } brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data); pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1); + + return 0; } @@ -1229,11 +1234,27 @@ static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg, } +static void brcmf_pcie_wowl_config(struct device *dev, bool enabled) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + struct brcmf_pciedev_info *devinfo = buspub->devinfo; + + brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); + devinfo->wowl_enabled = enabled; + if (enabled) + device_set_wakeup_enable(&devinfo->pdev->dev, true); + else + device_set_wakeup_enable(&devinfo->pdev->dev, false); +} + + static struct brcmf_bus_ops brcmf_pcie_bus_ops = { .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, .rxctl = brcmf_pcie_rx_ctlpkt, + .wowl_config = brcmf_pcie_wowl_config, }; @@ -1668,6 +1689,7 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) bus->ops = &brcmf_pcie_bus_ops; bus->proto_type = BRCMF_PROTO_MSGBUF; bus->chip = devinfo->coreid; + bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); dev_set_drvdata(&pdev->dev, bus); ret = brcmf_pcie_get_fwnames(devinfo); @@ -1759,36 +1781,62 @@ static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state) brcmf_err("Timeout on response for entering D3 substate\n"); return -EIO; } 
- brcmf_pcie_release_irq(devinfo); + brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE); err = pci_save_state(pdev); - if (err) { + if (err) brcmf_err("pci_save_state failed, err=%d\n", err); - return err; + if ((err) || (!devinfo->wowl_enabled)) { + brcmf_chip_detach(devinfo->ci); + devinfo->ci = NULL; + brcmf_pcie_remove(pdev); + return 0; } - brcmf_chip_detach(devinfo->ci); - devinfo->ci = NULL; - - brcmf_pcie_remove(pdev); - return pci_prepare_to_sleep(pdev); } - static int brcmf_pcie_resume(struct pci_dev *pdev) { + struct brcmf_pciedev_info *devinfo; + struct brcmf_bus *bus; int err; - brcmf_dbg(PCIE, "Enter, pdev=%p\n", pdev); + bus = dev_get_drvdata(&pdev->dev); + brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus); err = pci_set_power_state(pdev, PCI_D0); if (err) { brcmf_err("pci_set_power_state failed, err=%d\n", err); - return err; + goto cleanup; } pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D3hot, false); + pci_enable_wake(pdev, PCI_D3cold, false); + + /* Check if device is still up and running, if so we are ready */ + if (bus) { + devinfo = bus->bus_priv.pcie->devinfo; + if (brcmf_pcie_read_reg32(devinfo, + BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { + if (brcmf_pcie_send_mb_data(devinfo, + BRCMF_H2D_HOST_D0_INFORM)) + goto cleanup; + brcmf_dbg(PCIE, "Hot resume, continue....\n"); + brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); + brcmf_bus_change_state(bus, BRCMF_BUS_DATA); + brcmf_pcie_intr_enable(devinfo); + return 0; + } + } +cleanup: + if (bus) { + devinfo = bus->bus_priv.pcie->devinfo; + brcmf_chip_detach(devinfo->ci); + devinfo->ci = NULL; + brcmf_pcie_remove(pdev); + } err = brcmf_pcie_probe(pdev, NULL); if (err) brcmf_err("probe after resume failed, err=%d\n", err); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 16a246bfc34380dbaa8329e7675583f5269b26d1..28fa25b509db8c3154d2ff0220d4b05c5f83c99d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -37,6 +37,7 @@ #include "fwil.h" #include "proto.h" #include "vendor.h" +#include "dhd_bus.h" #define BRCMF_SCAN_IE_LEN_MAX 2048 #define BRCMF_PNO_VERSION 2 @@ -2397,9 +2398,13 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "Signal: %d\n", notify_signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID, - 0, notify_capability, notify_interval, notify_ie, - notify_ielen, notify_signal, GFP_KERNEL); + bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, notify_signal, + GFP_KERNEL); if (!bss) return -ENOMEM; @@ -2425,7 +2430,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg) s32 err = 0; int i; - bss_list = cfg->bss_list; + bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf; if (bss_list->count != 0 && bss_list->version != BRCMF_BSS_INFO_VERSION) { brcmf_err("Version %d != WL_BSS_INFO_VERSION\n", @@ -2501,9 +2506,11 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg, brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "signal: %d\n", notify_signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, - 0, notify_capability, notify_interval, - notify_ie, notify_ielen, notify_signal, GFP_KERNEL); + bss = cfg80211_inform_bss(wiphy, 
notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, + notify_capability, notify_interval, + notify_ie, notify_ielen, notify_signal, + GFP_KERNEL); if (!bss) { err = -ENOMEM; @@ -2599,6 +2606,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) container_of(work, struct brcmf_cfg80211_info, escan_timeout_work); + brcmf_inform_bss(cfg); brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true); } @@ -2737,12 +2745,9 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, if (brcmf_p2p_scan_finding_common_channel(cfg, NULL)) goto exit; if (cfg->scan_request) { - cfg->bss_list = (struct brcmf_scan_results *) - cfg->escan_info.escan_buf; brcmf_inform_bss(cfg); aborted = status != BRCMF_E_STATUS_SUCCESS; - brcmf_notify_escan_complete(cfg, ifp, aborted, - false); + brcmf_notify_escan_complete(cfg, ifp, aborted, false); } else brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n", status); @@ -2776,50 +2781,91 @@ static __always_inline void brcmf_delay(u32 ms) static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(ndev); + brcmf_dbg(TRACE, "Enter\n"); + if (cfg->wowl_enabled) { + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, + cfg->pre_wowl_pmmode); + brcmf_fil_iovar_data_set(ifp, "wowl_pattern", "clr", 4); + brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); + cfg->wowl_enabled = false; + } return 0; } +static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, + struct brcmf_if *ifp, + struct cfg80211_wowlan *wowl) +{ + u32 wowl_config; + + brcmf_dbg(TRACE, "Suspend, wowl config.\n"); + + brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->pre_wowl_pmmode); + brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); + + wowl_config = 0; + if (wowl->disconnect) + wowl_config |= WL_WOWL_DIS | WL_WOWL_BCN | WL_WOWL_RETR; + /* Note: if "wowl" target and not "wowlpf" then wowl_bcn_loss + * should be configured. This parameter is not supported by + * wowlpf. + */ + if (wowl->magic_pkt) + wowl_config |= WL_WOWL_MAGIC; + brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config); + brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1); + brcmf_bus_wowl_config(cfg->pub->bus_if, true); + cfg->wowl_enabled = true; +} + static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, - struct cfg80211_wowlan *wow) + struct cfg80211_wowlan *wowl) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg); + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_vif *vif; brcmf_dbg(TRACE, "Enter\n"); - /* - * if the primary net_device is not READY there is nothing + /* if the primary net_device is not READY there is nothing * we can do but pray resume goes smoothly. 
*/ - vif = ((struct brcmf_if *)netdev_priv(ndev))->vif; - if (!check_vif_up(vif)) + if (!check_vif_up(ifp->vif)) goto exit; - list_for_each_entry(vif, &cfg->vif_list, list) { - if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) - continue; - /* - * While going to suspend if associated with AP disassociate - * from AP to save power while system is in suspended state - */ - brcmf_link_down(vif); - - /* Make sure WPA_Supplicant receives all the event - * generated due to DISASSOC call to the fw to keep - * the state fw and WPA_Supplicant state consistent - */ - brcmf_delay(500); - } - /* end any scanning */ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) brcmf_abort_scanning(cfg); - /* Turn off watchdog timer */ - brcmf_set_mpc(netdev_priv(ndev), 1); + if (wowl == NULL) { + brcmf_bus_wowl_config(cfg->pub->bus_if, false); + list_for_each_entry(vif, &cfg->vif_list, list) { + if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) + continue; + /* While going to suspend if associated with AP + * disassociate from AP to save power while system is + * in suspended state + */ + brcmf_link_down(vif); + /* Make sure WPA_Supplicant receives all the event + * generated due to DISASSOC call to the fw to keep + * the state fw and WPA_Supplicant state consistent + */ + brcmf_delay(500); + } + /* Configure MPC */ + brcmf_set_mpc(ifp, 1); + + } else { + /* Configure WOWL parameters */ + brcmf_configure_wowl(cfg, ifp, wowl); + } exit: brcmf_dbg(TRACE, "Exit\n"); @@ -5394,6 +5440,21 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy) wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; } + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support brcmf_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + +static void brcmf_wiphy_wowl_params(struct wiphy *wiphy) +{ +#ifdef CONFIG_PM + /* wowl settings */ + wiphy->wowlan = &brcmf_wowlan_support; +#endif +} + static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) { struct ieee80211_iface_combination ifc_combo; @@ -5431,6 +5492,9 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->vendor_commands = brcmf_vendor_cmds; wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1; + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) + brcmf_wiphy_wowl_params(wiphy); + return brcmf_setup_wiphybands(wiphy); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h index f9fb10998e7960d68ca03db0afbf2a7d84f780b2..6abf94e41d3d0683b3f5e95da0fadb3b1f0f8c3b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h @@ -35,7 +35,7 @@ #define WL_SCAN_PASSIVE_TIME 120 #define WL_ESCAN_BUF_SIZE (1024 * 64) -#define WL_ESCAN_TIMER_INTERVAL_MS 8000 /* E-Scan timeout */ +#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */ #define WL_ESCAN_ACTION_START 1 #define WL_ESCAN_ACTION_CONTINUE 2 @@ -363,6 +363,8 @@ struct brcmf_cfg80211_vif_event { * @vif_list: linked list of vif instances. * @vif_cnt: number of vif instances. * @vif_event: vif event signalling. + * @wowl_enabled: set during suspend, true if wowl is used. + * @pre_wowl_pmmode: intermediate storage of pm mode during wowl. 
*/ struct brcmf_cfg80211_info { struct wiphy *wiphy; @@ -371,7 +373,6 @@ struct brcmf_cfg80211_info { struct brcmf_btcoex_info *btcoex; struct cfg80211_scan_request *scan_request; struct mutex usr_sync; - struct brcmf_scan_results *bss_list; struct brcmf_cfg80211_scan_req scan_req_int; struct wl_cfg80211_bss_info *bss_info; struct brcmf_cfg80211_ie ie; @@ -397,6 +398,8 @@ struct brcmf_cfg80211_info { struct brcmf_cfg80211_vif_event vif_event; struct completion vif_disabled; struct brcmu_d11inf d11inf; + bool wowl_enabled; + u32 pre_wowl_pmmode; }; /** diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 4fb9635d3919d29a5c927d55086946d1afe18269..796f5f9d5d5a45bc621b0fd7e880a112aee1067a 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -746,7 +746,7 @@ dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring, /* !! may be called with core in reset */ void dma_detach(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -842,7 +842,7 @@ static void _dma_rxenable(struct dma_info *di) void dma_rxinit(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -924,7 +924,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall) */ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct sk_buff_head dma_frames; struct sk_buff *p, *next; uint len; @@ -1022,7 +1022,7 @@ static bool dma64_txidle(struct dma_info *di) */ bool dma_rxfill(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct sk_buff *p; u16 rxin, rxout; u32 flags = 0; @@ -1106,7 +1106,7 @@ bool dma_rxfill(struct dma_pub *pub) void dma_rxreclaim(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct sk_buff *p; brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -1126,7 +1126,7 @@ void dma_counterreset(struct dma_pub *pub) /* get the address of the var in order to change later */ unsigned long dma_getvar(struct dma_pub *pub, const char *name) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); if (!strcmp(name, "&txavail")) return (unsigned long)&(di->dma.txavail); @@ -1137,7 +1137,7 @@ unsigned long dma_getvar(struct dma_pub *pub, const char *name) void dma_txinit(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); u32 control = D64_XC_XE; brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -1170,7 +1170,7 @@ void dma_txinit(struct dma_pub *pub) void dma_txsuspend(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -1182,7 +1182,7 @@ void dma_txsuspend(struct dma_pub *pub) void dma_txresume(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); brcms_dbg_dma(di->core, "%s:\n", di->name); @@ -1194,7 
+1194,7 @@ void dma_txresume(struct dma_pub *pub) bool dma_txsuspended(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); return (di->ntxd == 0) || ((bcma_read32(di->core, @@ -1204,7 +1204,7 @@ bool dma_txsuspended(struct dma_pub *pub) void dma_txreclaim(struct dma_pub *pub, enum txd_range range) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct sk_buff *p; brcms_dbg_dma(di->core, "%s: %s\n", @@ -1225,7 +1225,7 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range) bool dma_txreset(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); u32 status; if (di->ntxd == 0) @@ -1252,7 +1252,7 @@ bool dma_txreset(struct dma_pub *pub) bool dma_rxreset(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); u32 status; if (di->nrxd == 0) @@ -1377,7 +1377,7 @@ static void dma_update_txavail(struct dma_info *di) int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub, struct sk_buff *p) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct brcms_ampdu_session *session = &di->ampdu_session; struct ieee80211_tx_info *tx_info; bool is_ampdu; @@ -1427,7 +1427,7 @@ int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub, void dma_txflush(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct brcms_ampdu_session *session = &di->ampdu_session; if (!skb_queue_empty(&session->skb_list)) @@ -1436,7 +1436,7 @@ void dma_txflush(struct dma_pub *pub) int dma_txpending(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); return ntxdactive(di, di->txin, di->txout); } @@ -1446,7 +1446,7 @@ int dma_txpending(struct dma_pub *pub) */ void dma_kick_tx(struct dma_pub *pub) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); struct brcms_ampdu_session *session = &di->ampdu_session; if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di)) @@ -1465,7 +1465,7 @@ void dma_kick_tx(struct dma_pub *pub) */ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) { - struct dma_info *di = (struct dma_info *)pub; + struct dma_info *di = container_of(pub, struct dma_info, dma); u16 start, end, i; u16 active_desc; struct sk_buff *txp; @@ -1547,7 +1547,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc) (void *pkt, void *arg_a), void *arg_a) { - struct dma_info *di = (struct dma_info *) dmah; + struct dma_info *di = container_of(dmah, struct dma_info, dma); uint i = di->txin; uint end = di->txout; struct sk_buff *skb; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c index 57ecc05802e965546a47346e7b8b8914ab83c062..941b1e41f3664cc0ea7f5708a337e9edceb7271b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c @@ -128,19 +128,19 @@ static const u8 ofdm_rate_lookup[] = { void wlc_phyreg_enter(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct 
brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); wlapi_bmac_ucode_wake_override_phyreg_set(pi->sh->physhim); } void wlc_phyreg_exit(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim); } void wlc_radioreg_enter(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO); udelay(10); @@ -148,7 +148,7 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih) void wlc_radioreg_exit(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion)); pi->phy_wreg = 0; @@ -586,7 +586,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, void wlc_phy_detach(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (pih) { if (--pi->refcnt) @@ -613,7 +613,7 @@ bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype, u16 *phyrev, u16 *radioid, u16 *radiover) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); *phytype = (u16) pi->pubpi.phy_type; *phyrev = (u16) pi->pubpi.phy_rev; *radioid = pi->pubpi.radioid; @@ -624,19 +624,19 @@ wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype, u16 *phyrev, bool wlc_phy_get_encore(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); return pi->pubpi.abgphy_encore; } u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); return pi->pubpi.coreflags; } void wlc_phy_anacore(struct brcms_phy_pub *pih, bool on) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (ISNPHY(pi)) { if (on) { @@ -673,7 +673,7 @@ void wlc_phy_anacore(struct brcms_phy_pub *pih, bool on) u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); u32 phy_bw_clkbits = 0; @@ -698,14 +698,14 @@ u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih) void wlc_phy_por_inform(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->phy_init_por = true; } void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->edcrs_threshold_lock = lock; @@ -717,14 +717,14 @@ void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock) void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->do_initcal = initcal; } void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *pih, bool newstate) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct 
brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (!pi || !pi->sh) return; @@ -734,7 +734,7 @@ void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *pih, bool newstate) void wlc_phy_hw_state_upd(struct brcms_phy_pub *pih, bool newstate) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (!pi || !pi->sh) return; @@ -746,7 +746,7 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec) { u32 mc; void (*phy_init)(struct brcms_phy *) = NULL; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (pi->init_in_progress) return; @@ -798,7 +798,7 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec) void wlc_phy_cal_init(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); void (*cal_init)(struct brcms_phy *) = NULL; if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & @@ -816,7 +816,7 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih) int wlc_phy_down(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); int callbacks = 0; if (pi->phycal_timer @@ -1070,7 +1070,7 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) void wlc_phy_hold_upd(struct brcms_phy_pub *pih, u32 id, bool set) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (set) mboolset(pi->measure_hold, id); @@ -1082,7 +1082,7 @@ void wlc_phy_hold_upd(struct brcms_phy_pub *pih, u32 id, bool set) void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, u32 flags) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (mute) mboolset(pi->measure_hold, PHY_HOLD_FOR_MUTE); @@ -1096,7 +1096,7 @@ void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, u32 flags) void wlc_phy_clear_tssi(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (ISNPHY(pi)) { return; @@ -1115,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi) void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol)); if (ISNPHY(pi)) { @@ -1149,35 +1149,35 @@ void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on) u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); return pi->bw; } void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->bw = bw; } void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->radio_chanspec = newch; } u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); return pi->radio_chanspec; } 
void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); u16 m_cur_channel; void (*chanspec_set)(struct brcms_phy *, u16) = NULL; m_cur_channel = CHSPEC_CHANNEL(chanspec); @@ -1226,7 +1226,7 @@ int wlc_phy_chanspec_bandrange_get(struct brcms_phy *pi, u16 chanspec) void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi, bool wide_filter) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->channel_14_wide_filter = wide_filter; @@ -1246,7 +1246,7 @@ void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band, struct brcms_chanvec *channels) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); uint i; uint channel; @@ -1267,7 +1267,7 @@ wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band, u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); uint i; uint channel; u16 chspec; @@ -1311,7 +1311,7 @@ u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band) int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); *qdbm = pi->tx_user_target[0]; if (override != NULL) @@ -1323,7 +1323,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr) { bool mac_enabled = false; - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); memcpy(&pi->tx_user_target[TXP_FIRST_CCK], &txpwr->cck[0], BRCMS_NUM_RATES_CCK); @@ -1371,7 +1371,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi, int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); int i; if (qdbm > 127) @@ -1407,7 +1407,7 @@ void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint channel, u8 *min_pwr, u8 *max_pwr, int txp_rate_idx) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); uint i; *min_pwr = pi->min_txpower * BRCMS_TXPWR_DB_FACTOR; @@ -1456,7 +1456,7 @@ void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan, u8 *max_txpwr, u8 *min_txpwr) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); u8 tx_pwr_max = 0; u8 tx_pwr_min = 255; u8 max_num_rate; @@ -1493,14 +1493,14 @@ wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint bandunit, u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); return pi->tx_power_min; } u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); return pi->tx_power_max; } @@ -1812,21 +1812,21 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr, void wlc_phy_txpwr_percent_set(struct 
brcms_phy_pub *ppi, u8 txpwr_percent) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->txpwr_percent = txpwr_percent; } void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->sh->machwcap = machwcap; } void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); u16 rxc; rxc = 0; @@ -1857,7 +1857,7 @@ void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr, u16 chanspec) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); wlc_phy_txpower_reg_limit_calc(pi, txpwr, chanspec); @@ -1881,14 +1881,14 @@ wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr, void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->ofdm_rateset_war = war; } void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->bf_preempt_4306 = bf_preempt; } @@ -1945,7 +1945,7 @@ void wlc_phy_txpower_update_shm(struct brcms_phy *pi) bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); if (ISNPHY(pi)) return pi->nphy_txpwrctrl; @@ -1955,7 +1955,7 @@ bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi) void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); bool suspend; if (!pi->hwpwrctrl_capable) @@ -2038,7 +2038,7 @@ void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, struct tx_power *power, uint channel) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); uint rate, num_rates; u8 min_pwr, max_pwr; @@ -2136,21 +2136,21 @@ wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, struct tx_power *power, void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); pi->antsel_type = antsel_type; } bool wlc_phy_test_ison(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); return pi->phytest_on; } void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); bool suspend; pi->sh->rx_antdiv = val; @@ -2283,7 +2283,7 @@ static s8 wlc_phy_noise_read_shmem(struct brcms_phy *pi) void wlc_phy_noise_sample_intr(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); u16 jssi_aux; u8 channel = 0; s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY; @@ -2339,7 +2339,7 @@ void 
wlc_phy_noise_sample_intr(struct brcms_phy_pub *pih) static void wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY; bool sampling_in_progress = (pi->phynoise_state != 0); bool wait_for_intr = true; @@ -2531,7 +2531,7 @@ int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, { int rssi = rxh->PhyRxStatus_1 & PRXS1_JSSI_MASK; uint radioid = pih->radioid; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if ((pi->sh->corerev >= 11) && !(rxh->RxStatus2 & RXS_PHYRXST_VALID)) { @@ -2591,7 +2591,7 @@ void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag) void wlc_phy_watchdog(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); bool delay_phy_cal = false; pi->sh->now++; @@ -2651,7 +2651,7 @@ void wlc_phy_watchdog(struct brcms_phy_pub *pih) void wlc_phy_BSSinit(struct brcms_phy_pub *pih, bool bonlyap, int rssi) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); uint i; uint k; @@ -2711,7 +2711,7 @@ void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason) s16 nphy_currtemp = 0; s16 delta_temp = 0; bool do_periodic_cal = true; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); if (!ISNPHY(pi)) return; @@ -2804,7 +2804,7 @@ u8 wlc_phy_nbits(s32 value) void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->sh->hw_phytxchain = txchain; pi->sh->hw_phyrxchain = rxchain; @@ -2815,7 +2815,7 @@ void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain) void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); pi->sh->phytxchain = txchain; @@ -2827,7 +2827,7 @@ void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain) void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); *txchain = pi->sh->phytxchain; *rxchain = pi->sh->phyrxchain; @@ -2837,7 +2837,7 @@ u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih) { s16 nphy_currtemp; u8 active_bitmap; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); active_bitmap = (pi->phy_txcore_heatedup) ? 
0x31 : 0x33; @@ -2867,7 +2867,7 @@ u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih) s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); u8 siso_mcs_id, cdd_mcs_id; siso_mcs_id = @@ -2944,7 +2944,7 @@ s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec) bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *ppi) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); if (ISNPHY(pi)) return wlc_phy_n_txpower_ipa_ison(pi); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index b2d6d6da3daf7236466db120520aad8e1a3513f7..5f1366234a0dc3e906d085b927b2360b60f05a66 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -2865,7 +2865,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) { bool suspend, tx_gain_override_old; struct lcnphy_txgains old_gains; - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB, idleTssi0_regvalue_2C; u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); @@ -3084,7 +3084,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) s32 a1, b0, b1; s32 tssi, pwr, maxtargetpwr, mintargetpwr; bool suspend; - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); @@ -4348,7 +4348,7 @@ void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi) { s8 index; u16 index2; - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) && diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c index 93869e89aa3dcf9794f56ea505d35d47c34e780c..084f18f4f95039c921b1c82c548c904fcb6ccf67 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c @@ -14121,7 +14121,7 @@ static u8 ant_sw_ctrl_tbl_rev8_2057v7_core1[] = { bool wlc_phy_bist_check_phy(struct brcms_phy_pub *pih) { - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); u32 phybist0, phybist1, phybist2, phybist3, phybist4; if (NREV_GE(pi->pubpi.phy_rev, 16)) @@ -19734,7 +19734,7 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask) u16 regval; u16 tbl_buf[16]; uint i; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); u16 tbl_opcode; bool suspend; @@ -19812,7 +19812,7 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask) u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih) { u16 regval, rxen_bits; - struct brcms_phy *pi = (struct brcms_phy *) pih; + struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro); regval = read_phy_reg(pi, 0xa2); rxen_bits = (regval >> 4) & 0xf; @@ -21342,7 +21342,7 @@ void 
wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec) void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init) { - struct brcms_phy *pi = (struct brcms_phy *) ppi; + struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); u16 mask = 0xfc00; u32 mc = 0; diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h index fb7cbcf81179bbc31f70e9557def1db9593dffb8..8d1e85e0ed51021b89fff661d7ed9e87cefd5637 100644 --- a/drivers/net/wireless/brcm80211/include/defs.h +++ b/drivers/net/wireless/brcm80211/include/defs.h @@ -74,10 +74,6 @@ #define BRCM_BAND_2G 2 /* 2.4 Ghz */ #define BRCM_BAND_ALL 3 /* all bands */ -/* Values for PM */ -#define PM_OFF 0 -#define PM_MAX 1 - /* Debug levels */ #define BRCM_DL_INFO 0x00000001 #define BRCM_DL_MAC80211 0x00000002 @@ -87,6 +83,7 @@ #define BRCM_DL_DMA 0x00000020 #define BRCM_DL_HT 0x00000040 +/* Values for PM */ #define PM_OFF 0 #define PM_MAX 1 #define PM_FAST 2 diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index 40078f5f932ec6b1ea3a26af5453f2bc5d2206ec..964b64ab7fe3a10aed8024f79abe6a23df6afb38 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -398,7 +398,7 @@ static int cw1200_spi_probe(struct spi_device *func) return -1; } - self = kzalloc(sizeof(*self), GFP_KERNEL); + self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL); if (!self) { pr_err("Can't allocate SPI hwbus_priv."); return -ENOMEM; @@ -424,7 +424,6 @@ static int cw1200_spi_probe(struct spi_device *func) if (status) { cw1200_spi_irq_unsubscribe(self); cw1200_spi_off(plat_data); - kfree(self); } return status; @@ -441,7 +440,6 @@ static int cw1200_spi_disconnect(struct spi_device *func) cw1200_core_release(self->core); self->core = NULL; } - kfree(self); } cw1200_spi_off(dev_get_platdata(&func->dev)); diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index 4e5c0f8c949610e9f3a6696f16018ce5bd3fe54e..8efd17c52f65c5307b6136f14fcd94e2def45b2b 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c @@ -186,11 +186,9 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v) bss->ssid[i] : '_'); seq_putc(m, '\t'); - for (i = 0; i < bss->ssid_len; i++) - seq_printf(m, "%02x", bss->ssid[i]); + seq_printf(m, "%*phN", (int)bss->ssid_len, bss->ssid); seq_putc(m, '\t'); - for (i = 0; i < bss->wpa_ie_len; i++) - seq_printf(m, "%02x", bss->wpa_ie[i]); + seq_printf(m, "%*phN", (int)bss->wpa_ie_len, bss->wpa_ie); seq_putc(m, '\n'); return 0; } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index a42f9c335090697297165bd6d15d8b6c5594a855..f0c3c77a48d38eb7e2318280ab9492582091c39f 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -5552,7 +5552,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, min(network->ssid_len, priv->essid_len)))) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); @@ -5765,7 +5765,7 @@ static int ipw_best_network(struct ipw_priv *priv, memcmp(network->ssid, priv->essid, min(network->ssid_len, priv->essid_len)))) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); @@ -5782,7 +5782,7 @@ static int 
ipw_best_network(struct ipw_priv *priv, * testing everything else. */ if (match->network && match->network->stats.rssi > network->stats.rssi) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because " diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index 3dcbe2cd2b287eebce041abc45b5de72d4f3d935..26fec54dcd0300a7f6ec5cc96267acc917de56ea 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -4633,7 +4633,7 @@ il4965_store_tx_power(struct device *d, struct device_attribute *attr, else { ret = il_set_tx_power(il, val, false); if (ret) - IL_ERR("failed setting tx power (0x%d).\n", ret); + IL_ERR("failed setting tx power (0x%08x).\n", ret); else ret = count; } @@ -5757,9 +5757,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (il->cfg->sku & IL_SKU_N) - hw->flags |= - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; + hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_STATIC_SMPS; hw->sta_data_size = sizeof(struct il_station_priv); hw->vif_data_size = sizeof(struct il_vif_priv); diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 824f5e2877835d8a829f643d8b15cf1ed85e2572..267e48a2915e7885993979337619d777c8cb74c8 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -85,6 +85,16 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. +config IWLWIFI_UAPSD + bool "enable U-APSD by default" + depends on IWLMVM + help + Say Y here to enable U-APSD by default. This may cause + interoperability problems with some APs, manifesting in lower than + expected throughput due to those APs not enabling aggregation + + If unsure, say N. + menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index afb98f4fdaf3608afd3c9ab844ed34c8dff6851a..2364a3c09b9eb8037ba6237112c679749a3ac94c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -125,8 +125,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, */ if (priv->nvm_data->sku_cap_11n_enable) - hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; + hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_STATIC_SMPS; /* * Enable 11w if advertised by firmware and software crypto diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 3255a1723d176f59ee5547e7981a9d3bbe7d9583..d1ce3ce13591717099db5b83571334db75efd0fb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -580,7 +580,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, * time, or we hadn't time to drain the AC queues. 
*/ if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id); + iwl_trans_txq_disable(priv->trans, txq_id, true); else IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", agg_state); @@ -686,7 +686,7 @@ int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, * time, or we hadn't time to drain the AC queues. */ if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id); + iwl_trans_txq_disable(priv->trans, txq_id, true); else IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", agg_state); @@ -781,7 +781,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); iwl_trans_txq_disable(priv->trans, - tid_data->agg.txq_id); + tid_data->agg.txq_id, true); iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); tid_data->agg.state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d53adc245497cf29940374510c933a40a4704f77..b04b8858c6905ae700ac4081e57f56f59668eaf8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -129,7 +131,8 @@ static const struct iwl_ht_params iwl7000_ht_params = { .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000 + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ + .non_shared_ant = ANT_A const struct iwl_cfg iwl7260_2ac_cfg = { @@ -218,6 +221,12 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {0}, }; +static const struct iwl_ht_params iwl7265_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), +}; + const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", .fw_name_pre = IWL3165_FW_PRE, @@ -232,7 +241,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, @@ -242,7 +251,7 @@ const struct iwl_cfg iwl7265_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, @@ -252,7 +261,7 @@ const struct iwl_cfg iwl7265_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = 
iwl7265_pwr_tx_backoffs, diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index e93c6972290b84de3cf26214ca6894d0db0c4f79..e4351487ca7269aefe06d4bf2da3c2fedc1f34da 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -79,7 +81,7 @@ #define IWL8000_NVM_VERSION 0x0a1d #define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ -#define IWL8000_FW_PRE "iwlwifi-8000-" +#define IWL8000_FW_PRE "iwlwifi-8000" #define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_8000 10 @@ -101,6 +103,7 @@ static const struct iwl_base_params iwl8000_base_params = { }; static const struct iwl_ht_params iwl8000_ht_params = { + .ldpc = true, .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; @@ -113,7 +116,17 @@ static const struct iwl_ht_params iwl8000_ht_params = { .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000 + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .non_shared_ant = ANT_A + +const struct iwl_cfg iwl8260_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 8260", + .fw_name_pre = IWL8000_FW_PRE, + IWL_DEVICE_8000, + .ht_params = &iwl8000_ht_params, + .nvm_ver = IWL8000_NVM_VERSION, + .nvm_calib_ver = IWL8000_TX_POWER_VERSION, +}; const struct iwl_cfg iwl8260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8260", @@ -133,6 +146,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .disable_dummy_notification = true, }; MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 3d7cc37420ae9d5b052e2f39de6c2705a8780a81..2ef83a39ff10bb877f9ff15430a10b9ac2b3b836 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -171,6 +171,7 @@ struct iwl_base_params { /* * @stbc: support Tx STBC and 1*SS Rx STBC + * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40 */ @@ -178,6 +179,7 @@ struct iwl_ht_params { enum ieee80211_smps_mode smps_mode; const bool ht_greenfield_support; /* if used set to true */ const bool stbc; + const bool ldpc; bool use_rts_for_aggregation; u8 ht40_bands; }; @@ -228,6 +230,7 @@ struct iwl_pwr_tx_backoff { * @max_data_size: The maximal length of the fw data section * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna + * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops @@ -260,6 +263,7 @@ struct iwl_cfg { const u32 max_inst_size; 
u8 valid_tx_ant; u8 valid_rx_ant; + u8 non_shared_ant; bool bt_shared_single_ant; u16 nvm_ver; u16 nvm_calib_ver; @@ -280,6 +284,7 @@ struct iwl_cfg { bool no_power_up_nic_in_init; const char *default_nvm_file; unsigned int max_rx_agg_size; + bool disable_dummy_notification; }; /* @@ -341,6 +346,7 @@ extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; +extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index fe129c94ae3ea52e068d5cdc7f7e15732c033d4e..3f6f015285e57ee17666af25414e5a40ffb6dd78 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -293,6 +295,16 @@ #define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) #define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2) + +/** + * hw_rev values + */ +enum { + SILICON_A_STEP = 0, + SILICON_B_STEP, +}; + + #define CSR_HW_REV_TYPE_MSK (0x000FFF0) #define CSR_HW_REV_TYPE_5300 (0x0000020) #define CSR_HW_REV_TYPE_5350 (0x0000030) diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 295083510e729a76f13347a568c661acbdfdda52..0a70bcd241f5b9703096726e9e3e81b1cb84b5f0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -145,6 +145,7 @@ do { \ #define IWL_DL_HCMD 0x00000004 #define IWL_DL_STATE 0x00000008 /* 0x000000F0 - 0x00000010 */ +#define IWL_DL_QUOTA 0x00000010 #define IWL_DL_TE 0x00000020 #define IWL_DL_EEPROM 0x00000040 #define IWL_DL_RADIO 0x00000080 @@ -189,6 +190,7 @@ do { \ #define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) #define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) #define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) 
IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 23e7351e02decaa49026b8f628c67990f1607d2a..90987d6f348e8f253bf2611edc5e82768401efdf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -36,15 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg); #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 77e3178040b2a80716ff97cb3c4771b356682897..0f1084f09caabfdf1265c4e34ba54bbd682cffaa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -67,6 +69,7 @@ #include #include "iwl-drv.h" +#include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-trans.h" #include "iwl-op-mode.h" @@ -242,6 +245,23 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", name_pre, tag); + /* + * Starting 8000B - FW name format has changed. This overwrites the + * previous name and uses the new format. + */ + if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + char rev_step[2] = { + 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev), 0 + }; + + /* A-step doesn't have an indication */ + if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP) + rev_step[0] = 0; + + snprintf(drv->firmware_name, sizeof(drv->firmware_name), + "%s%s-%s.ucode", name_pre, rev_step, tag); + } + IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) ? 
"EXPERIMENTAL " : "", @@ -1254,7 +1274,9 @@ struct iwl_mod_params iwlwifi_mod_params = { .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .wd_disable = true, - .uapsd_disable = false, +#ifndef CONFIG_IWLWIFI_UAPSD + .uapsd_disable = true, +#endif /* CONFIG_IWLWIFI_UAPSD */ /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); @@ -1359,7 +1381,7 @@ MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)") module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, int, S_IRUGO); MODULE_PARM_DESC(antenna_coupling, - "specify antenna coupling in dB (defualt: 0 dB)"); + "specify antenna coupling in dB (default: 0 dB)"); module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO); MODULE_PARM_DESC(wd_disable, @@ -1370,7 +1392,11 @@ MODULE_PARM_DESC(nvm_file, "NVM file name"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, bool, S_IRUGO); +#ifdef CONFIG_IWLWIFI_UAPSD MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); +#else +MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); +#endif /* * set bt_coex_active to true, uCode will do kill/defer diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 3c72cb710b0cb8dc4b4d7d7be478488acc569029..be4f8972241a9bf59324848746172bb3c57150c1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 07ff7e0028ee81518f0b7b2db618787cd5d1882e..74b796dc424287172b363fc11bc430c93bf9ef97 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -758,6 +758,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; } + if (cfg->ht_params->ldpc) + ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + if (iwlwifi_mod_params.amsdu_size_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index de5994a776c765a99c66bc86e0533451d4c7a027..e30a41d04c8b3ac1af8878ab1d72fc02419aa80a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 929a8063354ca43e456b9f46fbc9d4e51c22a8a1..401f7be36b934b7524d56508c579e663212c8782 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 1bb5193c5b1b5097c11bbf5a986f4c4d6e409c58..4f6e66892acc4658473aed57fc4c6cc72dd33fc4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -125,6 +127,9 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA. * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. + * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. + * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time + * longer than the passive one, which is essential for fragmented scan. */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -133,14 +138,31 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_CSA_FLOW = BIT(4), IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), + IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), }; /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 + * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current + * tx power value into TPC Report action frame and Link Measurement Report + * action frame + * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params + * element in probe requests. + * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in + * probe requests. 
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests + * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), + * which also implies support for the scheduler configuration command */ enum iwl_ucode_tlv_capa { - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), }; /* The default calibrate table size if not specified by firmware file */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c index 5eef4ae7333bad13527327cc851f022f23e3f991..7a2cbf6f90dbf39185c4a2e0ff52eddfb1b9d951 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/iwlwifi/iwl-io.c @@ -193,7 +193,7 @@ void iwl_force_nmi(struct iwl_trans *trans) * DEVICE_SET_NMI_8000B_REG - is used. */ if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) || - ((trans->hw_rev & 0xc) == 0x0)) + (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL); else iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG, diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 354255f087544ea455b708ef853bdb7f1a0fbb72..c302e7468559ced39edc4871466a52938091a039 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -332,6 +334,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (cfg->ht_params->ldpc) + vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + if (num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index 99785c892f963c7435b0048c4a097f6cae9e7808..b6d666ee8359df46c5052135fd7ddcfb82525868 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
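A minimal sketch (illustration only, not part of the patch hunks) of how one of the new ucode capability bits declared in the iwl-fw.h hunk above might be consumed, following the same pattern the driver already uses for API bits such as IWL_UCODE_TLV_API_BT_COEX_SPLIT; the helper name and the capa[0] indexing are assumptions, not taken from this patch.

/* Sketch: gate a feature on a TLV capability bit. iwl_mvm_has_dqa() is a
 * hypothetical helper; capa[0] is assumed to mirror the existing api[0]
 * usage in the mvm code (requires mvm.h / iwl-fw.h).
 */
static inline bool iwl_mvm_has_dqa(struct iwl_mvm *mvm)
{
	return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT;
}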
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 47033a35a40293f100312ede34bfc3a5b43c9efd..1560f4576c7d3ce69e7373f5fdb688a0243f2cb1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -281,6 +283,7 @@ #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) #define SCD_AGGR_SEL (SCD_BASE + 0x248) #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) +#define SCD_EN_CTRL (SCD_BASE + 0x254) static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) { diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/iwlwifi/iwl-scd.h new file mode 100644 index 0000000000000000000000000000000000000000..6c622b21bba71d5b6abcc882a17ebd8ba6d664ea --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-scd.h @@ -0,0 +1,118 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_scd_h__ +#define __iwl_scd_h__ + +#include "iwl-trans.h" +#include "iwl-io.h" +#include "iwl-prph.h" + + +static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_disable_agg(struct iwl_trans *trans) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0); +} + +static inline void iwl_scd_activate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7)); +} + +static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, 0); +} + +static inline void iwl_scd_enable_set_active(struct iwl_trans *trans, + u32 value) +{ + iwl_write_prph(trans, SCD_EN_CTRL, value); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 656371a668daa5e2325e45fe0b576fc3e13bd307..9eb85249e89c46aa980ae22893b48af2150ee4df 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
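For context on the new iwl-scd.h helpers above, a minimal sketch (not taken from the pcie changes in this series) of how a transport's firmware-alive path might drive them; the queue number and the call ordering are illustrative only.

/* Sketch: scheduler bring-up around fw_alive using the new helpers.
 * Queue 9 (the command queue) is used purely as an example value.
 * Assumes iwl-scd.h is included.
 */
static void example_scd_bringup(struct iwl_trans *trans)
{
	/* quiesce all TX FIFOs while the scheduler is reprogrammed */
	iwl_scd_deactivate_fifos(trans);

	/* chain-build the command queue and keep aggregation off on it */
	iwl_scd_txq_set_chain(trans, 9);
	iwl_scd_txq_disable_agg(trans, 9);

	/* let the TX FIFOs run again once the queues are configured */
	iwl_scd_activate_fifos(trans);
}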
* * Redistribution and use in source and binary forms, with or without @@ -375,6 +377,7 @@ enum iwl_trans_status { * if unset 4k will be the RX buffer size * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) + * @scd_set_active: should the transport configure the SCD for HCMD queue * @queue_watchdog_timeout: time (in ms) after which queues * are considered stuck and will trigger device restart * @command_names: array of command names, must be 256 entries @@ -390,6 +393,7 @@ struct iwl_trans_config { bool rx_buf_size_8k; bool bc_table_dword; + bool scd_set_active; unsigned int queue_watchdog_timeout; const char *const *command_names; }; @@ -401,6 +405,14 @@ struct iwl_trans_dump_data { struct iwl_trans; +struct iwl_trans_txq_scd_cfg { + u8 fifo; + s8 sta_id; + u8 tid; + bool aggregate; + int frame_limit; +}; + /** * struct iwl_trans_ops - transport specific operations * @@ -437,7 +449,9 @@ struct iwl_trans; * Must be atomic * @txq_enable: setup a queue. To setup an AC queue, use the * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before - * this one. The op_mode must not configure the HCMD queue. May sleep. + * this one. The op_mode must not configure the HCMD queue. The scheduler + * configuration may be %NULL, in which case the hardware will not be + * configured. May sleep. * @txq_disable: de-configure a Tx queue to send AMPDUs * Must be atomic * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. @@ -492,9 +506,10 @@ struct iwl_trans_ops { void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); - void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn); - void (*txq_disable)(struct iwl_trans *trans, int queue); + void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg); + void (*txq_disable)(struct iwl_trans *trans, int queue, + bool configure_scd); int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); @@ -766,29 +781,51 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, trans->ops->reclaim(trans, queue, ssn, skbs); } -static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue) +static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd) { - trans->ops->txq_disable(trans, queue); + trans->ops->txq_disable(trans, queue, configure_scd); } -static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, - int fifo, int sta_id, int tid, - int frame_limit, u16 ssn) +static inline void +iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg) { might_sleep(); if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - trans->ops->txq_enable(trans, queue, fifo, sta_id, tid, - frame_limit, ssn); + trans->ops->txq_enable(trans, queue, ssn, cfg); +} + +static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = tid, + .frame_limit = frame_limit, + .aggregate = sta_id >= 0, + }; + + iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg); } static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo) { - 
iwl_trans_txq_enable(trans, queue, fifo, -1, - IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = -1, + .tid = IWL_MAX_TID_COUNT, + .frame_limit = IWL_FRAME_LIMIT, + .aggregate = false, + }; + + iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg); } static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index a28235913c2cf94c5b07b9c0e3dc3c8768a7d990..2d7c3ea3c4f8ba4db22fde31245ac43051f52a53 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o iwlmvm-y += power.o coex.o coex_legacy.o -iwlmvm-y += tt.o offloading.o +iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index ce71625f497ff8f34384b803121b9312aa35afde..8df2021f9856365b2bec8465bb4ddbb02e907586 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1144,6 +1146,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) { + /* there is no other antenna, shared antenna is always available */ + if (mvm->cfg->bt_shared_single_ant) + return true; + if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index a3be3335992766348a670028d3bfa7f2a390ea60..585c0ab4a3ec5fc59ca8d2e84395c74b27b4fcd2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
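A minimal usage sketch (not part of the patch hunks) for the reworked transport queue API in the iwl-trans.h hunks above; the queue, FIFO, station and frame-limit values are placeholders, and the sequence is only meant to show the new cfg-based calls.

/* Sketch: bring up an aggregation queue through the new cfg-based API and
 * later release it without touching the scheduler (configure_scd = false),
 * as an op_mode owning the SCD configuration would. All values below are
 * placeholders.
 */
static void example_txq_lifecycle(struct iwl_trans *trans)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = 1,
		.sta_id = 2,
		.tid = 0,
		.frame_limit = 64,
		.aggregate = true,
	};

	iwl_trans_txq_enable_cfg(trans, 10, 0, &cfg);

	/* ... traffic flows ... */

	iwl_trans_txq_disable(trans, 10, false);
}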
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index ca79f7160573445110485b313d9a31a871d990b2..d4dfbe4cb66da5330b9e06251e0766a2ceaa8bea 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,12 +65,18 @@ #ifndef __MVM_CONSTANTS_H #define __MVM_CONSTANTS_H +#include + #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8 #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 @@ -82,7 +90,10 @@ #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 #define IWL_MVM_BT_COEX_SYNC2SCO 1 -#define IWL_MVM_BT_COEX_CORUNNING 1 +#define IWL_MVM_BT_COEX_CORUNNING 0 #define IWL_MVM_BT_COEX_MPLUT 1 +#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 +#define IWL_MVM_QUOTA_THRESHOLD 8 +#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 645b3cfc29a5e5c0bcb10f6c9eaded6a1827e5ed..c17be0fb7283a4f51c570002d1fc7c743d354daa 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -700,7 +702,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return ret; rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) return ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 87e517bffedc6647a0b1e9fbc27758647ea6907e..9aa2311a776c29f11a595de66eb5f8134c60cec6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -118,6 +120,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); dbgfs_pm->uapsd_misbehaving = val; break; + case MVM_DEBUGFS_PM_USE_PS_POLL: + IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); + dbgfs_pm->use_ps_poll = val; + break; } } @@ -168,6 +174,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, if (sscanf(buf + 18, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; + } else if (!strncmp("use_ps_poll=", buf, 12)) { + if (sscanf(buf + 12, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_USE_PS_POLL; } else { return -EINVAL; } diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 7d18f466fbb3351b3b173fdcf4aa15bddd5d0b75..50527a9bb26739b572e1b0faad8b7afa5114662d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -257,6 +259,96 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, return count; } +static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos; + + if (!mvm->temperature_test) + pos = scnprintf(buf , sizeof(buf), "disabled\n"); + else + pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +/* + * Set NIC Temperature + * Cause the driver to ignore the actual NIC temperature reported by the FW + * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - + * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX + * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE + */ +static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int temperature; + + if (!mvm->ucode_loaded && !mvm->temperature_test) + return -EIO; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + /* not a legal temperature */ + if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && + temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || + temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { + if (!mvm->temperature_test) + goto out; + + mvm->temperature_test = false; + /* Since we can't read the temp while awake, just set + * it to zero until we get the next RX stats from the + * firmware. + */ + mvm->temperature = 0; + } else { + mvm->temperature_test = true; + mvm->temperature = temperature; + } + IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", + mvm->temperature_test ? 
"En" : "Dis" , + mvm->temperature); + /* handle the temperature change */ + iwl_mvm_tt_handler(mvm); + +out: + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, temp; + + if (!mvm->ucode_loaded) + return -EIO; + + mutex_lock(&mvm->mutex); + temp = iwl_mvm_get_temp(mvm); + mutex_unlock(&mvm->mutex); + + if (temp < 0) + return temp; + + pos = scnprintf(buf , sizeof(buf), "%d\n", temp); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1190,6 +1282,18 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); PRINT_MVM_REF(IWL_MVM_REF_USER); + PRINT_MVM_REF(IWL_MVM_REF_TX); + PRINT_MVM_REF(IWL_MVM_REF_TX_AGG); + PRINT_MVM_REF(IWL_MVM_REF_ADD_IF); + PRINT_MVM_REF(IWL_MVM_REF_START_AP); + PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED); + PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX); + PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS); + PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL); + PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ); + PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE); + PRINT_MVM_REF(IWL_MVM_REF_NMI); + PRINT_MVM_REF(IWL_MVM_REF_TM_CMD); PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); @@ -1296,6 +1400,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); +MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); @@ -1336,6 +1442,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, + S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); @@ -1380,6 +1489,13 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) goto err; #endif + if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, + &mvm->low_latency_agg_frame_limit)) + goto err; + if (!debugfs_create_u8("ps_disabled", S_IRUSR, + mvm->debugfs_dir, &mvm->ps_disabled)) + goto err; if (!debugfs_create_blob("nvm_hw", S_IRUSR, mvm->debugfs_dir, &mvm->nvm_hw_blob)) goto err; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h index e3a9774af495d3ac814f24e923d10e124afbf2ca..8c4190e7e027e1b82319127bc333c82a384a3d80 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h index 69875716dcdb80abeb688949d826267eb391ccf0..816883f9ff94f541906cb945d0bf8002f8f3770b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 13696fe419b778c68c9d72d7a289a3dd3c453b39..e74cdf2132f87a5a94ec2ff9f0c588b709833f24 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index c3a8c86b550d45aaca72a274252960d8397d0ef6..27dd86395b39dd0f5800efe9c73a52294a2099ed 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index c02a9e45ec5eaec78ce9ef49c47957c6fcd2c7da..1354c68f6468a79d78da53882e053aea6c900d9c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -668,6 +670,8 @@ struct iwl_scan_channel_opt { * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS multiple SSID matching * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented + * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report + * and DS parameter set IEs into probe requests. */ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), @@ -676,6 +680,7 @@ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), + IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), }; enum iwl_scan_priority { diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h index 47bd0406355d2738f00477d237c57bcd3d2706f2..21dd5b771660f0f689798b650fbb4acd1eed3d21 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index d6073f67b212e12c2c7565cadf44c50268763cd8..5bca1f8bfebf3d87177cacdbcc919f221aaee020 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -66,6 +66,7 @@ /** * enum iwl_tx_flags - bitmasks for tx_flags in TX command * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame + * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame * @TX_CMD_FLG_ACK: expect ACK from receiving station * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. * Otherwise, use rate_n_flags from the TX command @@ -97,6 +98,7 @@ */ enum iwl_tx_flags { TX_CMD_FLG_PROT_REQUIRE = BIT(0), + TX_CMD_FLG_WRITE_TX_POWER = BIT(1), TX_CMD_FLG_ACK = BIT(3), TX_CMD_FLG_STA_RATE = BIT(4), TX_CMD_FLG_BAR = BIT(6), diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 9a922f3bd16bc00b7a5ce0e6484178db9372a24d..667a92274c87773c1d123b41911afb5e38d19405 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,16 +75,20 @@ #include "fw-api-coex.h" #include "fw-api-scan.h" -/* maximal number of Tx queues in any platform */ -#define IWL_MVM_MAX_QUEUES 20 - /* Tx queue numbers */ enum { IWL_MVM_OFFCHANNEL_QUEUE = 8, IWL_MVM_CMD_QUEUE = 9, }; -#define IWL_MVM_CMD_FIFO 7 +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; #define IWL_MVM_STATION_COUNT 16 @@ -110,6 +116,9 @@ enum { TXPATH_FLUSH = 0x1e, MGMT_MCAST_KEY = 0x1f, + /* scheduler config */ + SCD_QUEUE_CFG = 0x1d, + /* global key */ WEP_KEY = 0x20, @@ -184,6 +193,8 @@ enum { REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, + MARKER_CMD = 0xcb, + /* BT Coex */ BT_COEX_PRIO_TABLE = 0xcc, BT_COEX_PROT_ENV = 0xcd, @@ -197,6 +208,10 @@ enum { REPLY_SF_CFG_CMD = 0xd1, REPLY_BEACON_FILTERING_CMD = 0xd2, + /* DTS measurements */ + CMD_DTS_MEASUREMENT_TRIGGER = 0xdc, + DTS_MEASUREMENT_NOTIFICATION = 0xdd, + REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, @@ -542,7 +557,7 @@ enum iwl_time_event_type { TE_WIDI_TX_SYNC, /* Channel Switch NoA */ - TE_P2P_GO_CSA_NOA, + TE_CHANNEL_SWITCH_PERIOD, TE_MAX }; /* MAC_EVENT_TYPE_API_E_VER_1 */ @@ -1307,6 +1322,38 @@ struct iwl_bcast_filter_cmd { struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; } __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ +/* + * enum iwl_mvm_marker_id - marker ids + * + * The ids for different types of markers to insert into the usniffer logs + */ +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_mvm_marker - mark info into the usniffer logs + * + * (MARKER_CMD = 0xcb) + * + * Mark the UTC time stamp into the usniffer logs together with additional + * metadata, so the usniffer output can be parsed. + * In the command response the ucode will return the GP2 time. + * + * @dw_len: The amount of dwords following this byte including this byte. + * @marker_id: A unique marker id (iwl_mvm_marker_id). + * @reserved: reserved. + * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC + * @metadata: additional meta data that will be written to the usniffer log + */ +struct iwl_mvm_marker { + u8 dwLen; + u8 markerId; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +} __packed; /* MARKER_API_S_VER_1 */ + struct mvm_statistics_dbg { __le32 burst_check; __le32 burst_count; @@ -1561,6 +1608,8 @@ enum iwl_sf_scenario { #define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ +#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) + /** * Smart Fifo configuration command. * @state: smart fifo state, types listed in enum %iwl_sf_sate.
@@ -1576,4 +1625,89 @@ struct iwl_sf_cfg_cmd { __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; } __packed; /* SF_CFG_API_S_VER_2 */ +/* DTS measurements */ + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), + DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), +}; + +/** + * iwl_dts_measurement_cmd - request DTS temperature and/or voltage measurements + * + * @flags: indicates which measurements we want as specified in &enum + * iwl_dts_measurement_flags + */ +struct iwl_dts_measurement_cmd { + __le32 flags; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ + +/** + * iwl_dts_measurement_notif - notification received with the measurements + * + * @temp: the measured temperature + * @voltage: the measured voltage + */ +struct iwl_dts_measurement_notif { + __le32 temp; + __le32 voltage; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ + +/** + * enum iwl_scd_control - scheduler config command control flags + * @IWL_SCD_CONTROL_RM_TID: remove TID from this queue + * @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW + */ +enum iwl_scd_control { + IWL_SCD_CONTROL_RM_TID = BIT(4), + IWL_SCD_CONTROL_SET_SSN = BIT(5), +}; + +/** + * enum iwl_scd_flags - scheduler config command flags + * @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue + * @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue + * @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled + */ +enum iwl_scd_flags { + IWL_SCD_FLAGS_SHARE_TID = BIT(0), + IWL_SCD_FLAGS_SHARE_RA = BIT(1), + IWL_SCD_FLAGS_DQA_ENABLED = BIT(2), +}; + +#define IWL_SCDQ_INVALID_STA 0xff + +/** + * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command + * @token: dialog token addba - unused legacy + * @sta_id: station id 4-bit + * @tid: TID 0..7 + * @scd_queue: TFD queue num 0 .. 31 + * @enable: 1 queue enable, 0 queue disable + * @aggregate: 1 aggregated queue, 0 otherwise + * @tx_fifo: tx fifo num 0..7 + * @window: up to 64 + * @ssn: starting seq num 12-bit + * @control: command control flags + * @flags: flags - see &enum iwl_scd_flags + * + * Note that every time the command is sent, all parameters must + * be filled with the exception of + * - the SSN, which is only used with @IWL_SCD_CONTROL_SET_SSN + * - the window, which is only relevant when starting aggregation + */ +struct iwl_scd_txq_cfg_cmd { + u8 token; + u8 sta_id; + u8 tid; + u8 scd_queue; + u8 enable; + u8 aggregate; + u8 tx_fifo; + u8 window; + __le16 ssn; + u8 control; + u8 flags; +} __packed; + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 883e702152d5289163f48f7464cf630f6795ec2f..23fd711a67e456055b50d171ec243a124ca3ce6c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
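A minimal sketch (not part of the patch hunks) of how the op_mode might issue the new SCD_QUEUE_CFG command described above; it assumes the driver's existing iwl_mvm_send_cmd_pdu() helper, and the queue, station and window values are placeholders.

/* Sketch: enable TX queue 10 on the BE FIFO as an aggregation queue via the
 * new scheduler config command. IWL_SCD_CONTROL_SET_SSN makes the firmware
 * honour the SSN field, per the kernel-doc above. All numbers are
 * illustrative.
 */
static int example_send_scd_queue_cfg(struct iwl_mvm *mvm)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = 10,
		.enable = 1,
		.aggregate = 1,
		.tx_fifo = IWL_MVM_TX_FIFO_BE,
		.window = 64,
		.sta_id = 2,
		.tid = 0,
		.ssn = cpu_to_le16(0),
		.control = IWL_SCD_CONTROL_SET_SSN,
	};

	return iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
}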
* * Redistribution and use in source and binary forms, with or without @@ -242,10 +244,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, mvm->queue_to_mac80211[i] = i; else mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; - atomic_set(&mvm->queue_stop_count[i], 0); } - mvm->transport_queue_stop = 0; + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + atomic_set(&mvm->mac80211_queue_stop_count[i], 0); mvm->ucode_loaded = true; @@ -452,6 +454,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) for (i = 0; i < IWL_MVM_STATION_COUNT; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + /* reset quota debouncing buffer - 0xff will yield invalid data */ + memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); + /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); if (ret) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 8242e689ddb114af0a2e17a0d5ce266c19392535..0c5c0b0e23f5bbe167193c12bb954fbd2dfe8ba1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -81,7 +83,7 @@ struct iwl_mvm_mac_iface_iterator_data { struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; - unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)]; + u32 used_hw_queues; enum iwl_tsf_id preferred_tsf; bool found_vif; }; @@ -192,12 +194,31 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, data->preferred_tsf = NUM_TSF_IDS; } +/* + * Get the mask of the queues used by the vif + */ +u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 qmask = 0, ac; + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + return BIT(IWL_MVM_OFFCHANNEL_QUEUE); + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + qmask |= BIT(vif->hw_queue[ac]); + + if (vif->type == NL80211_IFTYPE_AP) + qmask |= BIT(vif->cab_queue); + + return qmask; +} + static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 ac; /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { @@ -206,12 +227,7 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, } /* Mark the queues used by the vif */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - __set_bit(vif->hw_queue[ac], data->used_hw_queues); - - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - __set_bit(vif->cab_queue, data->used_hw_queues); + data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(data->mvm, vif); /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not @@ -225,24 +241,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, 
iwl_mvm_mac_tsf_id_iter(_data, mac, vif); } -/* - * Get the mask of the queus used by the vif - */ -u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - u32 qmask = 0, ac; - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - qmask |= BIT(vif->hw_queue[ac]); - - return qmask; -} - void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -277,15 +275,15 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, - .used_hw_queues = { + .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE) - }, + BIT(IWL_MVM_CMD_QUEUE), .found_vif = false, }; u32 ac; int ret, i; + unsigned long used_hw_queues; /* * Allocate a MAC ID and a TSF for this MAC, along with the queues @@ -368,9 +366,11 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } + used_hw_queues = data.used_hw_queues; + /* Find available queues, and allocate them to the ACs */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(data.used_hw_queues, + u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); if (queue >= mvm->first_agg_queue) { @@ -379,13 +379,13 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, goto exit_fail; } - __set_bit(queue, data.used_hw_queues); + __set_bit(queue, &used_hw_queues); vif->hw_queue[ac] = queue; } /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue = find_first_zero_bit(data.used_hw_queues, + u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); if (queue >= mvm->first_agg_queue) { @@ -427,17 +427,17 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO); + iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, + IWL_MVM_TX_FIFO_VO); break; case NL80211_IFTYPE_AP: - iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue, - IWL_MVM_TX_FIFO_MCAST); + iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, + IWL_MVM_TX_FIFO_MCAST); /* fall through */ default: for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac]); + iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], + iwl_mvm_ac_to_tx_fifo[ac]); break; } @@ -452,14 +452,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE); + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE); break; case NL80211_IFTYPE_AP: - iwl_trans_txq_disable(mvm->trans, vif->cab_queue); + iwl_mvm_disable_txq(mvm, vif->cab_queue); /* fall through */ default: for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]); + iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]); } } @@ -586,6 +586,7 @@ static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, + const u8 *bssid_override, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -593,6 +594,7 @@ 
static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); u8 cck_ack_rates, ofdm_ack_rates; + const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; int i; cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, @@ -625,8 +627,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); - if (vif->bss_conf.bssid) - memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN); + + if (bssid) + memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); @@ -695,7 +698,8 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 action, bool force_assoc_off) + u32 action, bool force_assoc_off, + const u8 *bssid_override) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_data_sta *ctxt_sta; @@ -703,7 +707,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_STATION); /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); if (vif->p2p) { struct ieee80211_p2p_noa_attr *noa = @@ -784,7 +788,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | @@ -805,7 +809,7 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST); @@ -844,7 +848,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); @@ -1072,7 +1076,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* * pass probe requests and beacons from other APs (needed @@ -1098,7 +1102,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* * pass probe requests and beacons from other APs (needed @@ -1121,12 +1125,14 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 action, bool force_assoc_off) + u32 action, bool force_assoc_off, + const u8 *bssid_override) { switch (vif->type) { case NL80211_IFTYPE_STATION: return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, - force_assoc_off); + force_assoc_off, + bssid_override); break; case NL80211_IFTYPE_AP: if (!vif->p2p) @@ -1157,7 +1163,7 @@ int 
iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return -EIO; ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, - true); + true, NULL); if (ret) return ret; @@ -1169,7 +1175,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off) + bool force_assoc_off, const u8 *bssid_override) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1178,7 +1184,7 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, - force_assoc_off); + force_assoc_off, bssid_override); } int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1226,13 +1232,13 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) { u32 rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - - IWL_MVM_CHANNEL_SWITCH_TIME; + IWL_MVM_CHANNEL_SWITCH_TIME_GO; u32 apply_time = gp2 + rel_time * 1024; - iwl_mvm_schedule_csa_noa(mvm, csa_vif, - IWL_MVM_CHANNEL_SWITCH_TIME - - IWL_MVM_CHANNEL_SWITCH_MARGIN, - apply_time); + iwl_mvm_schedule_csa_period(mvm, csa_vif, + IWL_MVM_CHANNEL_SWITCH_TIME_GO - + IWL_MVM_CHANNEL_SWITCH_MARGIN, + apply_time); } } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { /* we don't have CSA NoA scheduled yet, switch now */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index cdc272d776e7f298baa6c71dc156ce576021e151..c7a73c68bdabddcb2eb2536256e0a7736be06dc1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -277,14 +279,6 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) } } -static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) -{ - /* we create the 802.11 header and SSID element */ - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) - return mvm->fw->ucode_capa.max_probe_length - 24 - 2; - return mvm->fw->ucode_capa.max_probe_length - 24 - 34; -} - int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -302,8 +296,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_TIMING_BEACON_ONLY | IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_CHANCTX_STA_CSA | - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | - IEEE80211_HW_SUPPORTS_STATIC_SMPS; + IEEE80211_HW_SUPPORTS_CLONED_SKBS; hw->queues = mvm->first_agg_queue; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; @@ -325,7 +318,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 && !iwlwifi_mod_params.uapsd_disable) { hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; - hw->uapsd_queues = IWL_UAPSD_AC_INFO; + hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; } @@ -378,7 +371,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); - hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); + hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false); hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; @@ -407,7 +400,25 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | - NL80211_FEATURE_P2P_GO_OPPPS; + NL80211_FEATURE_P2P_GO_OPPPS | + NL80211_FEATURE_DYNAMIC_SMPS | + NL80211_FEATURE_STATIC_SMPS; + + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) + hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT) + hw->wiphy->features |= NL80211_FEATURE_QUIET; + + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) + hw->wiphy->features |= + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; + + if (mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT) + hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; @@ -668,8 +679,9 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, } #ifdef CONFIG_IWLWIFI_DEBUGFS -static void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) { + static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL }; struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; @@ -761,23 +773,20 @@ static void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) file_len += fw_error_dump->trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); mvm->fw_error_dump = fw_error_dump; + + /* notify the userspace about the error we had */ + kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env); } #endif static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL }; - iwl_mvm_fw_error_dump(mvm); - /* notify the userspace about the error we had */ - kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env); -#endif - iwl_trans_stop_device(mvm->trans); mvm->scan_status = 
IWL_MVM_SCAN_NONE; + mvm->ps_disabled = false; /* just in case one was running */ ieee80211_remain_on_channel_expired(mvm->hw); @@ -805,16 +814,18 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) * ucode_down ref until reconfig is complete */ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); + /* clear any stale d0i3 state */ + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + mvm->vif_count = 0; mvm->rx_ba_sessions = 0; } -static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; - mutex_lock(&mvm->mutex); + lockdep_assert_held(&mvm->mutex); /* Clean up some internal and mac80211 state on restart */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) @@ -831,6 +842,16 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) iwl_mvm_d0i3_enable_tx(mvm, NULL); } + return ret; +} + +static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_mac_start(mvm); mutex_unlock(&mvm->mutex); return ret; @@ -856,14 +877,9 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw) mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - flush_work(&mvm->d0i3_exit_work); - flush_work(&mvm->async_handlers_wk); - - mutex_lock(&mvm->mutex); + lockdep_assert_held(&mvm->mutex); /* disallow low power states when the FW is down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -882,8 +898,21 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) /* async_handlers_list is empty and will stay empty: HW is stopped */ /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + iwl_mvm_del_aux_sta(mvm); + + mvm->ucode_loaded = false; +} + +static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + flush_work(&mvm->d0i3_exit_work); + flush_work(&mvm->async_handlers_wk); + flush_work(&mvm->fw_error_dump_wk); + mutex_lock(&mvm->mutex); + __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); /* @@ -967,10 +996,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, - qmask, - ieee80211_vif_type_p2p(vif)); + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); goto out_release; @@ -1018,7 +1044,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unref_phy; - ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta); + ret = iwl_mvm_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1059,14 +1085,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = 0, ac; - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - tfd_msk |= BIT(vif->hw_queue[ac]); - - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - tfd_msk |= BIT(vif->cab_queue); + u32 tfd_msk = iwl_mvm_mac_get_queues_mask(mvm, vif); if (tfd_msk) { mutex_lock(&mvm->mutex); @@ -1122,13 +1141,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 
mvm->noa_duration = 0; } #endif - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_rm_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; @@ -1202,14 +1221,15 @@ static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; - int addr_count = netdev_hw_addr_list_count(mc_list); - bool pass_all = false; + int addr_count; + bool pass_all; int len; - if (addr_count > MAX_MCAST_FILTERING_ADDRESSES) { - pass_all = true; + addr_count = netdev_hw_addr_list_count(mc_list); + pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || + IWL_MVM_FW_MCAST_FILTER_PASS_ALL; + if (pass_all) addr_count = 0; - } len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); @@ -1409,28 +1429,6 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, } #endif -static void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - int i; - - lockdep_assert_held(&mvm->mutex); - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) - continue; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, - NL80211_TDLS_TEARDOWN, - WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, - GFP_KERNEL); - } -} - static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1447,10 +1445,23 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + /* + * If we're not associated yet, take the (new) BSSID before associating + * so the firmware knows. If we're already associated, then use the old + * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC + * branch for disassociation below. 
+ */ + if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + /* after sending it once, adopt mac80211 data */ + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + mvmvif->associated = bss_conf->assoc; + if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* add quota for this interface */ @@ -1478,13 +1489,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, - 5 * dur); + 5 * dur, false); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); - if (vif->p2p) + if (vif->p2p) { iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC); + } } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* * If update fails - SF might be running in associated @@ -1508,6 +1523,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (vif->p2p) iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); + + /* this will take the cleared BSSID from bss_conf */ + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (ret) + IWL_ERR(mvm, + "failed to update MAC %pM (clear after unassoc)\n", + vif->addr); } iwl_mvm_recalc_multicast(mvm); @@ -1604,7 +1626,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, /* Send the bcast station. At this stage the TBTT and DTIM time events * are added and applied to the scheduler */ - ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta); + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1620,7 +1642,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); @@ -1636,7 +1658,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; - iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_send_rm_bcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: @@ -1678,10 +1700,10 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, NULL); - iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); @@ -1704,8 +1726,8 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | - BSS_CHANGED_BANDWIDTH) && - iwl_mvm_mac_ctxt_changed(mvm, vif, false)) + BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ @@ -1935,48 +1957,6 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - int count = 0; - int i; - - lockdep_assert_held(&mvm->mutex); - - for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (!sta || IS_ERR(sta) || !sta->tdls) - continue; - - if (vif) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->vif != vif) - continue; - } - - count++; - } - - return count; -} - -static void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool sta_added) -{ - int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); - - /* - * Disable ps when the first TDLS sta is added and re-enable it - * when the last TDLS sta is removed - */ - if ((tdls_sta_cnt == 1 && sta_added) || - (tdls_sta_cnt == 0 && !sta_added)) - iwl_mvm_power_update_mac(mvm); -} - static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2116,7 +2096,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, int ret; mutex_lock(&mvm->mutex); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } @@ -2144,33 +2124,12 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); /* Try really hard to protect the session and hear a beacon */ - iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500); + iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); mutex_unlock(&mvm->mutex); iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); } -static void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; - - /* - * iwl_mvm_protect_session() reads directly from the device - * (the system time), so make sure it is available. 
- */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) - return; - - mutex_lock(&mvm->mutex); - /* Protect the session to hear the TDLS setup response on the channel */ - iwl_mvm_protect_session(mvm, vif, duration, duration, 100); - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); -} - static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -2185,7 +2144,13 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (!iwl_mvm_is_idle(mvm)) { + /* Newest FW fixes sched scan while connected on another interface */ + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) { + if (!vif->bss_conf.idle) { + ret = -EBUSY; + goto out; + } + } else if (!iwl_mvm_is_idle(mvm)) { ret = -EBUSY; goto out; } @@ -2703,7 +2668,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = 0; goto out; case NL80211_IFTYPE_STATION: + break; case NL80211_IFTYPE_MONITOR: + /* always disable PS when a monitor interface is active */ + mvmvif->ps_disabled = true; break; default: ret = -EINVAL; @@ -2735,7 +2703,20 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, if ((vif->type == NL80211_IFTYPE_AP) || (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) { iwl_mvm_update_quotas(mvm, NULL); - iwl_mvm_mac_ctxt_changed(mvm, vif, false); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + + if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) + goto out; + + /* TODO: only re-enable after the first beacon */ + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); } goto out; @@ -2769,6 +2750,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; + struct iwl_mvm_sta *mvmsta; lockdep_assert_held(&mvm->mutex); @@ -2779,6 +2761,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, goto out; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; + mvmvif->ps_disabled = false; break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ @@ -2799,7 +2782,13 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, disabled_vif = vif; - iwl_mvm_mac_ctxt_changed(mvm, vif, true); + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); + + iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 2e73d3bd7757605e2483fa00f147086abe32b09c..b153ced7015bfef8984a9ecb78f844d4533ed7d9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -85,11 +87,11 @@ /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) -/* - * The CSA NoA is scheduled IWL_MVM_CHANNEL_SWITCH_TIME TUs before "beacon 0" - * TBTT. This value should be big enough to ensure that we switch in time. +/* This value represents the number of TUs before CSA "beacon 0" TBTT + * when the CSA time-event needs to be scheduled to start. It must be + * big enough to ensure that we switch in time. */ -#define IWL_MVM_CHANNEL_SWITCH_TIME 40 +#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 /* * This value (in TUs) is used to fine tune the CSA NoA end time which should @@ -103,14 +105,6 @@ */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, -}; - extern const struct ieee80211_ops iwl_mvm_hw_ops; /** @@ -186,10 +180,6 @@ enum iwl_power_scheme { }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 -#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ - IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -203,6 +193,7 @@ enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), + MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { @@ -215,6 +206,7 @@ struct iwl_dbgfs_pm { u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; + bool use_ps_poll; int mask; }; @@ -253,6 +245,7 @@ struct iwl_dbgfs_bf { enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, + IWL_MVM_SMPS_REQ_PROT, NUM_IWL_MVM_SMPS_REQ, }; @@ -277,6 +270,8 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_TM_CMD, IWL_MVM_REF_EXIT_WORK, + /* update debugfs.c when changing this */ + IWL_MVM_REF_COUNT, }; @@ -315,6 +310,9 @@ struct iwl_mvm_vif_bf_data { * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA + * @bssid: BSSID for this (client) interface + * @associated: indicates that we're currently associated, used only for + * managing the firmware state in iwl_mvm_bss_info_changed_station() * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. @@ -323,6 +321,7 @@ struct iwl_mvm_vif_bf_data { * interface should get quota etc. * @low_latency: indicates that this interface is in low-latency mode * (VMACLowLatencyMode) + * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. 
@@ -335,11 +334,15 @@ struct iwl_mvm_vif { u16 color; u8 ap_sta_id; + u8 bssid[ETH_ALEN]; + bool associated; + bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; bool low_latency; + bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; u32 ap_beacon_time; @@ -512,6 +515,10 @@ enum { D0I3_PENDING_WAKEUP, }; +#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -553,9 +560,8 @@ struct iwl_mvm { struct mvm_statistics_rx rx_stats; - unsigned long transport_queue_stop; u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; - atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; + atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; const char *nvm_file_name; struct iwl_nvm_data *nvm_data; @@ -641,6 +647,7 @@ struct iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 restart_fw; + struct work_struct fw_error_dump_wk; struct iwl_mvm_dump_ptrs *fw_error_dump; #ifdef CONFIG_IWLWIFI_LEDS @@ -694,6 +701,14 @@ struct iwl_mvm { /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; s32 temperature; /* Celsius */ + /* + * Debug option to set the NIC temperature. This option makes the + * driver think this is the actual NIC temperature, and ignore the + * real temperature that is received from the fw + */ + bool temperature_test; /* Debug test temperature is enabled */ + + struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE u32 noa_duration; @@ -706,7 +721,7 @@ struct iwl_mvm { u8 last_agg_queue; /* Indicate if device power save is allowed */ - bool ps_disabled; + u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; @@ -714,6 +729,8 @@ struct iwl_mvm { /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; + + u8 low_latency_agg_frame_limit; }; /* Extract MVM priv from op_mode and _hw */ @@ -762,6 +779,11 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } +static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) +{ + return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT; +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -772,6 +794,9 @@ struct iwl_rate_info { u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ }; +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); +int __iwl_mvm_mac_start(struct iwl_mvm *mvm); + /****************** * MVM Methods ******************/ @@ -878,7 +903,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off); + bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -910,6 +935,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); int iwl_mvm_cancel_scan(struct iwl_mvm *mvm); +int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan); /* Scheduled scan */ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, @@ -964,10 +990,14 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, struct iwl_mvm_frame_stats *stats, u32 rate, bool agg); int rs_pretty_print_rate(char *buf, const u32 rate); +void rs_update_last_rssi(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_rx_status *rx_status); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); @@ -1120,6 +1150,39 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) return mvmvif->low_latency; } +/* hw scheduler queue config */ +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg); +void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue); + +static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, + u8 fifo) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + + iwl_mvm_enable_txq(mvm, queue, 0, &cfg); +} + +static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = tid, + .frame_limit = frame_limit, + .aggregate = true, + }; + + iwl_mvm_enable_txq(mvm, queue, ssn, &cfg); +} + /* Assoc status */ bool iwl_mvm_is_idle(struct iwl_mvm *mvm); @@ -1129,6 +1192,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); +int iwl_mvm_get_temp(struct iwl_mvm *mvm); /* smart fifo */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -1136,7 +1200,17 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* TDLS */ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); +void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool sta_added); +void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); void 
iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); +#else +static inline void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) {} +#endif #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index cfdd314fdd5decd15b57bf02b3ad63eb1ab16cd0..af074563e77076fafb88ae6c3d1c21a6f4da53e3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -62,6 +64,7 @@ *****************************************************************************/ #include #include "iwl-trans.h" +#include "iwl-csr.h" #include "mvm.h" #include "iwl-eeprom-parse.h" #include "iwl-eeprom-read.h" @@ -347,7 +350,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) /* Maximal size depends on HW family and step */ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) max_section_size = IWL_MAX_NVM_SECTION_SIZE; - else if ((mvm->trans->hw_rev & 0xc) == 0) /* Family 8000 A-step */ + else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE; else /* Family 8000 B-step */ max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE; diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c index 9bfb95e89cfbbdab1b446c55d5423a795eb87e03..adcbf4c8edd86c37400029db065dd1720b8cdfed 100644 --- a/drivers/net/wireless/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 610dbcb0dc279d97dda32ded4df2b387a4577333..15aa298ee79c749b5e7e7e7401d92375a270c29e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -330,6 +332,8 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BCAST_FILTER_CMD), CMD(REPLY_SF_CFG_CMD), CMD(REPLY_BEACON_FILTERING_CMD), + CMD(CMD_DTS_MEASUREMENT_TRIGGER), + CMD(DTS_MEASUREMENT_NOTIFICATION), CMD(REPLY_THERMAL_MNG_BACKOFF), CMD(MAC_PM_POWER_TABLE), CMD(BT_COEX_CI), @@ -338,6 +342,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BT_COEX_UPDATE_REDUCED_TXP), CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), CMD(ANTENNA_COUPLING_NOTIFICATION), + CMD(SCD_QUEUE_CFG), }; #undef CMD @@ -362,6 +367,8 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) return 0; } +static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -415,6 +422,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->first_agg_queue = 12; } mvm->sf_state = SF_UNINIT; + mvm->low_latency_agg_frame_limit = 6; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); @@ -428,6 +436,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); + INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); @@ -456,7 +465,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_names = iwl_mvm_cmd_strings; trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; - trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO; + trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; + trans_cfg.scd_set_active = true; snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), @@ -494,7 +504,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; /* - * Even if nvm exists in the nvm_file driver should read agin the nvm + * Even if nvm exists in the nvm_file driver should read again the nvm * from the nic because there might be entries that exist in the OTP * and not in the file. 
* for nics with no_power_up_nic_in_init: rely completley on nvm_file @@ -700,14 +710,13 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) return; - if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) { + if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) { IWL_DEBUG_TX_QUEUES(mvm, "queue %d (mac80211 %d) already stopped\n", queue, mq); return; } - set_bit(mq, &mvm->transport_queue_stop); ieee80211_stop_queue(mvm->hw, mq); } @@ -719,15 +728,13 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) return; - if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) { + if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) already awake\n", + "queue %d (mac80211 %d) still stopped\n", queue, mq); return; } - clear_bit(mq, &mvm->transport_queue_stop); - ieee80211_wake_queue(mvm->hw, mq); } @@ -781,6 +788,16 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } +static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) +{ + struct iwl_mvm *mvm = + container_of(work, struct iwl_mvm, fw_error_dump_wk); + + mutex_lock(&mvm->mutex); + iwl_mvm_fw_error_dump(mvm); + mutex_unlock(&mvm->mutex); +} + void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); @@ -846,6 +863,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) if (fw_error && mvm->restart_fw > 0) mvm->restart_fw--; ieee80211_restart_hw(mvm->hw); + } else if (fw_error) { + schedule_work(&mvm->fw_error_dump_wk); } } diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index 6cc243f7cf602b8f926baa11d4a59c30db965e0c..12283b55ee84ff5aa49b629d3bd4b702b9826720 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index d9769a23c68b44e9b16e3ce6bddd7aa191c45e27..5b85b0cc7a2aa84675ed685a216cd6fe6f1d7f64 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -198,8 +200,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, } } - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) + if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (mvmvif->dbgfs_pm.use_ps_poll) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); +#endif return; + } cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); @@ -277,12 +286,28 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, return true; } +static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) +{ + struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_channel *chan; + bool radar_detect = false; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + WARN_ON(!chanctx_conf); + if (chanctx_conf) { + chan = chanctx_conf->def.chan; + radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + } + rcu_read_unlock(); + + return radar_detect; +} + static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { - struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_channel *chan; int dtimper, dtimper_msec; int keep_alive; bool radar_detect = false; @@ -311,7 +336,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || - !mvmvif->pm_enabled) + !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif)) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); @@ -324,14 +349,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, } /* Check if radar detection is required on current channel */ - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - WARN_ON(!chanctx_conf); - if (chanctx_conf) { - chan = chanctx_conf->def.chan; - radar_detect = chan->flags & IEEE80211_CHAN_RADAR; - } - rcu_read_unlock(); + radar_detect = iwl_mvm_power_is_radar(vif); /* Check skip over DTIM conditions */ if (!radar_detect && (dtimper <= 10) && @@ -492,17 +510,33 @@ struct iwl_power_vifs { bool bss_active; bool ap_active; bool monitor_active; - bool bss_tdls; - bool p2p_tdls; }; -static void iwl_mvm_power_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_power_vifs *power_iterator = _data; mvmvif->pm_enabled = false; +} + +static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool *disable_ps = _data; + + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + *disable_ps |= mvmvif->ps_disabled; +} + +static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_power_vifs *power_iterator = _data; + switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: break; @@ -530,8 +564,6 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac, /* only a single MAC of the same type */ WARN_ON(power_iterator->p2p_vif); power_iterator->p2p_vif = vif; - power_iterator->p2p_tdls = - !!iwl_mvm_tdls_sta_count(power_iterator->mvm, vif); if 
(mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) power_iterator->p2p_active = true; @@ -541,8 +573,6 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac, /* only a single MAC of the same type */ WARN_ON(power_iterator->bss_vif); power_iterator->bss_vif = vif; - power_iterator->bss_tdls = - !!iwl_mvm_tdls_sta_count(power_iterator->mvm, vif); if (mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) power_iterator->bss_active = true; @@ -558,9 +588,8 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac, } } -static void -iwl_mvm_power_set_pm(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) +static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *bss_mvmvif = NULL; struct iwl_mvm_vif *p2p_mvmvif = NULL; @@ -570,10 +599,11 @@ iwl_mvm_power_set_pm(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - /* get vifs info + set pm_enable to false */ + /* set pm_enable to false */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_iterator, vifs); + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_disable_pm_iterator, + NULL); if (vifs->bss_vif) bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); @@ -585,15 +615,13 @@ iwl_mvm_power_set_pm(struct iwl_mvm *mvm, ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); /* enable PM on bss if bss stand alone */ - if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active && - !vifs->bss_tdls) { + if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { bss_mvmvif->pm_enabled = true; return; } /* enable PM on p2p if p2p stand alone */ - if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active && - !vifs->p2p_tdls) { + if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) p2p_mvmvif->pm_enabled = true; return; @@ -816,32 +844,92 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) +static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) +{ + bool disable_ps; + int ret; + + /* disable PS if CAM */ + disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); + /* ...or if any of the vifs require PS to be off */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_ps_disabled_iterator, + &disable_ps); + + /* update device power state if it has changed */ + if (mvm->ps_disabled != disable_ps) { + bool old_ps_disabled = mvm->ps_disabled; + + mvm->ps_disabled = disable_ps; + ret = iwl_mvm_power_update_device(mvm); + if (ret) { + mvm->ps_disabled = old_ps_disabled; + return ret; + } + } + + return 0; +} + +static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *mvmvif; + bool ba_enable; + + if (!vifs->bf_vif) + return 0; + + mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); + + ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || + !vifs->bf_vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); + + return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); +} + +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) +{ + struct iwl_power_vifs vifs = { + .mvm = mvm, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; + + return iwl_mvm_power_set_ba(mvm, &vifs); +} + 
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) +{ struct iwl_power_vifs vifs = { .mvm = mvm, }; - bool ba_enable; int ret; lockdep_assert_held(&mvm->mutex); + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + iwl_mvm_power_set_pm(mvm, &vifs); - /* disable PS if CAM */ - if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) { - mvm->ps_disabled = true; - } else { - /* don't update device power state unless we add / remove monitor */ - if (vifs.monitor_vif) { - if (vifs.monitor_active) - mvm->ps_disabled = true; - ret = iwl_mvm_power_update_device(mvm); - if (ret) - return ret; - } - } + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; if (vifs.bss_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); @@ -855,16 +943,7 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return ret; } - if (!vifs.bf_vif) - return 0; - - mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif); - - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs.bf_vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); - - return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable); + return iwl_mvm_power_set_ba(mvm, &vifs); } int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, @@ -883,17 +962,22 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, iwl_mvm_power_build_cmd(mvm, vif, &cmd); if (enable) { - /* configure skip over dtim up to 300 msec */ + /* configure skip over dtim up to 306TU - 314 msec */ int dtimper = vif->bss_conf.dtim_period ?: 1; - int dtimper_msec = dtimper * vif->bss_conf.beacon_int; + int dtimper_tu = dtimper * vif->bss_conf.beacon_int; + bool radar_detect = iwl_mvm_power_is_radar(vif); - if (WARN_ON(!dtimper_msec)) + if (WARN_ON(!dtimper_tu)) return 0; - cmd.skip_dtim_periods = 300 / dtimper_msec; - if (cmd.skip_dtim_periods) - cmd.flags |= - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + /* Check skip over DTIM conditions */ + /* TODO: check that multicast wake lock is off */ + if (!radar_detect && (dtimper < 10)) { + cmd.skip_dtim_periods = 306 / dtimper_tu; + if (cmd.skip_dtim_periods) + cmd.flags |= cpu_to_le16( + POWER_FLAGS_SKIP_OVER_DTIM_MSK); + } } iwl_mvm_power_log(mvm, &cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c index 4e20b3ce2b6a320e7fd163fe34a02674788645fc..dbb2594390e96195c194f9c9ebe8d0967b3924b7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -161,6 +163,9 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, quota *= (beacon_int - mvm->noa_duration); quota /= beacon_int; + IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", + le32_to_cpu(cmd->quotas[i].quota), quota); + cmd->quotas[i].quota = cpu_to_le32(quota); } #endif @@ -170,12 +175,14 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *disabled_vif) { struct iwl_time_quota_cmd cmd = {}; - int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat; + int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat; struct iwl_mvm_quota_iterator_data data = { .n_interfaces = {}, .colors = { -1, -1, -1, -1 }, .disabled_vif = disabled_vif, }; + struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd; + bool send = false; lockdep_assert_held(&mvm->mutex); @@ -222,6 +229,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN; + IWL_DEBUG_QUOTA(mvm, + "quota: low-latency binding active, remaining quota per other binding: %d\n", + quota); } else if (num_active_macs) { /* * There are 0 or more than 1 low latency bindings, or all the @@ -230,6 +240,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, */ quota = QUOTA_100 / num_active_macs; quota_rem = QUOTA_100 % num_active_macs; + IWL_DEBUG_QUOTA(mvm, + "quota: splitting evenly per binding: %d\n", + quota); } else { /* values don't really matter - won't be used */ quota = 0; @@ -271,6 +284,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, for (i = 0; i < MAX_BINDINGS; i++) { if (le32_to_cpu(cmd.quotas[i].quota) != 0) { le32_add_cpu(&cmd.quotas[i].quota, quota_rem); + IWL_DEBUG_QUOTA(mvm, + "quota: giving remainder of %d to binding %d\n", + quota_rem, i); break; } } @@ -279,15 +295,33 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, /* check that we have non-zero quota for all valid bindings */ for (i = 0; i < MAX_BINDINGS; i++) { + if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color) + send = true; + if (cmd.quotas[i].max_duration != last->quotas[i].max_duration) + send = true; + if (abs((int)le32_to_cpu(cmd.quotas[i].quota) - + (int)le32_to_cpu(last->quotas[i].quota)) + > IWL_MVM_QUOTA_THRESHOLD) + send = true; if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID)) continue; WARN_ONCE(cmd.quotas[i].quota == 0, "zero quota on binding %d\n", i); } - ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send quota: %d\n", ret); - return ret; + if (!send) { + /* don't send a practically unchanged command, the firmware has + * to re-initialize a lot of state and that can have an adverse + * impact on it + */ + return 0; + } + + err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd); + + if (err) + IWL_ERR(mvm, "Failed to send quota: %d\n", err); + else + mvm->last_quota_cmd = cmd; + return err; } diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index c70e959bf0e3d443b17ca8e055ee3dd57fda30a2..18a539999580410dbb9caafdae6d975d15def1fe 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -376,9 +377,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) } static void rs_rate_scale_perform(struct iwl_mvm *mvm, - struct sk_buff *skb, - struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta); + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + int tid); static void rs_fill_lq_cmd(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, @@ -504,10 +505,10 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type) static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, const char *prefix) { - IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d\n", + IWL_DEBUG_RATE(mvm, "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d\n", prefix, rs_pretty_lq_type(rate->type), rate->index, rs_pretty_ant(rate->ant), - rate->bw, rate->sgi); + rate->bw, rate->sgi, rate->ldpc); } static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) @@ -671,8 +672,10 @@ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta, return -EINVAL; if (tbl->column != RS_COLUMN_INVALID) { - lq_sta->tx_stats[tbl->column][scale_index].total += attempts; - lq_sta->tx_stats[tbl->column][scale_index].success += successes; + struct lq_sta_pers *pers = &lq_sta->pers; + + pers->tx_stats[tbl->column][scale_index].total += attempts; + pers->tx_stats[tbl->column][scale_index].success += successes; } /* Select window for current tx bit rate */ @@ -741,6 +744,8 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, ucode_rate |= rate->bw; if (rate->sgi) ucode_rate |= RATE_MCS_SGI_MSK; + if (rate->ldpc) + ucode_rate |= RATE_MCS_LDPC_MSK; return ucode_rate; } @@ -778,6 +783,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, /* HT or VHT */ if (ucode_rate & RATE_MCS_SGI_MSK) rate->sgi = true; + if (ucode_rate & RATE_MCS_LDPC_MSK) + rate->ldpc = true; rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK; @@ -964,13 +971,13 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, rate->index > IWL_RATE_MCS_9_INDEX); rate->index = rs_ht_to_legacy[rate->index]; + rate->ldpc = false; } else { /* Downgrade to SISO with same MCS if in MIMO */ rate->type = is_vht_mimo2(rate) ? 
LQ_VHT_SISO : LQ_HT_SISO; } - if (num_of_ant(rate->ant) > 1) rate->ant = first_antenna(mvm->fw->valid_tx_ant); @@ -1000,27 +1007,35 @@ static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags) return RATE_MCS_CHAN_WIDTH_20; } -/* - * mac80211 sends us Tx status - */ -static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, void *priv_sta, - struct sk_buff *skb) +static u8 rs_get_tid(struct ieee80211_hdr *hdr) +{ + u8 tid = IWL_MAX_TID_COUNT; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + if (unlikely(tid > IWL_MAX_TID_COUNT)) + tid = IWL_MAX_TID_COUNT; + + return tid; +} + +void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, struct ieee80211_tx_info *info) { int legacy_success; int retries; int mac_index, i; - struct iwl_lq_sta *lq_sta = priv_sta; struct iwl_lq_cmd *table; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); enum mac80211_rate_control_flags mac_flags; u32 ucode_rate; struct rs_rate rate; struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; /* Treat uninitialized rate scaling data same as non-existing. */ if (!lq_sta) { @@ -1038,10 +1053,6 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, return; } #endif - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - /* This packet was aggregated but doesn't carry status info */ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && !(info->flags & IEEE80211_TX_STAT_AMPDU)) @@ -1087,7 +1098,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); + iwl_mvm_rs_rate_init(mvm, sta, info->band, false); return; } lq_sta->last_tx = jiffies; @@ -1214,8 +1225,28 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. 
*/ - if (sta && sta->supp_rates[sband->band]) - rs_rate_scale_perform(mvm, skb, sta, lq_sta); + if (sta && sta->supp_rates[info->band]) + rs_rate_scale_perform(mvm, sta, lq_sta, tid); +} + +/* + * mac80211 sends us Tx status + */ +static void rs_mac80211_tx_status(void *mvm_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info); } /* @@ -1486,22 +1517,6 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm, iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); } -static u8 rs_get_tid(struct iwl_lq_sta *lq_data, - struct ieee80211_hdr *hdr) -{ - u8 tid = IWL_MAX_TID_COUNT; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & 0xf; - } - - if (unlikely(tid > IWL_MAX_TID_COUNT)) - tid = IWL_MAX_TID_COUNT; - - return tid; -} - static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta, @@ -1620,6 +1635,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, } rate->bw = rs_bw_from_sta_bw(sta); + rate->ldpc = lq_sta->ldpc; search_tbl->column = col_id; rs_set_expected_tpt_table(lq_sta, search_tbl); @@ -1939,12 +1955,10 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, * Do rate scaling and search for new modulation mode. */ static void rs_rate_scale_perform(struct iwl_mvm *mvm, - struct sk_buff *skb, struct ieee80211_sta *sta, - struct iwl_lq_sta *lq_sta) + struct iwl_lq_sta *lq_sta, + int tid) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int low = IWL_RATE_INVALID; int high = IWL_RATE_INVALID; int index; @@ -1961,29 +1975,12 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, u8 done_search = 0; u16 high_low; s32 sr; - u8 tid = IWL_MAX_TID_COUNT; u8 prev_agg = lq_sta->is_agg; struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv; struct iwl_mvm_tid_data *tid_data; struct rs_rate *rate; - /* Send management frames and NO_ACK data using lowest rate. */ - /* TODO: this could probably be improved.. 
*/ - if (!ieee80211_is_data(hdr->frame_control) || - info->flags & IEEE80211_TX_CTL_NO_ACK) - return; - - tid = rs_get_tid(lq_sta, hdr); - if ((tid != IWL_MAX_TID_COUNT) && - (lq_sta->tx_agg_tid_en & (1 << tid))) { - tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state == IWL_AGG_OFF) - lq_sta->is_agg = 0; - else - lq_sta->is_agg = 1; - } else { - lq_sta->is_agg = 0; - } + lq_sta->is_agg = !!sta_priv->agg_tids; /* * Select rate-scale / modulation-mode table to work with in @@ -2030,18 +2027,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, return; } - /* force user max rate if set by user */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < index)) { - index = lq_sta->max_rate_idx; - update_lq = 1; - window = &(tbl->win[index]); - IWL_DEBUG_RATE(mvm, - "Forcing user max rate %d\n", - index); - goto lq_update; - } - + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ window = &(tbl->win[index]); /* @@ -2129,10 +2115,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, low = high_low & 0xff; high = (high_low >> 8) & 0xff; - /* If user set max rate, dont allow higher than user constrain */ - if ((lq_sta->max_rate_idx != -1) && - (lq_sta->max_rate_idx < high)) - high = IWL_RATE_INVALID; + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ sr = window->success_ratio; @@ -2294,6 +2277,110 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, lq_sta->last_txrate_idx = index; } +struct rs_init_rate_info { + s8 rssi; + u8 rate_idx; +}; + +static const struct rs_init_rate_info rs_init_rates_24ghz[] = { + { -60, IWL_RATE_54M_INDEX }, + { -64, IWL_RATE_48M_INDEX }, + { -68, IWL_RATE_36M_INDEX }, + { -80, IWL_RATE_24M_INDEX }, + { -84, IWL_RATE_18M_INDEX }, + { -85, IWL_RATE_12M_INDEX }, + { -86, IWL_RATE_11M_INDEX }, + { -88, IWL_RATE_5M_INDEX }, + { -90, IWL_RATE_2M_INDEX }, + { S8_MIN, IWL_RATE_1M_INDEX }, +}; + +static const struct rs_init_rate_info rs_init_rates_5ghz[] = { + { -60, IWL_RATE_54M_INDEX }, + { -64, IWL_RATE_48M_INDEX }, + { -72, IWL_RATE_36M_INDEX }, + { -80, IWL_RATE_24M_INDEX }, + { -84, IWL_RATE_18M_INDEX }, + { -85, IWL_RATE_12M_INDEX }, + { -87, IWL_RATE_9M_INDEX }, + { S8_MIN, IWL_RATE_6M_INDEX }, +}; + +/* Choose an initial legacy rate and antenna to use based on the RSSI + * of last Rx + */ +static void rs_get_initial_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + enum ieee80211_band band, + struct rs_rate *rate) +{ + int i, nentries; + s8 best_rssi = S8_MIN; + u8 best_ant = ANT_NONE; + u8 valid_tx_ant = mvm->fw->valid_tx_ant; + const struct rs_init_rate_info *initial_rates; + + for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { + if (!(lq_sta->pers.chains & BIT(i))) + continue; + + if (lq_sta->pers.chain_signal[i] > best_rssi) { + best_rssi = lq_sta->pers.chain_signal[i]; + best_ant = BIT(i); + } + } + + IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n", + rs_pretty_ant(best_ant), best_rssi); + + if (best_ant != ANT_A && best_ant != ANT_B) + rate->ant = first_antenna(valid_tx_ant); + else + rate->ant = best_ant; + + rate->sgi = false; + rate->ldpc = false; + rate->bw = RATE_MCS_CHAN_WIDTH_20; + + rate->index = find_first_bit(&lq_sta->active_legacy_rate, + BITS_PER_LONG); + + if (band == IEEE80211_BAND_5GHZ) { + rate->type = LQ_LEGACY_A; + initial_rates = rs_init_rates_5ghz; + nentries = ARRAY_SIZE(rs_init_rates_5ghz); + } else { + rate->type = LQ_LEGACY_G; + initial_rates = rs_init_rates_24ghz; + nentries = ARRAY_SIZE(rs_init_rates_24ghz); + } + + if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { + for (i = 0; i < 
nentries; i++) { + int rate_idx = initial_rates[i].rate_idx; + if ((best_rssi >= initial_rates[i].rssi) && + (BIT(rate_idx) & lq_sta->active_legacy_rate)) { + rate->index = rate_idx; + break; + } + } + } + + IWL_DEBUG_RATE(mvm, "rate_idx %d ANT %s\n", rate->index, + rs_pretty_ant(rate->ant)); +} + +/* Save info about RSSI of last Rx */ +void rs_update_last_rssi(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct ieee80211_rx_status *rx_status) +{ + lq_sta->pers.chains = rx_status->chains; + lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; + lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; + lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; +} + /** * rs_initialize_lq - Initialize a station's hardware rate table * @@ -2316,17 +2403,11 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, { struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; - int i; u8 active_tbl = 0; - u8 valid_tx_ant; if (!sta || !lq_sta) return; - i = lq_sta->last_txrate_idx; - - valid_tx_ant = mvm->fw->valid_tx_ant; - if (!lq_sta->search_better_tbl) active_tbl = lq_sta->active_tbl; else @@ -2335,17 +2416,8 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - if ((i < 0) || (i >= IWL_RATE_COUNT)) - i = 0; - - rate->index = i; - rate->ant = first_antenna(valid_tx_ant); - rate->sgi = false; - rate->bw = RATE_MCS_CHAN_WIDTH_20; - if (band == IEEE80211_BAND_5GHZ) - rate->type = LQ_LEGACY_A; - else - rate->type = LQ_LEGACY_G; + rs_get_initial_rate(mvm, lq_sta, band, rate); + lq_sta->last_txrate_idx = rate->index; WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); if (rate->ant == ANT_A) @@ -2363,23 +2435,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, struct ieee80211_tx_rate_control *txrc) { struct sk_buff *skb = txrc->skb; - struct ieee80211_supported_band *sband = txrc->sband; struct iwl_op_mode *op_mode __maybe_unused = (struct iwl_op_mode *)mvm_r; struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_lq_sta *lq_sta = mvm_sta; - /* Get max rate if user set max rate */ - if (lq_sta) { - lq_sta->max_rate_idx = txrc->max_rate_idx; - if ((sband->band == IEEE80211_BAND_5GHZ) && - (lq_sta->max_rate_idx != -1)) - lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((lq_sta->max_rate_idx < 0) || - (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) - lq_sta->max_rate_idx = -1; - } + /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ /* Treat uninitialized rate scaling data same as non-existing. */ if (lq_sta && !lq_sta->pers.drv) { @@ -2412,6 +2474,8 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, lq_sta->pers.dbg_fixed_rate = 0; lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID; #endif + lq_sta->pers.chains = 0; + memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); return &sta_priv->lq_sta; } @@ -2580,7 +2644,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * previous packets? Need to have IEEE 802.1X auth succeed immediately * after assoc.. 
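
Aside, for illustration only (not part of the patch): a minimal, self-contained sketch of the RSSI-threshold lookup that rs_get_initial_rate() performs above. The table layout mirrors rs_init_rates_24ghz; the name pick_initial_legacy_rate and the standalone types are invented for the example. With best_rssi = -66 dBm on 2.4 GHz the -68 dBm entry is the first match, so IWL_RATE_36M_INDEX would be chosen, assuming that rate is in the supported mask.

	/* Illustrative sketch: walk a descending RSSI threshold table and
	 * return the first legacy rate whose threshold is met and whose
	 * bit is set in the supported-rate mask.
	 */
	struct init_rate_entry {
		signed char rssi;	/* minimum RSSI (dBm) for this rate */
		unsigned char rate_idx;	/* index into the legacy rate table */
	};

	static unsigned char
	pick_initial_legacy_rate(const struct init_rate_entry *tbl, int nentries,
				 signed char best_rssi, unsigned long rate_mask,
				 unsigned char lowest_supported_idx)
	{
		int i;

		for (i = 0; i < nentries; i++) {
			if (best_rssi >= tbl[i].rssi &&
			    (rate_mask & (1UL << tbl[i].rate_idx)))
				return tbl[i].rate_idx;
		}

		/* nothing matched: fall back to the lowest supported rate */
		return lowest_supported_idx;
	}
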
*/ - lq_sta->max_rate_idx = -1; lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; lq_sta->band = sband->band; /* @@ -2609,9 +2672,16 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; lq_sta->is_vht = false; + if (mvm->cfg->ht_params->ldpc && + (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) + lq_sta->ldpc = true; } else { rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); lq_sta->is_vht = true; + + if (mvm->cfg->ht_params->ldpc && + (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) + lq_sta->ldpc = true; } lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate, @@ -2621,11 +2691,12 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate, BITS_PER_LONG); - IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n", + IWL_DEBUG_RATE(mvm, + "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d\n", lq_sta->active_legacy_rate, lq_sta->active_siso_rate, lq_sta->active_mimo2_rate, - lq_sta->is_vht); + lq_sta->is_vht, lq_sta->ldpc); IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", lq_sta->max_legacy_rate_idx, lq_sta->max_siso_rate_idx, @@ -2638,11 +2709,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* as default allow aggregation for all tids */ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; - - /* Set last_txrate_idx to lowest rate */ - lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); - if (sband->band == IEEE80211_BAND_5GHZ) - lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); @@ -2678,6 +2744,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, int i; int num_rates = ARRAY_SIZE(lq_cmd->rs_table); __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); + u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; @@ -2688,6 +2755,13 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, lq_cmd->mimo_delim = num_rates - 1; else lq_cmd->mimo_delim = 0; + + lq_cmd->reduced_tpc = 0; + + if (num_of_ant(ant) == 1) + lq_cmd->single_stream_ant_msk = ant; + + lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; } #endif /* CONFIG_MAC80211_DEBUGFS */ @@ -2811,31 +2885,55 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, const struct rs_rate *initial_rate) { struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; - u8 ant = initial_rate->ant; + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif; + + lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + lq_cmd->agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); #ifdef CONFIG_MAC80211_DEBUGFS if (lq_sta->pers.dbg_fixed_rate) { rs_build_rates_table_from_fixed(mvm, lq_cmd, lq_sta->band, lq_sta->pers.dbg_fixed_rate); - lq_cmd->reduced_tpc = 0; - ant = (lq_sta->pers.dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> - RATE_MCS_ANT_POS; - } else + return; + } #endif - rs_build_rates_table(mvm, lq_sta, initial_rate); + if (WARN_ON_ONCE(!sta || !initial_rate)) + return; - if (num_of_ant(ant) == 1) - lq_cmd->single_stream_ant_msk = ant; + rs_build_rates_table(mvm, lq_sta, initial_rate); - lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + if (num_of_ant(initial_rate->ant) == 1) + lq_cmd->single_stream_ant_msk = initial_rate->ant; - 
lq_cmd->agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (sta) - lq_cmd->agg_time_limit = + if (num_of_ant(initial_rate->ant) == 1) + lq_cmd->single_stream_ant_msk = initial_rate->ant; + + lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + + /* + * In case of low latency, tell the firwmare to leave a frame in the + * Tx Fifo so that it can start a transaction in the same TxOP. This + * basically allows the firmware to send bursts. + */ + if (iwl_mvm_vif_low_latency(mvmvif)) { + lq_cmd->agg_frame_cnt_limit--; + + if (mvm->low_latency_agg_frame_limit) + lq_cmd->agg_frame_cnt_limit = + min(lq_cmd->agg_frame_cnt_limit, + mvm->low_latency_agg_frame_limit); + } + + if (mvmsta->vif->p2p) + lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; + + lq_cmd->agg_time_limit = cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); } @@ -2932,10 +3030,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm, lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate); if (lq_sta->pers.dbg_fixed_rate) { - struct rs_rate rate; - rs_rate_from_ucode_rate(lq_sta->pers.dbg_fixed_rate, - lq_sta->band, &rate); - rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate); + rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); } } @@ -3002,8 +3097,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : (is_ht80(rate)) ? "80Mhz" : "BAD BW"); - desc += sprintf(buff+desc, " %s %s\n", + desc += sprintf(buff+desc, " %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", + (rate->ldpc) ? "LDPC" : "BCC", (lq_sta->is_agg) ? "AGG on" : ""); } desc += sprintf(buff+desc, "last tx rate=0x%X\n", @@ -3151,7 +3247,7 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, "%s,", column_name[col]); for (rate = 0; rate < IWL_RATE_COUNT; rate++) { - stats = &(lq_sta->tx_stats[col][rate]); + stats = &(lq_sta->pers.tx_stats[col][rate]); pos += scnprintf(pos, endpos - pos, "%llu/%llu,", stats->success, @@ -3170,7 +3266,7 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file, size_t count, loff_t *ppos) { struct iwl_lq_sta *lq_sta = file->private_data; - memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats)); + memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats)); return count; } @@ -3216,7 +3312,7 @@ static void rs_rate_init_stub(void *mvm_r, static const struct rate_control_ops rs_mvm_ops = { .name = RS_NAME, - .tx_status = rs_tx_status, + .tx_status = rs_mac80211_tx_status, .get_rate = rs_get_rate, .rate_init = rs_rate_init_stub, .alloc = rs_alloc, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index f27b9d687a25e9e1854c6e6a9a04331e8994717e..eb34c1209acc2d6fdd9d35fd2b03d2ea3e565780 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -207,6 +207,7 @@ struct rs_rate { u8 ant; u32 bw; bool sgi; + bool ldpc; }; @@ -329,10 +330,9 @@ struct iwl_lq_sta { */ u64 last_tx; bool is_vht; + bool ldpc; /* LDPC Rx is supported by the STA */ enum ieee80211_band band; - struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; - /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. 
*/ unsigned long active_legacy_rate; unsigned long active_siso_rate; @@ -343,7 +343,6 @@ struct iwl_lq_sta { u8 max_siso_rate_idx; u8 max_mimo2_rate_idx; - s8 max_rate_idx; /* Max rate set by user */ u8 missed_rate_counter; struct iwl_lq_cmd lq; @@ -361,11 +360,14 @@ struct iwl_lq_sta { int tpc_reduce; /* persistent fields - initialized only once - keep last! */ - struct { + struct lq_sta_pers { #ifdef CONFIG_MAC80211_DEBUGFS u32 dbg_fixed_rate; u8 dbg_fixed_txp_reduction; #endif + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; struct iwl_mvm *drv; } pers; }; @@ -374,6 +376,10 @@ struct iwl_lq_sta { void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_band band, bool init); +/* Notify RS about Tx status */ +void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, struct ieee80211_tx_info *info); + /** * iwl_rate_control_register - Register the rate control algorithm callbacks * diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index bf5cd8c8b0f798d2a0cebf24236f0730304e6b4a..3cf40f3f58ec2214323b16aa2708dad1c3430163 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -244,6 +246,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; + struct ieee80211_sta *sta; u32 len; u32 ampdu_status; u32 rate_n_flags; @@ -258,23 +261,6 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, memset(&rx_status, 0, sizeof(rx_status)); - /* - * We have tx blocked stations (with CS bit). If we heard frames from - * a blocked station on a new channel we can TX to it again. - */ - if (unlikely(mvm->csa_tx_block_bcn_timeout)) { - struct ieee80211_sta *sta; - - rcu_read_lock(); - - sta = ieee80211_find_sta( - rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2); - if (sta) - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); - - rcu_read_unlock(); - } - /* * drop the packet if it has failed being decrypted by HW */ @@ -323,6 +309,29 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, (unsigned long long)rx_status.mactime); + rcu_read_lock(); + /* + * We have tx blocked stations (with CS bit). If we heard frames from + * a blocked station on a new channel we can TX to it again. 
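
Aside, for illustration only (not part of the patch): the "keep last!" note on the new lq_sta_pers struct matters because everything in front of the trailing pers member can be wiped when rate scaling is re-initialized, while the debugfs overrides, the last-Rx chain signal and the lifetime Tx statistics survive. A minimal sketch of that idiom, with invented type and field names:

	#include <stddef.h>	/* offsetof */
	#include <string.h>	/* memset */

	struct example_lq_sta {
		int per_assoc_state[16];	/* stands in for the volatile fields */
		struct {
			unsigned char chains;		/* must survive re-init */
			signed char chain_signal[3];
			unsigned long long tx_stats[8];
		} pers;				/* persistent part, kept last */
	};

	static void example_reinit(struct example_lq_sta *lq_sta)
	{
		/* wipe everything up to, but not including, the persistent block */
		memset(lq_sta, 0, offsetof(struct example_lq_sta, pers));
	}
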
+ */ + if (unlikely(mvm->csa_tx_block_bcn_timeout)) { + sta = ieee80211_find_sta( + rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2); + if (sta) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + } + + /* This is fine since we don't support multiple AP interfaces */ + sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); + if (sta) { + struct iwl_mvm_sta *mvmsta; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + rs_update_last_rssi(mvm, &mvmsta->lq_sta, + &rx_status); + } + + rcu_read_unlock(); + /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) rx_status.flag |= RX_FLAG_SHORTPRE; @@ -491,10 +500,29 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, .mvm = mvm, }; + /* + * set temperature debug enabled - ignore FW temperature updates + * and use the user set temperature. + */ + if (mvm->temperature_test) { + if (mvm->temperature < le32_to_cpu(common->temperature)) + IWL_DEBUG_TEMP(mvm, + "Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n", + mvm->temperature, + le32_to_cpu(common->temperature)); + /* + * skip iwl_mvm_tt_handler since we are in + * temperature debug mode and we are ignoring + * the new temperature value + */ + goto update; + } + if (mvm->temperature != le32_to_cpu(common->temperature)) { mvm->temperature = le32_to_cpu(common->temperature); iwl_mvm_tt_handler(mvm); } +update: iwl_mvm_update_rx_statistics(mvm, stats); ieee80211_iterate_active_interfaces(mvm->hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 004b1f5d031429a2798cc1d35464daa6ae59db00..cb85e63c20aa340d70f2be0a3c4f48c3781e8182 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -158,8 +160,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid, static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) { if (band == IEEE80211_BAND_2GHZ) - return 30 + 3 * (n_ssids + 1); - return 20 + 2 * (n_ssids + 1); + return 20 + 3 * (n_ssids + 1); + return 10 + 2 * (n_ssids + 1); } static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) @@ -279,6 +281,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, { bool global_bound = false; enum ieee80211_band band; + u8 frag_passive_dwell = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -288,12 +291,36 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, if (!global_bound) goto not_bound; - params->suspend_time = 100; - params->max_out_time = 600; + params->suspend_time = 30; + params->max_out_time = 170; if (iwl_mvm_low_latency(mvm)) { - params->suspend_time = 250; - params->max_out_time = 250; + if (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { + params->suspend_time = 105; + params->max_out_time = 70; + frag_passive_dwell = 20; + } else { + params->suspend_time = 120; + params->max_out_time = 120; + } + } + + if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + /* + * P2P device scan should not be fragmented to avoid negative + * impact on P2P device discovery. Configure max_out_time to be + * equal to dwell time on passive channel. Take a longest + * possible value, one that corresponds to 2GHz band + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + u32 passive_dwell = + iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); + params->max_out_time = passive_dwell; + } else { + params->passive_fragmented = true; + } } if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY) @@ -302,12 +329,65 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, not_bound: for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); + if (params->passive_fragmented) + params->dwell[band].passive = frag_passive_dwell; + else + params->dwell[band].passive = + iwl_mvm_get_passive_dwell(band); params->dwell[band].active = iwl_mvm_get_active_dwell(band, n_ssids); } } +static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) +{ + /* require rrm scan whenever the fw supports it */ + return mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT; +} + +static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm, + bool is_sched_scan) +{ + int max_probe_len; + + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) + max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; + else + max_probe_len = mvm->fw->ucode_capa.max_probe_length; + + /* we create the 802.11 header and SSID element */ + max_probe_len -= 24 + 2; + + /* basic ssid is added only for hw_scan with and old api */ + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID) && + !(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) && + !is_sched_scan) + max_probe_len -= 32; + + return max_probe_len; +} + +int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan) +{ + int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan); + + if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) + return max_ie_len; + + /* TODO: [BUG] This function should return the maximum allowed size of + * scan IEs, however the LMAC scan api 
contains both 2GHZ and 5GHZ IEs + * in the same command. So the correct implementation of this function + * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan + * command has only 512 bytes and it would leave us with about 240 + * bytes for scan IEs, which is clearly not enough. So meanwhile + * we will report an incorrect value. This may result in a failure to + * issue a scan in unified_scan_lmac and unified_sched_scan_lmac + * functions with -ENOBUFS, if a large enough probe will be provided. + */ + return max_ie_len; +} + int iwl_mvm_scan_request(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) @@ -1100,10 +1180,11 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params) { memset(cmd, 0, ksize(cmd)); - cmd->active_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].passive; - /* TODO: Use params; now fragmented isn't used. */ - cmd->fragmented_dwell = 0; + cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; + cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; + if (params->passive_fragmented) + cmd->fragmented_dwell = + params->dwell[IEEE80211_BAND_2GHZ].passive; cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); @@ -1121,6 +1202,10 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); } + + if (iwl_mvm_rrm_scan_needed(mvm)) + cmd->scan_flags |= + cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); } int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, @@ -1148,13 +1233,12 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, if (WARN_ON(mvm->scan_cmd == NULL)) return -ENOMEM; - if (WARN_ON_ONCE(req->req.n_ssids > PROBE_OPTION_MAX || - req->ies.common_ie_len + req->ies.len[0] + - req->ies.len[1] + 24 + 2 > - SCAN_OFFLOAD_PROBE_REQ_SIZE || - req->req.n_channels > - mvm->fw->ucode_capa.n_scan_channels)) - return -1; + if (req->req.n_ssids > PROBE_OPTION_MAX || + req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] + + req->ies.len[NL80211_BAND_5GHZ] > + iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) || + req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) + return -ENOBUFS; mvm->scan_status = IWL_MVM_SCAN_OS; @@ -1176,7 +1260,7 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, if (req->req.n_ssids == 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; - cmd->scan_flags = cpu_to_le32(flags); + cmd->scan_flags |= cpu_to_le32(flags); cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | @@ -1242,10 +1326,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, if (WARN_ON(mvm->scan_cmd == NULL)) return -ENOMEM; - if (WARN_ON_ONCE(req->n_ssids > PROBE_OPTION_MAX || - ies->common_ie_len + ies->len[0] + ies->len[1] + 24 + 2 - > SCAN_OFFLOAD_PROBE_REQ_SIZE || - req->n_channels > mvm->fw->ucode_capa.n_scan_channels)) + if (req->n_ssids > PROBE_OPTION_MAX || + ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] + + ies->len[NL80211_BAND_5GHZ] > + iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) || + req->n_channels > mvm->fw->ucode_capa.n_scan_channels) return -ENOBUFS; iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, ¶ms); @@ -1273,7 +1358,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, if (req->n_ssids == 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; - 
cmd->scan_flags = cpu_to_le32(flags); + cmd->scan_flags |= cpu_to_le32(flags); cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index e843b67f2201f5c3159747c4128d08b1c28638a8..7eb78e2c240a6e7778810869b9c31dc9b1ee2850 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -177,6 +179,10 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_sta *sta; int ret = 0; + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF && + mvm->cfg->disable_dummy_notification) + sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); + /* * If an associated AP sta changed its antenna configuration, the state * will remain FULL_ON but SF parameters need to be reconsidered. diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 7635488803999cba22a501ef2990f1d96fcc974a..1731c205c81db57929da67a4527b90190316fa71 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -245,15 +247,20 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; } + mvm_sta->agg_tids = 0; ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); if (ret) return ret; - /* The first station added is the AP, the others are TDLS STAs */ - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - mvmvif->ap_sta_id = sta_id; + if (vif->type == NL80211_IFTYPE_STATION) { + if (!sta->tdls) { + WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); + mvmvif->ap_sta_id = sta_id; + } else { + WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); + } + } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); @@ -458,8 +465,9 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) +static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 qmask, enum nl80211_iftype iftype) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); @@ -474,7 +482,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, return 0; } -void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) +static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); @@ -527,8 +536,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); /* Map Aux queue to fifo - needs to happen before adding Aux station */ - iwl_trans_ac_txq_enable(mvm->trans, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST); + iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST); /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), @@ -544,6 +553,13 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) return ret; } +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); +} + /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. @@ -552,10 +568,10 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ -int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; @@ -573,19 +589,40 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Send the FW a request to remove the station from it's internal data * structures, but DO NOT remove the entry from the local data structures. 
*/ -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 qmask; + + lockdep_assert_held(&mvm->mutex); + + qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); + + /* + * The firmware defines the TFD queue mask to only be relevant + * for *unicast* queues, so the multicast (CAB) queue shouldn't + * be included. + */ + if (vif->type == NL80211_IFTYPE_AP) + qmask &= ~BIT(vif->cab_queue); + + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + ieee80211_vif_type_p2p(vif)); +} + /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. @@ -593,45 +630,47 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; - u32 qmask; + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); - qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); - ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask, - ieee80211_vif_type_p2p(vif)); + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; - ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, - mvmvif->id, mvmvif->color); + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); + return ret; } +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); +} + /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. 
*/ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta) +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); - if (ret) - return ret; + ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); + + iwl_mvm_dealloc_bcast_sta(mvm, vif); - iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } @@ -834,12 +873,16 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int queue, fifo, ret; u16 ssn; + BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) + != IWL_MAX_TID_COUNT); + buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); spin_lock_bh(&mvmsta->lock); ssn = tid_data->ssn; queue = tid_data->txq_id; tid_data->state = IWL_AGG_ON; + mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; spin_unlock_bh(&mvmsta->lock); @@ -849,8 +892,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) return -EIO; - iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid, - buf_size, ssn); + iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, + buf_size, ssn); /* * Even though in theory the peer could have different @@ -894,6 +937,8 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); + mvmsta->agg_tids &= ~BIT(tid); + switch (tid_data->state) { case IWL_AGG_ON: tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); @@ -910,8 +955,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } tid_data->ssn = 0xffff; - iwl_trans_txq_disable(mvm->trans, txq_id); - /* fall through */ + tid_data->state = IWL_AGG_OFF; + mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE; + spin_unlock_bh(&mvmsta->lock); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); + + iwl_mvm_disable_txq(mvm, txq_id); + return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: /* @@ -959,13 +1012,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->sta_id, tid, txq_id, tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; + mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); if (old_state >= IWL_AGG_ON) { if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); - iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); + iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); + + iwl_mvm_disable_txq(mvm, tid_data->txq_id); } mvm->queue_to_mac80211[tid_data->txq_id] = diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 3b1c8bd6cb54356c41c102402657cf765f617a50..d9c0d7b0e9d4159506ead71fcbe148aa1d335a81 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -297,6 +299,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? + * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -321,6 +324,7 @@ struct iwl_mvm_sta { bool tt_tx_protection; bool disable_tx; + u8 agg_tids; }; static inline struct iwl_mvm_sta * @@ -387,17 +391,15 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype); -void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta); -int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta); +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); + +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c new file mode 100644 index 0000000000000000000000000000000000000000..66c82df2d0a16eb948e011a263af0abd3d031db1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c @@ -0,0 +1,149 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include "mvm.h" +#include "time-event.h" + +void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta || IS_ERR(sta) || !sta->tdls) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, + NL80211_TDLS_TEARDOWN, + WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, + GFP_KERNEL); + } +} + +int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int count = 0; + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta || IS_ERR(sta) || !sta->tdls) + continue; + + if (vif) { + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->vif != vif) + continue; + } + + count++; + } + + return count; +} + +void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool sta_added) +{ + int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); + + /* + * Disable ps when the first TDLS sta is added and re-enable it + * when the last TDLS sta is removed + */ + if ((tdls_sta_cnt == 1 && sta_added) || + (tdls_sta_cnt == 0 && !sta_added)) + iwl_mvm_power_update_mac(mvm); +} + +void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 duration = 2 * 
vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; + + /* + * iwl_mvm_protect_session() reads directly from the device + * (the system time), so make sure it is available. + */ + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS)) + return; + + mutex_lock(&mvm->mutex); + /* Protect the session to hear the TDLS setup response on the channel */ + iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h index 0241665925f704037faeb73999989a2cadff21b2..79ab6beb6b26c38bce8127ddd3ab2cf9511bc200 100644 --- a/drivers/net/wireless/iwlwifi/mvm/testmode.h +++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 33e5041f1efc191d69b847ad729c3bb434784dfc..b7f9e61d14e276f5505060a0e1cc78e3f2d1af4c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -348,6 +350,38 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, return 0; } +static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_time_event_data *te_data = data; + struct iwl_time_event_notif *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); + return true; + } + + resp = (void *)pkt->data; + + /* te_data->uid is already set in the TIME_EVENT_CMD response */ + if (le32_to_cpu(resp->unique_id) != te_data->uid) + return false; + + IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", + te_data->uid); + if (!resp->status) + IWL_ERR(mvm, + "TIME_EVENT_NOTIFICATION received but not executed\n"); + + return true; +} + static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -441,10 +475,12 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - u32 max_delay) + u32 max_delay, bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; + struct iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); @@ -489,7 +525,28 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, TE_V2_NOTIF_HOST_EVENT_END | T2_V2_START_IMMEDIATELY); - iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + if (!wait_for_notif) { + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + return; + } + + /* + * Create notification_wait for the TIME_EVENT_NOTIFICATION to use + * right after we send the time event + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, + te_notif_response, + ARRAY_SIZE(te_notif_response), + iwl_mvm_te_notif, te_data); + + /* If TE was sent OK - wait for the notification that started */ + if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { + IWL_ERR(mvm, "Failed to add TE to protect session\n"); + iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); + } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, + TU_TO_JIFFIES(max_delay))) { + IWL_ERR(mvm, "Failed to protect session until TE\n"); + } } /* @@ -643,9 +700,9 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) iwl_mvm_roc_finished(mvm); } -int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 apply_time) +int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 apply_time) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; @@ -654,14 +711,14 @@ int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (te_data->running) { - IWL_DEBUG_TE(mvm, "CS NOA is already scheduled\n"); + IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); return -EBUSY; } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 
mvmvif->color)); - time_cmd.id = cpu_to_le32(TE_P2P_GO_CSA_NOA); + time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); time_cmd.apply_time = cpu_to_le32(apply_time); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.duration = cpu_to_le32(duration); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h index 2f48a90d4ad3e61b010609e2c1d13357c92b3427..b350e47e19dab0606737a8950d7f2604e1f90016 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -124,10 +126,12 @@ * @min_duration: will start a new session if the current session will end * in less than min_duration. * @max_delay: maximum delay before starting the time event (in TU) + * @wait_for_notif: true if it is required that a time event notification be + * waited for (that the time event has been scheduled before returning) * * This function can be used to start a session protection which means that the * fw will stay on the channel for %duration_ms milliseconds. This function - * will block (sleep) until the session starts. This function can also be used + * can block (sleep) until the session starts. This function can also be used * to extend a currently running session. * This function is meant to be used for BSS association for example, where we * want to make sure that the fw stays on the channel during the association. @@ -135,7 +139,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - u32 max_delay); + u32 max_delay, bool wait_for_notif); /** * iwl_mvm_stop_session_protection - cancel the session protection. @@ -215,7 +219,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, void iwl_mvm_roc_done_wk(struct work_struct *wk); /** - * iwl_mvm_schedule_csa_noa - request NoA for channel switch + * iwl_mvm_schedule_csa_period - request channel switch absence period * @mvm: the mvm component * @vif: the virtual interface for which the channel switch is issued * @duration: the duration of the NoA in TU. @@ -224,9 +228,9 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk); * This function is used to schedule NoA time event and is used to perform * the channel switch flow. */ -int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration, u32 apply_time); +int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 duration, u32 apply_time); /** * iwl_mvm_te_scheduled - check if the fw received the TE cmd diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index 0464599c111e07522fa5efb36fa3828639c17b0c..acca44a450864cc446d3a0a653c20e85314465fd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -67,263 +69,99 @@ #include "iwl-csr.h" #include "iwl-prph.h" -#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/ -/* VBG - Voltage Band Gap error data (temperature offset) */ -#define OTP_WP_DTS_VBG (OTP_DTS_DIODE_DEVIATION + 2) -#define MEAS_VBG_MIN_VAL 2300 -#define MEAS_VBG_MAX_VAL 3000 -#define MEAS_VBG_DEFAULT_VAL 2700 -#define DTS_DIODE_VALID(flags) (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE) -#define MIN_TEMPERATURE 0 -#define MAX_TEMPERATURE 125 -#define TEMPERATURE_ERROR (MAX_TEMPERATURE + 1) -#define PTAT_DIGITAL_VALUE_MIN_VALUE 0 -#define PTAT_DIGITAL_VALUE_MAX_VALUE 0xFF -#define DTS_VREFS_NUM 5 -static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags) -{ - return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >> - DTS_DIODE_REG_FLAGS_VREFS_ID_POS; -} +#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ -#define CALC_VREFS_MIN_DIFF 43 -#define CALC_VREFS_MAX_DIFF 51 -#define CALC_LUT_SIZE (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF) -#define CALC_LUT_INDEX_OFFSET CALC_VREFS_MIN_DIFF -#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET 23 - -/* - * @digital_value: The diode's digital-value sampled (temperature/voltage) - * @vref_low: The lower voltage-reference (the vref just below the diode's - * sampled digital-value) - * @vref_high: The higher voltage-reference (the vref just above the diode's - * sampled digital-value) - * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref) - * bits[6:2]: Reserved. - * bits[7:7]: Indicates completion of at least 1 successful sample - * since last DTS reset. - */ -struct iwl_mvm_dts_diode_bits { - u8 digital_value; - u8 vref_low; - u8 vref_high; - u8 flags; -} __packed; - -union dts_diode_results { - u32 reg_value; - struct iwl_mvm_dts_diode_bits bits; -} __packed; - -static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm) +static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { - struct iwl_nvm_section calib_sec; - const __le16 *calib; - u16 vbg; - - /* TODO: move parsing to NVM code */ - calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION]; - calib = (__le16 *)calib_sec.data; + u32 duration = mvm->thermal_throttle.params->ct_kill_duration; - vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]); + if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; - if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL) - vbg = MEAS_VBG_DEFAULT_VAL; + IWL_ERR(mvm, "Enter CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, true); - return vbg; + /* Don't schedule an exit work if we're in test mode, since + * the temperature will not change unless we manually set it + * again (or disable testing). 
+ */ + if (!mvm->temperature_test) + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies_relative(duration * HZ)); } -static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm) +static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) { - const u8 *calib; - u8 ptat, pa1, pa2, median; - - /* TODO: move parsing to NVM code */ - calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data; - ptat = calib[OTP_DTS_DIODE_DEVIATION * 2]; - pa1 = calib[OTP_DTS_DIODE_DEVIATION * 2 + 1]; - pa2 = calib[OTP_DTS_DIODE_DEVIATION * 2 + 2]; - - /* get the median: */ - if (ptat > pa1) { - if (ptat > pa2) - median = (pa1 > pa2) ? pa1 : pa2; - else - median = ptat; - } else { - if (pa1 > pa2) - median = (ptat > pa2) ? ptat : pa2; - else - median = pa1; - } + if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; - return ptat - median; + IWL_ERR(mvm, "Exit CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, false); } -static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value) +static bool iwl_mvm_temp_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) { - /* Calibrate the PTAT digital value, based on PTAT deviation data: */ - s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm); + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + int *temp = data; + struct iwl_dts_measurement_notif *notif; + int len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON_ONCE(len != sizeof(*notif))) { + IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); + return true; + } - if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE) - new_val = PTAT_DIGITAL_VALUE_MAX_VALUE; - else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE) - new_val = PTAT_DIGITAL_VALUE_MIN_VALUE; + notif = (void *)pkt->data; - return new_val; -} + *temp = le32_to_cpu(notif->temp); -static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm, - union dts_diode_results *avg_ptat) -{ - u8 vrefs_results[DTS_VREFS_NUM]; - u8 low_vref_index = 0, flags; - u32 reg; - - reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG); - memcpy(vrefs_results, ®, sizeof(reg)); - reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG); - vrefs_results[4] = reg & 0xff; - - if (avg_ptat->bits.digital_value < vrefs_results[0] || - avg_ptat->bits.digital_value > vrefs_results[4]) - return false; - - if (avg_ptat->bits.digital_value > vrefs_results[3]) - low_vref_index = 3; - else if (avg_ptat->bits.digital_value > vrefs_results[2]) - low_vref_index = 2; - else if (avg_ptat->bits.digital_value > vrefs_results[1]) - low_vref_index = 1; - - avg_ptat->bits.vref_low = vrefs_results[low_vref_index]; - avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1]; - flags = avg_ptat->bits.flags; - avg_ptat->bits.flags = - (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) | - (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID); - return true; -} + /* shouldn't be negative, but since it's s32, make sure it isn't */ + if (WARN_ON_ONCE(*temp < 0)) + *temp = 0; -/* - * return true it the results are valid, and false otherwise. 
- */ -static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm, - union dts_diode_results *avg_ptat) -{ - u32 reg; - u8 tmp; - - /* fill the diode value and pass_once with avg-reg results */ - reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG); - reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE; - avg_ptat->reg_value = reg; - - /* calibrate the PTAT digital value */ - tmp = avg_ptat->bits.digital_value; - tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp); - avg_ptat->bits.digital_value = tmp; - - /* - * fill vrefs fields, based on the avgVrefs results - * and the diode value - */ - return dts_get_adjacent_vrefs(mvm, avg_ptat) && - DTS_DIODE_VALID(avg_ptat->bits.flags); + IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", *temp); + return true; } -static s32 calculate_nic_temperature(union dts_diode_results avg_ptat, - u16 volt_band_gap) +static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) { - u32 tmp_result; - u8 vrefs_diff; - /* - * For temperature calculation (at the end, shift right by 23) - * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) } - * (D2-D1) == 43 44 45 46 47 48 49 50 51 - */ - static const u16 calc_lut[CALC_LUT_SIZE] = { - 2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828, + struct iwl_dts_measurement_cmd cmd = { + .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), }; - /* - * The diff between the high and low voltage-references is assumed - * to be strictly be in range of [60,68] - */ - vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low; - - if (vrefs_diff < CALC_VREFS_MIN_DIFF || - vrefs_diff > CALC_VREFS_MAX_DIFF) - return TEMPERATURE_ERROR; - - /* calculate the result: */ - tmp_result = - vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9); - tmp_result += avg_ptat.bits.digital_value; - tmp_result -= avg_ptat.bits.vref_high; - - /* multiply by the LUT value (based on the diff) */ - tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET]; - - /* - * Get the BandGap (the voltage refereces source) error data - * (temperature offset) - */ - tmp_result *= volt_band_gap; - - /* - * here, tmp_result value can be up to 32-bits. We want to right-shift - * it *without* sign-extend. 
- */ - tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET; - - /* - * at this point, tmp_result should be in the range: - * 200 <= tmp_result <= 365 - */ - return (s16)tmp_result - 240; + return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0, + sizeof(cmd), &cmd); } -static s32 check_nic_temperature(struct iwl_mvm *mvm) +int iwl_mvm_get_temp(struct iwl_mvm *mvm) { - u16 volt_band_gap; - union dts_diode_results avg_ptat; - - volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm); - - /* disable DTS */ - iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + struct iwl_notification_wait wait_temp_notif; + static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; + int ret, temp; - /* SV initialization */ - iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1); - iwl_write_prph(mvm->trans, DTSC_CFG_MODE, - DTSC_CFG_MODE_PERIODIC); - - /* wait for results */ - msleep(100); - if (!dts_read_ptat_avg_results(mvm, &avg_ptat)) - return TEMPERATURE_ERROR; - - /* disable DTS */ - iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + lockdep_assert_held(&mvm->mutex); - return calculate_nic_temperature(avg_ptat, volt_band_gap); -} + iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, + temp_notif, ARRAY_SIZE(temp_notif), + iwl_mvm_temp_notif, &temp); -static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) -{ - u32 duration = mvm->thermal_throttle.params->ct_kill_duration; + ret = iwl_mvm_get_temp_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret); + iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); + return ret; + } - IWL_ERR(mvm, "Enter CT Kill\n"); - iwl_mvm_set_hw_ctkill_state(mvm, true); - schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, - round_jiffies_relative(duration * HZ)); -} + ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, + IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); + if (ret) { + IWL_ERR(mvm, "Getting the temperature timed out\n"); + return ret; + } -static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) -{ - IWL_ERR(mvm, "Exit CT Kill\n"); - iwl_mvm_set_hw_ctkill_state(mvm, false); + return temp; } static void check_exit_ctkill(struct work_struct *work) @@ -338,28 +176,36 @@ static void check_exit_ctkill(struct work_struct *work) duration = tt->params->ct_kill_duration; + mutex_lock(&mvm->mutex); + + if (__iwl_mvm_mac_start(mvm)) + goto reschedule; + /* make sure the device is available for direct read/writes */ - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) + if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) { + __iwl_mvm_mac_stop(mvm); goto reschedule; + } - iwl_trans_start_hw(mvm->trans); - temp = check_nic_temperature(mvm); - iwl_trans_stop_device(mvm->trans); + temp = iwl_mvm_get_temp(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); - if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) { - IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n"); + __iwl_mvm_mac_stop(mvm); + + if (temp < 0) goto reschedule; - } + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); if (temp <= tt->params->ct_kill_exit) { + mutex_unlock(&mvm->mutex); iwl_mvm_exit_ctkill(mvm); return; } reschedule: + mutex_unlock(&mvm->mutex); schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, round_jiffies(duration * HZ)); } @@ -444,6 +290,12 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm) return; } + if (params->support_ct_kill && + temperature <= tt->params->ct_kill_exit) { + iwl_mvm_exit_ctkill(mvm); + return; + } + if (params->support_dynamic_smps) { if (!tt->dynamic_smps && temperature >= 
params->dynamic_smps_entry) { diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 9ee410bf6da243ced2b65bf61457eecbe3634916..1cb793a498ac88a221cccb9e5efa31267e4cb028 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -131,6 +133,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !is_multicast_ether_addr(ieee80211_get_DA(hdr))) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; + if ((mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && + ieee80211_action_contains_tpc(skb)) + tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; + tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted */ tx_cmd->len = cpu_to_le16((u16)skb->len); @@ -211,7 +218,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, if (info->band == IEEE80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = BIT(ANT_A) << RATE_MCS_ANT_POS; + rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; else rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; @@ -486,11 +493,11 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); + iwl_mvm_disable_txq(mvm, tid_data->txq_id); tid_data->state = IWL_AGG_OFF; /* * we can't hold the mutex - but since we are after a sequence - * point (call to iwl_trans_txq_disable), so we don't even need + * point (call to iwl_mvm_disable_txq(), so we don't even need * a memory barrier. 
*/ mvm->queue_to_mac80211[tid_data->txq_id] = @@ -866,6 +873,19 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, return 0; } +static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, + struct iwl_mvm_ba_notif *ba_notif, + struct iwl_mvm_tid_data *tid_data) +{ + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = ba_notif->txed_2_done; + info->status.ampdu_len = ba_notif->txed; + iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, + info); + info->status.status_driver_data[0] = + (void *)(uintptr_t)tid_data->reduced_tpc; +} + int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { @@ -952,21 +972,37 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, */ info->flags |= IEEE80211_TX_STAT_ACK; - if (freed == 1) { - /* this is the first skb we deliver in this batch */ - /* put the rate scaling data there */ - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = ba_notif->txed_2_done; - info->status.ampdu_len = ba_notif->txed; - iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, - info); - info->status.status_driver_data[0] = - (void *)(uintptr_t)tid_data->reduced_tpc; - } + /* this is the first skb we deliver in this batch */ + /* put the rate scaling data there */ + if (freed == 1) + iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data); } spin_unlock_bh(&mvmsta->lock); + /* We got a BA notif with 0 acked or scd_ssn didn't progress which is + * possible (i.e. first MPDU in the aggregation wasn't acked) + * Still it's important to update RS about sent vs. acked. + */ + if (skb_queue_empty(&reclaimed_skbs)) { + struct ieee80211_tx_info ba_info = {}; + struct ieee80211_chanctx_conf *chanctx_conf = NULL; + + if (mvmsta->vif) + chanctx_conf = + rcu_dereference(mvmsta->vif->chanctx_conf); + + if (WARN_ON_ONCE(!chanctx_conf)) + goto out; + + ba_info.band = chanctx_conf->def.chan->band; + iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data); + + IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); + iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info); + } + +out: rcu_read_unlock(); while (!skb_queue_empty(&reclaimed_skbs)) { diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index ac249da8a22b0840ce8de74638ba1969a119c67c..8021f6eec27f7d8a918b41404383d85cc317b7ca 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -387,15 +389,19 @@ struct iwl_error_event_table { struct iwl_umac_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ - u32 pc; /* program counter */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ - u32 line; /* source code line of error */ - u32 umac_ver; /* umac version */ + u32 data3; /* error-specific data */ + u32 umac_fw_ver; /* UMAC version */ + u32 umac_fw_api_ver; /* UMAC FW API ver */ + u32 frame_pointer; /* core register 27*/ + u32 stack_pointer; /* core register 28 */ + u32 cmd_header; /* latest host cmd sent to UMAC */ + u32 nic_isr_pref; /* ISR status register */ } __packed; #define ERROR_START_OFFSET (1 * sizeof(u32)) @@ -409,7 +415,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) base = mvm->umac_error_event_table; - if (base < 0x800000 || base >= 0x80C000) { + if (base < 0x800000) { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, @@ -428,14 +434,19 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc); IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver); + IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver); + IWL_ERR(mvm, "0x%08X | umac api version\n", table.umac_fw_api_ver); + IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); + IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); + IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); + IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) @@ -519,6 +530,52 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); } +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg) +{ + if (iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 1, + .window = cfg->frame_limit, + .sta_id = cfg->sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = cfg->fifo, + .aggregate = cfg->aggregate, + .flags = IWL_SCD_FLAGS_DQA_ENABLED, + .tid = cfg->tid, + .control = IWL_SCD_CONTROL_SET_SSN, + }; + int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, + "Failed to configure queue %d on FIFO %d\n", + queue, cfg->fifo); + } + + iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, + iwl_mvm_is_dqa_supported(mvm) ? 
NULL : cfg); +} + +void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue) +{ + iwl_trans_txq_disable(mvm->trans, queue, + !iwl_mvm_is_dqa_supported(mvm)); + + if (iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 0, + }; + int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", + queue, ret); + } +} + /** * iwl_mvm_send_lq_cmd() - Send link quality command * @init: This command is sent as part of station initialization right diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 073a68b97a7224233c9c4a3a18af4f0f4f3afecd..6ced8549eb3a26d1e048f8d63cc8da26bad2caa1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -273,6 +275,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, @@ -316,6 +320,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, @@ -403,6 +409,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 78f72c34438aaabd7be9f94386a4e093689a2906..1aea6b66c594f20be0457c04a524e9db00fe8a7b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
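
The two helpers added to utils.c above, iwl_mvm_enable_txq() and iwl_mvm_disable_txq(), hide the DQA split from their callers: when DQA is supported the firmware is told about the queue through SCD_QUEUE_CFG and the transport is handed a NULL config, otherwise the transport programs the scheduler itself. A minimal caller-side sketch, using only the signatures visible in this patch; the surrounding functions, the FIFO number and the frame limit are illustrative assumptions, not actual driver call sites:

    static void example_open_agg_queue(struct iwl_mvm *mvm, int queue,
                                       u8 sta_id, u8 tid, u16 ssn)
    {
            struct iwl_trans_txq_scd_cfg cfg = {
                    .fifo = 1,              /* assumed FIFO index */
                    .sta_id = sta_id,
                    .tid = tid,
                    .frame_limit = 64,      /* assumed BA window size */
                    .aggregate = true,
            };

            /* Programs the SCD directly, or sends SCD_QUEUE_CFG under DQA */
            iwl_mvm_enable_txq(mvm, queue, ssn, &cfg);
    }

    static void example_close_queue(struct iwl_mvm *mvm, int queue)
    {
            /* Counterpart: tears the queue down and, under DQA, notifies
             * the firmware asynchronously */
            iwl_mvm_disable_txq(mvm, queue);
    }
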
@@ -256,6 +257,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) * @cmd_queue - command queue number * @rx_buf_size_8k: 8 kB RX buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) + * @scd_set_active: should the transport configure the SCD for HCMD queue * @rx_page_order: page order for receive buffer size * @wd_timeout: queue watchdog timeout (jiffies) * @reg_lock: protect hw register access @@ -305,6 +307,7 @@ struct iwl_trans_pcie { bool rx_buf_size_8k; bool bc_table_dword; + bool scd_set_active; u32 rx_page_order; const char *const *command_names; @@ -364,9 +367,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn); -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a2698e5e062c990524d12e1d112818977aeccc2b..7b7e2f223fb230fad922e7fcaec08ec8ae7eb5ac 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -639,7 +640,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); if (reclaim) { - kfree(txq->entries[cmd_index].free_buf); + kzfree(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 06e04aaf61eea9609d619be4e0afdf407dd307d4..1393bac0025c59de0b24b1453eb7932bebda1b22 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1169,6 +1171,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; + trans_pcie->scd_set_active = trans_cfg->scd_set_active; /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. @@ -2187,7 +2190,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) trans->hw_rev = (trans->hw_rev & 0xfff0) | - ((trans->hw_rev << 2) & 0xc); + (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 6acccb19c4f3030956e0d701a037c1227d7121b1..eb8e2984c5e90f6d8f498e69b1cee4420dffb85d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -34,6 +35,7 @@ #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-io.h" +#include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" /* FIXME: need to abstract out TX command (once we know what it looks like) */ @@ -618,8 +620,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc array of command/tx buffers */ if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) { - kfree(txq->entries[i].cmd); - kfree(txq->entries[i].free_buf); + kzfree(txq->entries[i].cmd); + kzfree(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ @@ -644,17 +646,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) memset(txq, 0, sizeof(*txq)); } -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - */ -static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) -{ - struct iwl_trans_pcie __maybe_unused *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - iwl_write_prph(trans, SCD_TXFACT, mask); -} - void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -692,7 +683,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) trans_pcie->cmd_fifo); /* Activate all Tx DMA/FIFO channels */ - iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7)); + iwl_scd_activate_fifos(trans); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) @@ -745,7 +736,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) /* Turn off all Tx DMA fifos */ spin_lock(&trans_pcie->irq_lock); - iwl_pcie_txq_set_sched(trans, 0); + iwl_scd_deactivate_fifos(trans); /* Stop each Tx DMA channel, and wait for it to be idle */ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { @@ -886,7 +877,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) spin_lock(&trans_pcie->irq_lock); /* Turn off all Tx DMA fifos */ - iwl_write_prph(trans, SCD_TXFACT, 0); + iwl_scd_deactivate_fifos(trans); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, @@ -1072,55 +1063,53 
@@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, return 0; } -static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, - u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ - iwl_write_prph(trans, - SCD_QUEUE_STATUS_BITS(txq_id), - (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - /* Receiver address (actually, Rx station's index into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn) +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int fifo = -1; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - /* Stop this Tx queue before configuring it */ - iwl_pcie_txq_set_inactive(trans, txq_id); + if (cfg) { + fifo = cfg->fifo; - /* Set this queue as a chain-building queue unless it is CMD queue */ - if (txq_id != trans_pcie->cmd_queue) - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); + /* Disable the scheduler prior configuring the cmd queue */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, 0); - /* If this queue is mapped to a certain station: it is an AGG queue */ - if (sta_id >= 0) { - u16 ra_tid = BUILD_RAxTID(sta_id, tid); + /* Stop this Tx queue before configuring it */ + iwl_scd_txq_set_inactive(trans, txq_id); - /* Map receiver-address / traffic-ID to this queue */ - iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + /* Set this queue as a chain-building queue unless it is CMD */ + if (txq_id != trans_pcie->cmd_queue) + iwl_scd_txq_set_chain(trans, txq_id); - /* enable aggregations for the queue */ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - trans_pcie->txq[txq_id].ampdu = true; - } else { - /* - * disable aggregations for the queue, this will also make the - * ra_tid mapping configuration irrelevant since it is now a - * non-AGG queue. - */ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); + if (cfg->aggregate) { + u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); + + /* Map receiver-address / traffic-ID to this queue */ + iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + + /* enable aggregations for the queue */ + iwl_scd_txq_enable_agg(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = true; + } else { + /* + * disable aggregations for the queue, this will also + * make the ra_tid mapping configuration irrelevant + * since it is now a non-AGG queue. + */ + iwl_scd_txq_disable_agg(trans, txq_id); - ssn = trans_pcie->txq[txq_id].q.read_ptr; + ssn = trans_pcie->txq[txq_id].q.read_ptr; + } } /* Place first TFD at index corresponding to start sequence number. 
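
The trans_pcie->scd_set_active flag consumed in the hunk above is supplied by the op-mode through the transport configuration. A short sketch of the producer side, assuming the usual iwl_trans_configure() path; scd_set_active is the field added by this patch, while the surrounding function and the command queue number are placeholders for illustration:

    static void example_configure_trans(struct iwl_trans *trans,
                                        struct iwl_op_mode *op_mode)
    {
            struct iwl_trans_config trans_cfg = {
                    .op_mode = op_mode,
                    .cmd_queue = 9,         /* placeholder HCMD queue number */
                    /* let the transport deactivate/reactivate the scheduler
                     * around command queue setup, as done in the hunk above */
                    .scd_set_active = true,
            };

            iwl_trans_configure(trans, &trans_cfg);
    }
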
@@ -1128,32 +1117,44 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (ssn & 0xff) | (txq_id << 8)); - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + if (cfg) { + u8 frame_limit = cfg->frame_limit; + + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (ssn & 0xff) | (txq_id << 8)); + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); - /* Set up Tx window size and frame limit for this queue */ - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + /* Set up Tx window size and frame limit for this queue */ + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); + iwl_trans_write_mem32(trans, + trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + /* enable the scheduler for this queue (only) */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, BIT(txq_id)); + } + trans_pcie->txq[txq_id].active = true; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", txq_id, fifo, ssn & 0xff); } -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, + bool configure_scd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 stts_addr = trans_pcie->scd_base_addr + @@ -1172,10 +1173,12 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) return; } - iwl_pcie_txq_set_inactive(trans, txq_id); + if (configure_scd) { + iwl_scd_txq_set_inactive(trans, txq_id); - iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, - ARRAY_SIZE(zero_val)); + iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, + ARRAY_SIZE(zero_val)); + } iwl_pcie_txq_unmap(trans, txq_id); trans_pcie->txq[txq_id].ampdu = false; @@ -1406,7 +1409,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) - kfree(txq->entries[idx].free_buf); + kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 40ab7f0b7be00a1a96b78884e534afa9fa55b3cb..818b1edaaa9aa5d38f3176828ca2ed3802fb3fde 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -653,6 +653,7 @@ static int lbs_ret_scan(struct lbs_private *priv, 
unsigned long dummy, if (channel && !(channel->flags & IEEE80211_CHAN_DISABLED)) { bss = cfg80211_inform_bss(wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, get_unaligned_le64(tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), @@ -1754,6 +1755,7 @@ static void lbs_join_post(struct lbs_private *priv, bss = cfg80211_inform_bss(priv->wdev->wiphy, params->chandef.chan, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, capability, diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1326f61218351b275060e524e32086f9d125fb32..babbdc1ce741c62024bc6220fdcf2a7bd3487790 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2045,8 +2045,6 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, hw->flags = IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_SUPPORTS_STATIC_SMPS | - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_WANT_MONITOR_VIF | IEEE80211_HW_QUEUE_CONTROL | @@ -2059,8 +2057,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2, WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH; - hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; - hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | + NL80211_FEATURE_STATIC_SMPS | + NL80211_FEATURE_DYNAMIC_SMPS; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 06a2c215ef5e13129a53e72add714c05cbb77489..40057079ffb9537eae9549518be3e5df29f1a16b 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -183,6 +183,15 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, if (!tbl) return; + spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags); + priv->adapter->rx_locked = true; + if (priv->adapter->rx_processing) { + spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags); + flush_workqueue(priv->adapter->rx_workqueue); + } else { + spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags); + } + start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); @@ -194,6 +203,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, kfree(tbl->rx_reorder_ptr); kfree(tbl); + + spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags); + priv->adapter->rx_locked = false; + spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags); + } /* diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index ecdf34505b54615eeb29f20b4f6a0f34805ad584..e70d0df9b0da4449c58efa03a8a047eeae9d173b 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -9,12 +9,12 @@ config MWIFIEX mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897" depends on MWIFIEX && MMC select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8786/8787/8797 chipsets with SDIO interface. + 8786/8787/8797/8887/8897 chipsets with SDIO interface. If you choose to build it as a module, it will be called mwifiex_sdio. 
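
The rx_locked/rx_processing handshake added to 11n_rxreorder.c above pairs with the RX work queue introduced later in this series (main.c/main.h below). Condensed into one place, with names taken from these hunks and error paths omitted, the two sides look roughly like this:

    /* Writer: quiesce the RX worker before tearing down a reorder table */
    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    adapter->rx_locked = true;                       /* block new RX batches */
    if (adapter->rx_processing) {
            spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
            flush_workqueue(adapter->rx_workqueue);  /* drain in-flight batch */
    } else {
            spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
    }
    /* ... dispatch pending packets and free the reorder table ... */
    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    adapter->rx_locked = false;                      /* let the worker resume */
    spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);

    /* Reader (mwifiex_process_rx): back off while a teardown is in progress */
    spin_lock_irqsave(&adapter->rx_proc_lock, flags);
    if (adapter->rx_processing || adapter->rx_locked) {
            spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
            return 0;
    }
    adapter->rx_processing = true;
    spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
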
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e2e6bf13c2d8c0858656f494c839da81d0731fa5..0dd672954ad10316bec599a0f2eeb56b70e66c75 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -246,7 +246,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, } if (priv->roc_cfg.cookie) { - wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n", + wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n", priv->roc_cfg.cookie); return -EBUSY; } @@ -1557,6 +1557,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) band)); bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, + CFG80211_BSS_FTYPE_UNKNOWN, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); cfg80211_put_bss(priv->wdev->wiphy, bss); @@ -1935,13 +1936,6 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); - if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && - atomic_read(&priv->wmm.tx_pkts_queued) >= - MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) { - dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n"); - return -EBUSY; - } - /* Block scan request if scan operation or scan cleanup when interface * is disabled is in process */ @@ -1980,7 +1974,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, user_scan_cfg->chan_list[i].chan_number = chan->hw_value; user_scan_cfg->chan_list[i].radio_type = chan->band; - if (chan->flags & IEEE80211_CHAN_NO_IR) + if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids) user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_PASSIVE; else @@ -1990,6 +1984,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, user_scan_cfg->chan_list[i].scan_time = 0; } + if (priv->adapter->scan_chan_gap_enabled && + mwifiex_is_any_intf_active(priv)) + user_scan_cfg->scan_chan_gap = + priv->adapter->scan_chan_gap_time; + ret = mwifiex_scan_networks(priv, user_scan_cfg); kfree(user_scan_cfg); if (ret) { @@ -2914,7 +2913,6 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->features |= NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_INACTIVITY_TIMER | - NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_NEED_OBSS_SCAN; /* Reserve space for mwifiex specific private data for BSS */ diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index baf0aab63c04d17bb1d01501e0c29f524b4a5f57..85597200badc0c4967f9a28bbe1188869891c58f 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -1470,7 +1470,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_header *tlv; - struct hw_spec_fw_api_rev *api_rev; + struct hw_spec_api_rev *api_rev; u16 resp_size, api_id; int i, left_len, parsed_len = 0; @@ -1538,23 +1538,30 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, while (left_len > sizeof(struct mwifiex_ie_types_header)) { tlv = (void *)&hw_spec->tlvs + parsed_len; switch (le16_to_cpu(tlv->type)) { - case TLV_TYPE_FW_API_REV: - api_rev = (struct hw_spec_fw_api_rev *)tlv; + case TLV_TYPE_API_REV: + api_rev = (struct hw_spec_api_rev *)tlv; api_id = le16_to_cpu(api_rev->api_id); switch (api_id) { case KEY_API_VER_ID: - adapter->fw_key_api_major_ver = + adapter->key_api_major_ver = api_rev->major_ver; - adapter->fw_key_api_minor_ver = + adapter->key_api_minor_ver = 
api_rev->minor_ver; dev_dbg(adapter->dev, - "fw_key_api v%d.%d\n", - adapter->fw_key_api_major_ver, - adapter->fw_key_api_minor_ver); + "key_api v%d.%d\n", + adapter->key_api_major_ver, + adapter->key_api_minor_ver); + break; + case FW_API_VER_ID: + adapter->fw_api_ver = + api_rev->major_ver; + dev_dbg(adapter->dev, + "Firmware api version %d\n", + adapter->fw_api_ver); break; default: dev_warn(adapter->dev, - "Unknown FW api_id: %d\n", + "Unknown api_id: %d\n", api_id); break; } @@ -1567,7 +1574,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, } parsed_len += le16_to_cpu(tlv->len) + sizeof(struct mwifiex_ie_types_header); - left_len -= parsed_len; + left_len -= le16_to_cpu(tlv->len) + + sizeof(struct mwifiex_ie_types_header); } } @@ -1605,5 +1613,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, adapter->if_ops.update_mp_end_port(adapter, le16_to_cpu(hw_spec->mp_end_port)); + if (adapter->fw_api_ver == MWIFIEX_FW_V15) + adapter->scan_chan_gap_enabled = true; + return 0; } diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index 0e03fe39fc35ea64fca0933a20b46966686a5208..e0d00a7f0ec314f9dd03e288af38cbd5c6aa1731 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -48,8 +48,8 @@ #define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16 #define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64 #define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64 -#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48 -#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32 +#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 64 +#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 64 #define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 49da2d53d29455830fb9958bc4ab7dbbcd5d5666..1eb61739071fd828bac55865a9df41709e1ccc08 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -83,7 +83,7 @@ enum KEY_TYPE_ID { #define WPA_PN_SIZE 8 #define KEY_PARAMS_FIXED_LEN 10 #define KEY_INDEX_MASK 0xf -#define FW_KEY_API_VER_MAJOR_V2 2 +#define KEY_API_VER_MAJOR_V2 2 #define KEY_MCAST BIT(0) #define KEY_UNICAST BIT(1) @@ -170,7 +170,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) -#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199) +#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197) +#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -653,6 +654,12 @@ struct mwifiex_ie_types_num_probes { __le16 num_probes; } __packed; +struct mwifiex_ie_types_scan_chan_gap { + struct mwifiex_ie_types_header header; + /* time gap in TUs to be used between two consecutive channels scan */ + __le16 chan_gap; +} __packed; + struct mwifiex_ie_types_wildcard_ssid_params { struct mwifiex_ie_types_header header; u8 max_ssid_length; @@ -844,11 +851,12 @@ struct host_cmd_ds_802_11_ps_mode_enh { } params; } __packed; -enum FW_API_VER_ID { +enum API_VER_ID { KEY_API_VER_ID = 1, + FW_API_VER_ID = 2, }; -struct hw_spec_fw_api_rev { +struct hw_spec_api_rev { struct mwifiex_ie_types_header header; __le16 api_id; u8 major_ver; @@ -1248,6 +1256,7 @@ struct mwifiex_user_scan_cfg { u8 num_ssids; /* Variable number (fixed maximum) of channels to scan up */ struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX]; + u16 scan_chan_gap; } 
__packed; struct ie_body { diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 269a277d0a2e6072c092f066c9b23943d8075419..580aa45ec4bcfe9716a3271661e98d8fa1c7c82a 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -212,6 +212,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->specific_scan_time = MWIFIEX_SPECIFIC_SCAN_CHAN_TIME; adapter->active_scan_time = MWIFIEX_ACTIVE_SCAN_CHAN_TIME; adapter->passive_scan_time = MWIFIEX_PASSIVE_SCAN_CHAN_TIME; + adapter->scan_chan_gap_time = MWIFIEX_DEF_SCAN_CHAN_GAP_TIME; adapter->scan_probes = 1; @@ -280,10 +281,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); adapter->arp_filter_size = 0; adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; - adapter->empty_tx_q_cnt = 0; adapter->ext_scan = true; - adapter->fw_key_api_major_ver = 0; - adapter->fw_key_api_minor_ver = 0; + adapter->key_api_major_ver = 0; + adapter->key_api_minor_ver = 0; } /* @@ -447,8 +447,10 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&adapter->cmd_free_q_lock); spin_lock_init(&adapter->cmd_pending_q_lock); spin_lock_init(&adapter->scan_pending_q_lock); + spin_lock_init(&adapter->rx_proc_lock); skb_queue_head_init(&adapter->usb_rx_data_q); + skb_queue_head_init(&adapter->rx_data_q); for (i = 0; i < adapter->priv_num; ++i) { INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); @@ -614,6 +616,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) int ret = -EINPROGRESS; struct mwifiex_private *priv; s32 i; + unsigned long flags; struct sk_buff *skb; /* mwifiex already shutdown */ @@ -648,6 +651,21 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) } } + spin_lock_irqsave(&adapter->rx_proc_lock, flags); + + while ((skb = skb_dequeue(&adapter->rx_data_q))) { + struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); + + atomic_dec(&adapter->rx_pending); + priv = adapter->priv[rx_info->bss_num]; + if (priv) + priv->stats.rx_dropped++; + + dev_kfree_skb_any(skb); + } + + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + spin_lock(&adapter->mwifiex_lock); if (adapter->if_ops.data_complete) { diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index dfa37eadc4db566ecc75db890c838d8d04600917..d5070c444fe1fcb53ff95a67024b12013ed547dc 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -28,91 +28,6 @@ const char driver_version[] = "mwifiex " VERSION " (%s) "; static char *cal_data_cfg; module_param(cal_data_cfg, charp, 0); -static void scan_delay_timer_fn(unsigned long data) -{ - struct mwifiex_private *priv = (struct mwifiex_private *)data; - struct mwifiex_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *cmd_node, *tmp_node; - spinlock_t *scan_q_lock = &adapter->scan_pending_q_lock; - unsigned long flags; - - if (adapter->surprise_removed) - return; - - if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT || - !adapter->scan_processing) { - /* - * Abort scan operation by cancelling all pending scan - * commands - */ - spin_lock_irqsave(scan_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, list) { - list_del(&cmd_node->list); - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } - spin_unlock_irqrestore(scan_q_lock, flags); - - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->scan_processing = false; - 
adapter->scan_delay_cnt = 0; - adapter->empty_tx_q_cnt = 0; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - - if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } else { - priv->scan_aborting = false; - dev_dbg(adapter->dev, "info: scan already aborted\n"); - } - goto done; - } - - if (!atomic_read(&priv->adapter->is_tx_received)) { - adapter->empty_tx_q_cnt++; - if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { - /* - * No Tx traffic for 200msec. Get scan command from - * scan pending queue and put to cmd pending queue to - * resume scan operation - */ - adapter->scan_delay_cnt = 0; - adapter->empty_tx_q_cnt = 0; - spin_lock_irqsave(scan_q_lock, flags); - - if (list_empty(&adapter->scan_pending_q)) { - spin_unlock_irqrestore(scan_q_lock, flags); - goto done; - } - - cmd_node = list_first_entry(&adapter->scan_pending_q, - struct cmd_ctrl_node, list); - list_del(&cmd_node->list); - spin_unlock_irqrestore(scan_q_lock, flags); - - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); - queue_work(adapter->workqueue, &adapter->main_work); - goto done; - } - } else { - adapter->empty_tx_q_cnt = 0; - } - - /* Delay scan operation further by 20msec */ - mod_timer(&priv->scan_delay_timer, jiffies + - msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); - adapter->scan_delay_cnt++; - -done: - if (atomic_read(&priv->adapter->is_tx_received)) - atomic_set(&priv->adapter->is_tx_received, false); - - return; -} - /* * This function registers the device and performs all the necessary * initializations. @@ -160,10 +75,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, adapter->priv[i]->adapter = adapter; adapter->priv_num++; - - setup_timer(&adapter->priv[i]->scan_delay_timer, - scan_delay_timer_fn, - (unsigned long)adapter->priv[i]); } mwifiex_init_lock_list(adapter); @@ -207,7 +118,6 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { mwifiex_free_curr_bcn(adapter->priv[i]); - del_timer_sync(&adapter->priv[i]->scan_delay_timer); kfree(adapter->priv[i]); } } @@ -216,6 +126,38 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) return 0; } +static int mwifiex_process_rx(struct mwifiex_adapter *adapter) +{ + unsigned long flags; + struct sk_buff *skb; + + spin_lock_irqsave(&adapter->rx_proc_lock, flags); + if (adapter->rx_processing || adapter->rx_locked) { + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + goto exit_rx_proc; + } else { + adapter->rx_processing = true; + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + } + + /* Check for Rx data */ + while ((skb = skb_dequeue(&adapter->rx_data_q))) { + atomic_dec(&adapter->rx_pending); + if (adapter->delay_main_work && + (atomic_read(&adapter->rx_pending) < LOW_RX_PENDING)) { + adapter->delay_main_work = false; + queue_work(adapter->workqueue, &adapter->main_work); + } + mwifiex_handle_rx_packet(adapter, skb); + } + spin_lock_irqsave(&adapter->rx_proc_lock, flags); + adapter->rx_processing = false; + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + +exit_rx_proc: + return 0; +} + /* * The main process. * @@ -253,6 +195,19 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)) break; + /* If we process interrupts first, it would increase RX pending + * even further. 
Avoid this by checking if rx_pending has + * crossed high threshold and schedule rx work queue + * and then process interrupts + */ + if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING) { + adapter->delay_main_work = true; + if (!adapter->rx_processing) + queue_work(adapter->rx_workqueue, + &adapter->rx_work); + break; + } + /* Handle pending interrupt if any */ if (adapter->int_status) { if (adapter->hs_activated) @@ -261,6 +216,9 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) adapter->if_ops.process_int_status(adapter); } + if (adapter->rx_work_enabled && adapter->data_received) + queue_work(adapter->rx_workqueue, &adapter->rx_work); + /* Need to wake up the card ? */ if ((adapter->ps_state == PS_STATE_SLEEP) && (adapter->pm_wakeup_card_req && @@ -273,6 +231,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) } if (IS_CARD_RX_RCVD(adapter)) { + adapter->data_received = false; adapter->pm_wakeup_fw_try = false; if (adapter->ps_state == PS_STATE_SLEEP) adapter->ps_state = PS_STATE_AWAKE; @@ -284,8 +243,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) adapter->tx_lock_flag) break; - if ((adapter->scan_processing && - !adapter->scan_delay_cnt) || adapter->data_sent || + if ((!adapter->scan_chan_gap_enabled && + adapter->scan_processing) || adapter->data_sent || mwifiex_wmm_lists_empty(adapter)) { if (adapter->cmd_sent || adapter->curr_cmd || (!is_command_pending(adapter))) @@ -339,7 +298,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) } } - if ((!adapter->scan_processing || adapter->scan_delay_cnt) && + if ((adapter->scan_chan_gap_enabled || + !adapter->scan_processing) && !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) { mwifiex_wmm_process_tx(adapter); if (adapter->hs_activated) { @@ -366,7 +326,8 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter) } while (true); spin_lock_irqsave(&adapter->main_proc_lock, flags); - if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) { + if (!adapter->delay_main_work && + (adapter->int_status || IS_CARD_RX_RCVD(adapter))) { spin_unlock_irqrestore(&adapter->main_proc_lock, flags); goto process_start; } @@ -407,6 +368,12 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) flush_workqueue(adapter->workqueue); destroy_workqueue(adapter->workqueue); adapter->workqueue = NULL; + + if (adapter->rx_workqueue) { + flush_workqueue(adapter->rx_workqueue); + destroy_workqueue(adapter->rx_workqueue); + adapter->rx_workqueue = NULL; + } } /* @@ -598,9 +565,6 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) atomic_inc(&priv->adapter->tx_pending); mwifiex_wmm_add_buf_txqueue(priv, skb); - if (priv->adapter->scan_delay_cnt) - atomic_set(&priv->adapter->is_tx_received, true); - queue_work(priv->adapter->workqueue, &priv->adapter->main_work); return 0; @@ -823,6 +787,21 @@ int is_command_pending(struct mwifiex_adapter *adapter) return !is_cmd_pend_q_empty; } +/* + * This is the RX work queue function. + * + * It handles the RX operations. + */ +static void mwifiex_rx_work_queue(struct work_struct *work) +{ + struct mwifiex_adapter *adapter = + container_of(work, struct mwifiex_adapter, rx_work); + + if (adapter->surprise_removed) + return; + mwifiex_process_rx(adapter); +} + /* * This is the main work queue function. 
* @@ -879,6 +858,11 @@ mwifiex_add_card(void *card, struct semaphore *sem, adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; + if (num_possible_cpus() > 1) { + adapter->rx_work_enabled = true; + pr_notice("rx work enabled, cpus %d\n", num_possible_cpus()); + } + adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); @@ -886,6 +870,18 @@ mwifiex_add_card(void *card, struct semaphore *sem, goto err_kmalloc; INIT_WORK(&adapter->main_work, mwifiex_main_work_queue); + + if (adapter->rx_work_enabled) { + adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE", + WQ_HIGHPRI | + WQ_MEM_RECLAIM | + WQ_UNBOUND, 1); + if (!adapter->rx_workqueue) + goto err_kmalloc; + + INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); + } + if (adapter->if_ops.iface_work) INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index a2733b1e63f9ec0374f38ba5908b18e52345dfd6..e2635747d9669c21f30f08780a2024c70af2245e 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -58,6 +58,9 @@ enum { #define MAX_TX_PENDING 100 #define LOW_TX_PENDING 80 +#define HIGH_RX_PENDING 50 +#define LOW_RX_PENDING 20 + #define MWIFIEX_UPLD_SIZE (2312) #define MAX_EVENT_SIZE 2048 @@ -84,17 +87,12 @@ enum { #define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110 #define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30 #define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30 +#define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME 50 #define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) -#define MWIFIEX_MAX_SCAN_DELAY_CNT 50 -#define MWIFIEX_MAX_EMPTY_TX_Q_CNT 10 -#define MWIFIEX_SCAN_DELAY_MSEC 20 - -#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN 2 - #define RSN_GTK_OUI_OFFSET 2 #define MWIFIEX_OUI_NOT_PRESENT 0 @@ -415,6 +413,7 @@ struct mwifiex_roc_cfg { #define FW_DUMP_MAX_NAME_LEN 8 #define FW_DUMP_HOST_READY 0xEE #define FW_DUMP_DONE 0xFF +#define FW_DUMP_READ_DONE 0xFE struct memory_type_mapping { u8 mem_name[FW_DUMP_MAX_NAME_LEN]; @@ -547,7 +546,6 @@ struct mwifiex_private { u8 nick_name[16]; u16 current_key_index; struct semaphore async_sem; - u8 report_scan_result; struct cfg80211_scan_request *scan_request; u8 cfg_bssid[6]; struct wps wps; @@ -561,7 +559,6 @@ struct mwifiex_private { u16 proberesp_idx; u16 assocresp_idx; u16 rsn_idx; - struct timer_list scan_delay_timer; u8 ap_11n_enabled; u8 ap_11ac_enabled; u32 mgmt_frame_mask; @@ -721,6 +718,12 @@ struct mwifiex_adapter { atomic_t cmd_pending; struct workqueue_struct *workqueue; struct work_struct main_work; + struct workqueue_struct *rx_workqueue; + struct work_struct rx_work; + bool rx_work_enabled; + bool rx_processing; + bool delay_main_work; + bool rx_locked; struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM]; /* spin lock for init/shutdown */ spinlock_t mwifiex_lock; @@ -761,6 +764,8 @@ struct mwifiex_adapter { struct list_head scan_pending_q; /* spin lock for scan_pending_q */ spinlock_t scan_pending_q_lock; + /* spin lock for RX processing routine */ + spinlock_t rx_proc_lock; struct sk_buff_head usb_rx_data_q; u32 scan_processing; u16 region_code; @@ -770,6 +775,7 @@ struct mwifiex_adapter { u16 specific_scan_time; u16 active_scan_time; u16 passive_scan_time; + u16 scan_chan_gap_time; u8 fw_bands; u8 adhoc_start_band; u8 config_bands; @@ -815,8 +821,6 @@ struct mwifiex_adapter { spinlock_t queue_lock; /* lock for tx queues */ u8 
country_code[IEEE80211_COUNTRY_STRING_LEN]; u16 max_mgmt_ie_index; - u8 scan_delay_cnt; - u8 empty_tx_q_cnt; const struct firmware *cal_data; struct device_node *dt_node; @@ -828,17 +832,18 @@ struct mwifiex_adapter { u32 usr_dot_11ac_dev_cap_a; u32 usr_dot_11ac_mcs_support; - atomic_t is_tx_received; atomic_t pending_bridged_pkts; struct semaphore *card_sem; bool ext_scan; u8 fw_api_ver; - u8 fw_key_api_major_ver, fw_key_api_minor_ver; + u8 key_api_major_ver, key_api_minor_ver; struct work_struct iface_work; unsigned long iface_work_flags; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; u8 curr_mem_idx; + bool scan_chan_gap_enabled; + struct sk_buff_head rx_data_q; }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); @@ -1139,6 +1144,25 @@ mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv) return priv->csa_chan; } +static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv) +{ + struct mwifiex_private *priv_num; + int i; + + for (i = 0; i < priv->adapter->priv_num; i++) { + priv_num = priv->adapter->priv[i]; + if (priv_num) { + if ((GET_BSS_ROLE(priv_num) == MWIFIEX_BSS_ROLE_UAP && + priv_num->bss_started) || + (GET_BSS_ROLE(priv_num) == MWIFIEX_BSS_ROLE_STA && + priv_num->media_connected)) + return 1; + } + } + + return 0; +} + int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, u32 func_init_shutdown); int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); @@ -1274,6 +1298,7 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv); bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv); u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, u32 pri_chan, u8 chan_bw); +int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index ff0545888dd062eae8f125ee9a339314b8873426..c3a20f94f3c99f86fbbfdc6b23faba01b54ee5e5 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -42,6 +42,10 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { {"DTCM", NULL, 0, 0xF1}, {"SQRAM", NULL, 0, 0xF2}, {"IRAM", NULL, 0, 0xF3}, + {"APU", NULL, 0, 0xF4}, + {"CIU", NULL, 0, 0xF5}, + {"ICU", NULL, 0, 0xF6}, + {"MAC", NULL, 0, 0xF7}, }; static int @@ -1271,12 +1275,26 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) */ pkt_len = *((__le16 *)skb_data->data); rx_len = le16_to_cpu(pkt_len); - skb_put(skb_data, rx_len); - dev_dbg(adapter->dev, - "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", - card->rxbd_rdptr, wrptr, rx_len); - skb_pull(skb_data, INTF_HEADER_LEN); - mwifiex_handle_rx_packet(adapter, skb_data); + if (WARN_ON(rx_len <= INTF_HEADER_LEN || + rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) { + dev_err(adapter->dev, + "Invalid RX len %d, Rd=%#x, Wr=%#x\n", + rx_len, card->rxbd_rdptr, wrptr); + dev_kfree_skb_any(skb_data); + } else { + skb_put(skb_data, rx_len); + dev_dbg(adapter->dev, + "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", + card->rxbd_rdptr, wrptr, rx_len); + skb_pull(skb_data, INTF_HEADER_LEN); + if (adapter->rx_work_enabled) { + skb_queue_tail(&adapter->rx_data_q, skb_data); + adapter->data_received = true; + atomic_inc(&adapter->rx_pending); + } else { + mwifiex_handle_rx_packet(adapter, skb_data); + } + } skb_tmp = dev_alloc_skb(MWIFIEX_RX_DATA_BUF_SIZE); if (!skb_tmp) { @@ -1718,6 +1736,13 @@ static int mwifiex_pcie_process_event_ready(struct 
mwifiex_adapter *adapter) buffer is released. This is just to make things simpler, we need to find a better method of managing these buffers. */ + } else { + if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_EVENT_DONE)) { + dev_warn(adapter->dev, + "Write register failed\n"); + return -1; + } } return 0; @@ -2218,8 +2243,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (ctrl_data != FW_DUMP_HOST_READY) { dev_info(adapter->dev, "The ctrl reg was changed, re-try again!\n"); - mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - FW_DUMP_HOST_READY); + ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, + FW_DUMP_HOST_READY); if (ret) { dev_err(adapter->dev, "PCIE write err\n"); return RDWR_STATUS_FAILURE; @@ -2241,6 +2266,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; + int ret; static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; if (!card->pcie.supports_fw_dump) @@ -2284,6 +2310,12 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) if (memory_size == 0) { dev_info(adapter->dev, "Firmware dump Finished!\n"); + ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, + FW_DUMP_READ_DONE); + if (ret) { + dev_err(adapter->dev, "PCIE write err\n"); + goto done; + } break; } @@ -2312,11 +2344,13 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) reg_end = creg->fw_dump_end; for (reg = reg_start; reg <= reg_end; reg++) { mwifiex_read_reg_byte(adapter, reg, dbg_ptr); - if (dbg_ptr < end_ptr) + if (dbg_ptr < end_ptr) { dbg_ptr++; - else + } else { dev_err(adapter->dev, "Allocated buf not enough\n"); + goto done; + } } if (stat != RDWR_STATUS_DONE) diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index a1a8fd3bc1be5355289b34457cb4ff1cafe894ba..200e8b0cb5824e565ee0eda1c21055d7940be755 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h @@ -40,8 +40,8 @@ #define MWIFIEX_TXBD_MASK 0x3F #define MWIFIEX_RXBD_MASK 0x3F -#define MWIFIEX_MAX_EVT_BD 0x04 -#define MWIFIEX_EVTBD_MASK 0x07 +#define MWIFIEX_MAX_EVT_BD 0x08 +#define MWIFIEX_EVTBD_MASK 0x0f /* PCIE INTERNAL REGISTERS */ #define PCIE_SCRATCH_0_REG 0xC10 @@ -69,6 +69,7 @@ #define CPU_INTR_DOOR_BELL BIT(1) #define CPU_INTR_SLEEP_CFM_DONE BIT(2) #define CPU_INTR_RESET BIT(3) +#define CPU_INTR_EVENT_DONE BIT(5) #define HOST_INTR_DNLD_DONE BIT(0) #define HOST_INTR_UPLD_RDY BIT(1) diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index dee717a19ddb560956175c43789b2c802f029554..ca64d4c941127511bde59dd76d8b2c47299db47b 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -799,6 +799,7 @@ mwifiex_config_scan(struct mwifiex_private *priv, { struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_num_probes *num_probes_tlv; + struct mwifiex_ie_types_scan_chan_gap *chan_gap_tlv; struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; struct mwifiex_ie_types_bssid_list *bssid_tlv; u8 *tlv_pos; @@ -925,6 +926,23 @@ mwifiex_config_scan(struct mwifiex_private *priv, if ((i && ssid_filter) || !is_zero_ether_addr(scan_cfg_out->specific_bssid)) *filtered_scan = true; + + if (user_scan_in->scan_chan_gap) { + dev_dbg(adapter->dev, "info: scan: channel gap = %d\n", + user_scan_in->scan_chan_gap); + *max_chan_per_scan = + MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; + + 
chan_gap_tlv = (void *)tlv_pos; + chan_gap_tlv->header.type = + cpu_to_le16(TLV_TYPE_SCAN_CHANNEL_GAP); + chan_gap_tlv->header.len = + cpu_to_le16(sizeof(chan_gap_tlv->chan_gap)); + chan_gap_tlv->chan_gap = + cpu_to_le16((user_scan_in->scan_chan_gap)); + tlv_pos += + sizeof(struct mwifiex_ie_types_scan_chan_gap); + } } else { scan_cfg_out->bss_mode = (u8) adapter->scan_mode; num_probes = adapter->scan_probes; @@ -1050,12 +1068,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, *filtered_scan); } - /* - * In associated state we will reduce the number of channels scanned per - * scan command to 1 to avoid any traffic delay/loss. - */ - if (priv->media_connected) - *max_chan_per_scan = 1; } /* @@ -1719,7 +1731,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { bss = cfg80211_inform_bss(priv->wdev->wiphy, - chan, bssid, timestamp, + chan, CFG80211_BSS_FTYPE_UNKNOWN, + bssid, timestamp, cap_info_bitmap, beacon_period, ie_buf, ie_len, rssi, GFP_KERNEL); bss_priv = (struct mwifiex_bss_priv *)bss->priv; @@ -1754,7 +1767,7 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv) static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *cmd_node; + struct cmd_ctrl_node *cmd_node, *tmp_node; unsigned long flags; spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); @@ -1767,9 +1780,6 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) if (!adapter->ext_scan) mwifiex_complete_scan(priv); - if (priv->report_scan_result) - priv->report_scan_result = false; - if (priv->scan_request) { dev_dbg(adapter->dev, "info: notifying scan done\n"); cfg80211_scan_done(priv->scan_request, 0); @@ -1778,37 +1788,36 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) priv->scan_aborting = false; dev_dbg(adapter->dev, "info: scan already aborted\n"); } - } else { - if ((priv->scan_aborting && !priv->scan_request) || - priv->scan_block) { - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); - adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT; - mod_timer(&priv->scan_delay_timer, jiffies); - dev_dbg(priv->adapter->dev, - "info: %s: triggerring scan abort\n", __func__); - } else if (!mwifiex_wmm_lists_empty(adapter) && - (priv->scan_request && (priv->scan_request->flags & - NL80211_SCAN_FLAG_LOW_PRIORITY))) { - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); - adapter->scan_delay_cnt = 1; - mod_timer(&priv->scan_delay_timer, jiffies + - msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); - dev_dbg(priv->adapter->dev, - "info: %s: deferring scan\n", __func__); - } else { - /* Get scan command from scan_pending_q and put to - * cmd_pending_q - */ - cmd_node = list_first_entry(&adapter->scan_pending_q, - struct cmd_ctrl_node, list); + } else if ((priv->scan_aborting && !priv->scan_request) || + priv->scan_block) { + list_for_each_entry_safe(cmd_node, tmp_node, + &adapter->scan_pending_q, list) { list_del(&cmd_node->list); - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, - flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + } + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); + adapter->scan_processing = false; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + + if (priv->scan_request) { + 
dev_dbg(adapter->dev, "info: aborting scan\n"); + cfg80211_scan_done(priv->scan_request, 1); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + dev_dbg(adapter->dev, "info: scan already aborted\n"); } + } else { + /* Get scan command from scan_pending_q and put to + * cmd_pending_q + */ + cmd_node = list_first_entry(&adapter->scan_pending_q, + struct cmd_ctrl_node, list); + list_del(&cmd_node->list); + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); } return; @@ -1970,9 +1979,34 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, /* This function handles the command response of extended scan */ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv) { + struct mwifiex_adapter *adapter = priv->adapter; + struct host_cmd_ds_command *cmd_ptr; + struct cmd_ctrl_node *cmd_node; + unsigned long cmd_flags, scan_flags; + bool complete_scan = false; + dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n"); - mwifiex_complete_scan(priv); + spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags); + spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags); + if (list_empty(&adapter->scan_pending_q)) { + complete_scan = true; + list_for_each_entry(cmd_node, &adapter->cmd_pending_q, list) { + cmd_ptr = (void *)cmd_node->cmd_skb->data; + if (le16_to_cpu(cmd_ptr->command) == + HostCmd_CMD_802_11_SCAN_EXT) { + dev_dbg(priv->adapter->dev, + "Scan pending in command pending list"); + complete_scan = false; + break; + } + } + } + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, scan_flags); + spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_flags); + + if (complete_scan) + mwifiex_complete_scan(priv); return 0; } diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1770fa3fc1e621fc2965cde14581c337c1691d98..b25766b43b9f958120c398a1ced188416508f42f 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -279,6 +279,8 @@ static int mwifiex_sdio_suspend(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) /* Device ID for SD8897 */ #define SDIO_DEVICE_ID_MARVELL_8897 (0x912d) +/* Device ID for SD8887 */ +#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { @@ -290,6 +292,8 @@ static const struct sdio_device_id mwifiex_ids[] = { .driver_data = (unsigned long) &mwifiex_sdio_sd8797}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897), .driver_data = (unsigned long) &mwifiex_sdio_sd8897}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887), + .driver_data = (unsigned long)&mwifiex_sdio_sd8887}, {}, }; @@ -448,28 +452,31 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter) { u8 reg; + struct sdio_mmc_card *card = adapter->card; adapter->ioport = MEM_PORT; /* enable sdio new mode */ - if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, ®)) + if (mwifiex_read_reg(adapter, card->reg->card_cfg_2_1_reg, ®)) return -1; - if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG, + if (mwifiex_write_reg(adapter, card->reg->card_cfg_2_1_reg, reg | CMD53_NEW_MODE)) return -1; /* Configure cmd port and enable reading rx length from the register */ - if (mwifiex_read_reg(adapter, CMD_CONFIG_0, ®)) + if (mwifiex_read_reg(adapter, card->reg->cmd_cfg_0, ®)) return -1; - if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | 
CMD_PORT_RD_LEN_EN)) + if (mwifiex_write_reg(adapter, card->reg->cmd_cfg_0, + reg | CMD_PORT_RD_LEN_EN)) return -1; /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is * completed */ - if (mwifiex_read_reg(adapter, CMD_CONFIG_1, ®)) + if (mwifiex_read_reg(adapter, card->reg->cmd_cfg_1, ®)) return -1; - if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN)) + if (mwifiex_write_reg(adapter, card->reg->cmd_cfg_1, + reg | CMD_PORT_AUTO_EN)) return -1; return 0; @@ -496,17 +503,17 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) } /* Read the IO port */ - if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, ®)) + if (!mwifiex_read_reg(adapter, card->reg->io_port_0_reg, ®)) adapter->ioport |= (reg & 0xff); else return -1; - if (!mwifiex_read_reg(adapter, IO_PORT_1_REG, ®)) + if (!mwifiex_read_reg(adapter, card->reg->io_port_1_reg, ®)) adapter->ioport |= ((reg & 0xff) << 8); else return -1; - if (!mwifiex_read_reg(adapter, IO_PORT_2_REG, ®)) + if (!mwifiex_read_reg(adapter, card->reg->io_port_2_reg, ®)) adapter->ioport |= ((reg & 0xff) << 16); else return -1; @@ -514,8 +521,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ - if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, ®)) - mwifiex_write_reg(adapter, HOST_INT_RSR_REG, + if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, ®)) + mwifiex_write_reg(adapter, card->reg->host_int_rsr_reg, reg | card->reg->sdio_int_mask); else return -1; @@ -622,22 +629,15 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); - if (card->supports_sdio_new_mode && - !(wr_bitmap & reg->data_port_mask)) { + if (!(wr_bitmap & card->mp_data_port_mask)) { adapter->data_sent = true; return -EBUSY; - } else if (!card->supports_sdio_new_mode && - !(wr_bitmap & card->mp_data_port_mask)) { - return -1; } if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) { card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port)); *port = card->curr_wr_port; - if (((card->supports_sdio_new_mode) && - (++card->curr_wr_port == card->max_ports)) || - ((!card->supports_sdio_new_mode) && - (++card->curr_wr_port == card->mp_end_port))) + if (++card->curr_wr_port == card->mp_end_port) card->curr_wr_port = reg->start_wr_port; } else { adapter->data_sent = true; @@ -715,7 +715,7 @@ static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) struct sdio_func *func = card->func; sdio_claim_host(func); - mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0); + mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg, 0); sdio_release_irq(func); sdio_release_host(func); } @@ -736,7 +736,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) return; } - sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG]; + sdio_ireg = card->mp_regs[card->reg->host_int_status_reg]; if (sdio_ireg) { /* * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS @@ -801,7 +801,7 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) } /* Simply write the mask to the register */ - ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, + ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg, card->reg->host_int_enable); if (ret) { dev_err(adapter->dev, "enable host interrupt failed\n"); @@ -1055,7 +1055,13 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, switch (upld_typ) { 
case MWIFIEX_TYPE_DATA: dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n"); - mwifiex_handle_rx_packet(adapter, skb); + if (adapter->rx_work_enabled) { + skb_queue_tail(&adapter->rx_data_q, skb); + adapter->data_received = true; + atomic_inc(&adapter->rx_pending); + } else { + mwifiex_handle_rx_packet(adapter, skb); + } break; case MWIFIEX_TYPE_CMD: @@ -1335,8 +1341,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) u32 pkt_type; /* read the len of control packet */ - rx_len = card->mp_regs[CMD_RD_LEN_1] << 8; - rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0]; + rx_len = card->mp_regs[reg->cmd_rd_len_1] << 8; + rx_len |= (u16)card->mp_regs[reg->cmd_rd_len_0]; rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE); if (rx_len <= INTF_HEADER_LEN || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > @@ -1527,8 +1533,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { - if (!mp_tx_aggr_port_limit_reached(card) && - MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { + if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { f_precopy_cur_buf = 1; if (!(card->mp_wr_bitmap & @@ -1540,8 +1545,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, /* No room in Aggr buf, send it */ f_send_aggr_buf = 1; - if (mp_tx_aggr_port_limit_reached(card) || - !(card->mp_wr_bitmap & + if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port))) f_send_cur_buf = 1; else @@ -1826,11 +1830,11 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) sdio_set_drvdata(card->func, card); /* - * Read the HOST_INT_STATUS_REG for ACK the first interrupt got + * Read the host_int_status_reg for ACK the first interrupt got * from the bootloader. If we don't do this we get a interrupt * as soon as we register the irq. */ - mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg); + mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); /* Get SDIO ioport */ mwifiex_init_sdio_ioport(adapter); @@ -2233,3 +2237,4 @@ MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 6b8835ec88f1a802dc9cbabf4efdb38f23b7973c..20cd9adc98d3c5c5c98edf5e873b1b65e104c6ee 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -33,6 +33,7 @@ #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" +#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 @@ -52,13 +53,9 @@ #define HOST_TERM_CMD53 (0x1U << 2) #define REG_PORT 0 #define MEM_PORT 0x10000 -#define CMD_RD_LEN_0 0xB4 -#define CMD_RD_LEN_1 0xB5 -#define CARD_CONFIG_2_1_REG 0xCD + #define CMD53_NEW_MODE (0x1U << 0) -#define CMD_CONFIG_0 0xB8 #define CMD_PORT_RD_LEN_EN (0x1U << 2) -#define CMD_CONFIG_1 0xB9 #define CMD_PORT_AUTO_EN (0x1U << 0) #define CMD_PORT_SLCT 0x8000 #define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U) @@ -70,38 +67,23 @@ /* Misc. 
Config Register : Auto Re-enable interrupts */ #define AUTO_RE_ENABLE_INT BIT(4) -/* Host Control Registers */ -/* Host Control Registers : I/O port 0 */ -#define IO_PORT_0_REG 0x78 -/* Host Control Registers : I/O port 1 */ -#define IO_PORT_1_REG 0x79 -/* Host Control Registers : I/O port 2 */ -#define IO_PORT_2_REG 0x7A - /* Host Control Registers : Configuration */ #define CONFIGURATION_REG 0x00 /* Host Control Registers : Host power up */ #define HOST_POWER_UP (0x1U << 1) -/* Host Control Registers : Host interrupt mask */ -#define HOST_INT_MASK_REG 0x02 /* Host Control Registers : Upload host interrupt mask */ #define UP_LD_HOST_INT_MASK (0x1U) /* Host Control Registers : Download host interrupt mask */ #define DN_LD_HOST_INT_MASK (0x2U) -/* Host Control Registers : Host interrupt status */ -#define HOST_INTSTATUS_REG 0x03 /* Host Control Registers : Upload host interrupt status */ #define UP_LD_HOST_INT_STATUS (0x1U) /* Host Control Registers : Download host interrupt status */ #define DN_LD_HOST_INT_STATUS (0x2U) -/* Host Control Registers : Host interrupt RSR */ -#define HOST_INT_RSR_REG 0x01 - /* Host Control Registers : Host interrupt status */ -#define HOST_INT_STATUS_REG 0x28 +#define CARD_INT_STATUS_REG 0x28 /* Card Control Registers : Card I/O ready */ #define CARD_IO_READY (0x1U << 3) @@ -203,10 +185,16 @@ struct mwifiex_sdio_card_reg { u8 base_1_reg; u8 poll_reg; u8 host_int_enable; + u8 host_int_rsr_reg; + u8 host_int_status_reg; + u8 host_int_mask_reg; u8 status_reg_0; u8 status_reg_1; u8 sdio_int_mask; u32 data_port_mask; + u8 io_port_0_reg; + u8 io_port_1_reg; + u8 io_port_2_reg; u8 max_mp_regs; u8 rd_bitmap_l; u8 rd_bitmap_u; @@ -219,6 +207,15 @@ struct mwifiex_sdio_card_reg { u8 rd_len_p0_l; u8 rd_len_p0_u; u8 card_misc_cfg_reg; + u8 card_cfg_2_1_reg; + u8 cmd_rd_len_0; + u8 cmd_rd_len_1; + u8 cmd_rd_len_2; + u8 cmd_rd_len_3; + u8 cmd_cfg_0; + u8 cmd_cfg_1; + u8 cmd_cfg_2; + u8 cmd_cfg_3; u8 fw_dump_ctrl; u8 fw_dump_start; u8 fw_dump_end; @@ -274,10 +271,16 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { .base_1_reg = 0x0041, .poll_reg = 0x30, .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK, + .host_int_rsr_reg = 0x1, + .host_int_mask_reg = 0x02, + .host_int_status_reg = 0x03, .status_reg_0 = 0x60, .status_reg_1 = 0x61, .sdio_int_mask = 0x3f, .data_port_mask = 0x0000fffe, + .io_port_0_reg = 0x78, + .io_port_1_reg = 0x79, + .io_port_2_reg = 0x7A, .max_mp_regs = 64, .rd_bitmap_l = 0x04, .rd_bitmap_u = 0x05, @@ -296,10 +299,16 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { .poll_reg = 0x50, .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .host_int_rsr_reg = 0x1, + .host_int_status_reg = 0x03, + .host_int_mask_reg = 0x02, .status_reg_0 = 0xc0, .status_reg_1 = 0xc1, .sdio_int_mask = 0xff, .data_port_mask = 0xffffffff, + .io_port_0_reg = 0xD8, + .io_port_1_reg = 0xD9, + .io_port_2_reg = 0xDA, .max_mp_regs = 184, .rd_bitmap_l = 0x04, .rd_bitmap_u = 0x05, @@ -312,11 +321,61 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { .rd_len_p0_l = 0x0c, .rd_len_p0_u = 0x0d, .card_misc_cfg_reg = 0xcc, + .card_cfg_2_1_reg = 0xcd, + .cmd_rd_len_0 = 0xb4, + .cmd_rd_len_1 = 0xb5, + .cmd_rd_len_2 = 0xb6, + .cmd_rd_len_3 = 0xb7, + .cmd_cfg_0 = 0xb8, + .cmd_cfg_1 = 0xb9, + .cmd_cfg_2 = 0xba, + .cmd_cfg_3 = 0xbb, .fw_dump_ctrl = 0xe2, .fw_dump_start = 0xe3, .fw_dump_end = 0xea, }; +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = { + 
.start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0x6C, + .base_1_reg = 0x6D, + .poll_reg = 0x5C, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .host_int_rsr_reg = 0x4, + .host_int_status_reg = 0x0C, + .host_int_mask_reg = 0x08, + .status_reg_0 = 0x90, + .status_reg_1 = 0x91, + .sdio_int_mask = 0xff, + .data_port_mask = 0xffffffff, + .io_port_0_reg = 0xE4, + .io_port_1_reg = 0xE5, + .io_port_2_reg = 0xE6, + .max_mp_regs = 196, + .rd_bitmap_l = 0x10, + .rd_bitmap_u = 0x11, + .rd_bitmap_1l = 0x12, + .rd_bitmap_1u = 0x13, + .wr_bitmap_l = 0x14, + .wr_bitmap_u = 0x15, + .wr_bitmap_1l = 0x16, + .wr_bitmap_1u = 0x17, + .rd_len_p0_l = 0x18, + .rd_len_p0_u = 0x19, + .card_misc_cfg_reg = 0xd8, + .card_cfg_2_1_reg = 0xd9, + .cmd_rd_len_0 = 0xc0, + .cmd_rd_len_1 = 0xc1, + .cmd_rd_len_2 = 0xc2, + .cmd_rd_len_3 = 0xc3, + .cmd_cfg_0 = 0xc4, + .cmd_cfg_1 = 0xc5, + .cmd_cfg_2 = 0xc6, + .cmd_cfg_3 = 0xc7, +}; + static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { .firmware = SD8786_DEFAULT_FW_NAME, .reg = &mwifiex_reg_sd87xx, @@ -369,6 +428,19 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .supports_fw_dump = true, }; +static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { + .firmware = SD8887_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8887, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, + .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, + .supports_fw_dump = false, +}; + /* * .cmdrsp_complete handler */ diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 733de92a4c611e39da93fb580a272203d269e54f..1c2ca291d1f5351447130cc30cfcccc00b99dc17 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -938,7 +938,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN + key_param_len); - if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) { + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { tlv_mac = (void *)((u8 *)&key_material->key_param_set + key_param_len); tlv_mac->header.type = @@ -965,7 +965,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, u16 cmd_action, u32 cmd_oid, struct mwifiex_ds_encrypt_key *enc_key) { - if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (priv->adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) return mwifiex_cmd_802_11_key_material_v2(priv, cmd, cmd_action, cmd_oid, enc_key); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 08b78baeb846d65ef18680e462991c226c94d5e3..4aad44685f8dff43f80e5ed79a729bf99dbe8d5c 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -85,8 +85,6 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - if (priv->report_scan_result) - priv->report_scan_result = false; break; case HostCmd_CMD_MAC_CONTROL: @@ -637,7 +635,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - if 
(priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (priv->adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) return mwifiex_ret_802_11_key_material_v2(priv, resp); else return mwifiex_ret_802_11_key_material_v1(priv, resp); diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index caae9738100aa732087e2d03477b063bc1534fe2..92f3eb8398662700298e6ba81eab855ecdbe554b 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -287,10 +287,13 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, return -1; if (mwifiex_band_to_radio_type(bss_desc->bss_band) == - HostCmd_SCAN_RADIO_TYPE_BG) + HostCmd_SCAN_RADIO_TYPE_BG) { config_bands = BAND_B | BAND_G | BAND_GN; - else - config_bands = BAND_A | BAND_AN | BAND_AAC; + } else { + config_bands = BAND_A | BAND_AN; + if (adapter->fw_bands & BAND_AAC) + config_bands |= BAND_AAC; + } if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) adapter->config_bands = config_bands; @@ -877,7 +880,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, return -1; } - if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) { + if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) { memcpy(encrypt_key->key_material, wep_key->key_material, wep_key->key_length); encrypt_key->key_len = wep_key->key_length; @@ -903,7 +906,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, memset(&priv->wep_key[index], 0, sizeof(struct mwifiex_wep_key)); - if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) enc_key = encrypt_key; else enc_key = NULL; diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index 4c5fd953893dcb22712f60c618f68499351c1dff..e2949077f5b5dee5e02cea4d16efbb2456232eea 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -871,7 +871,9 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, break; case WLAN_EID_RSN: memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos, - sizeof(struct ieee_types_header) + pos[1]); + sizeof(struct ieee_types_header) + + min_t(u8, pos[1], IEEE_MAX_IE_SIZE - + sizeof(struct ieee_types_header))); break; case WLAN_EID_QOS_CAPA: sta_ptr->tdls_cap.qos_info = pos[2]; diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 7118a18b91ba9f2a98a4333a2a5070f00d17a433..4371e12b36f38af6e0946969288ce918dc8487b1 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -357,7 +357,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf, card->usb_boot_state = USB8XXX_FW_READY; break; default: - pr_warning("unknown id_product %#x\n", id_product); + pr_warn("unknown id_product %#x\n", id_product); card->usb_boot_state = USB8XXX_FW_DNLD; break; } diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index cee028321a9ab73eac65ced1048e21fed2ce8e7e..ec79c49de0975651a5e2cdebdd058fc312308537 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -172,7 +172,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq, CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len, - 0, GFP_ATOMIC); + 0); return 0; } diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 
d3cf7c3ebfd656d4a592384f41fd348797ed17c8..995846422dc0a9e6176f70ad90927833d1b38614 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -534,7 +534,7 @@ static void ezusb_request_out_callback(struct urb *urb) if (ctx->killed) { spin_unlock_irqrestore(&upriv->req_lock, flags); - pr_warning("interrupt called with dead ctx"); + pr_warn("interrupt called with dead ctx\n"); goto out; } @@ -671,8 +671,8 @@ static void ezusb_request_in_callback(struct ezusb_priv *upriv, default: spin_unlock_irqrestore(&upriv->req_lock, flags); - pr_warning("Matched IN URB, unexpected context state(0x%x)", - state); + pr_warn("Matched IN URB, unexpected context state(0x%x)\n", + state); /* Throw this CTX away and try submitting another */ del_timer(&ctx->timer); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; @@ -1394,12 +1394,12 @@ static void ezusb_bulk_in_callback(struct urb *urb) /* When a device gets unplugged we get this every time * we resubmit, flooding the logs. Since we don't use * USB timeouts, it shouldn't happen any other time*/ - pr_warning("%s: urb timed out, not resubmiting", __func__); + pr_warn("%s: urb timed out, not resubmitting\n", __func__); return; } if (urb->status == -ECONNABORTED) { - pr_warning("%s: connection abort, resubmiting urb", - __func__); + pr_warn("%s: connection abort, resubmitting urb\n", + __func__); goto resubmit; } if ((urb->status == -EILSEQ) @@ -1605,13 +1605,10 @@ static int ezusb_probe(struct usb_interface *interface, for (i = 0; i < iface_desc->bNumEndpoints; ++i) { ep = &interface->altsetting[0].endpoint[i].desc; - if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) - == USB_DIR_IN) && - ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_BULK)) { + if (usb_endpoint_is_bulk_in(ep)) { /* we found a bulk in endpoint */ if (upriv->read_urb != NULL) { - pr_warning("Found a second bulk in ep, ignored"); + pr_warn("Found a second bulk in ep, ignored\n"); continue; } @@ -1621,10 +1618,10 @@ static int ezusb_probe(struct usb_interface *interface, goto error; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) - pr_warning("bulk in: wMaxPacketSize!= 64"); + pr_warn("bulk in: wMaxPacketSize!= 64\n"); if (ep->bEndpointAddress != (2 | USB_DIR_IN)) - pr_warning("bulk in: bEndpointAddress: %d", - ep->bEndpointAddress); + pr_warn("bulk in: bEndpointAddress: %d\n", + ep->bEndpointAddress); upriv->read_pipe = usb_rcvbulkpipe(udev, ep-> bEndpointAddress); @@ -1636,21 +1633,18 @@ static int ezusb_probe(struct usb_interface *interface, } } - if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) - == USB_DIR_OUT) && - ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_BULK)) { + if (usb_endpoint_is_bulk_out(ep)) { /* we found a bulk out endpoint */ if (upriv->bap_buf != NULL) { - pr_warning("Found a second bulk out ep, ignored"); + pr_warn("Found a second bulk out ep, ignored\n"); continue; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) - pr_warning("bulk out: wMaxPacketSize != 64"); + pr_warn("bulk out: wMaxPacketSize != 64\n"); if (ep->bEndpointAddress != 2) - pr_warning("bulk out: bEndpointAddress: %d", - ep->bEndpointAddress); + pr_warn("bulk out: bEndpointAddress: %d\n", + ep->bEndpointAddress); upriv->write_pipe = usb_sndbulkpipe(udev, ep-> bEndpointAddress); diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index e175b9b8561b594299d69d237624e3604932d22f..2c66166add705ca64b0390cfe4602d2d15c70fdd 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ 
b/drivers/net/wireless/orinoco/scan.c @@ -123,9 +123,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); - cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, - capability, beacon_interval, ie_buf, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, + bss->a.bssid, timestamp, capability, + beacon_interval, ie_buf, ie_len, signal, + GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } @@ -156,9 +157,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); - cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, - capability, beacon_interval, ie, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, + bss->bssid, timestamp, capability, + beacon_interval, ie, ie_len, signal, + GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index 7be3a4839640c6eda6b8254fd7b81bbe5d01fd33..97aeff0edb84b52b5bbe5821caae9541e13ef554 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -696,7 +696,8 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif, WARN(total, "tx flush timeout, unresponsive firmware"); } -static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class) +static void p54_set_coverage_class(struct ieee80211_hw *dev, + s16 coverage_class) { struct p54_common *priv = dev->priv; diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h index e79848fbcca1e1e5397410745589703e3cad903a..524c2f02dd821196a843c961687e73096ea02d65 100644 --- a/drivers/net/wireless/ray_cs.h +++ b/drivers/net/wireless/ray_cs.h @@ -3,7 +3,8 @@ Written by Corey Thomas */ -#ifndef RAYLINK_H +#ifndef _RAY_CS_H_ +#define _RAY_CS_H_ struct beacon_rx { struct mac_header mac; @@ -69,4 +70,4 @@ typedef struct ray_dev_t { } ray_dev_t; /*****************************************************************************/ -#endif /* RAYLINK_H */ +#endif /* _RAY_CS_H_ */ diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h index 3c3b98b152c30bb7a5b1f2cd3ce20793eb63c212..b21ed64e15df1e3842ae1b48e89fc3ffc918d6c7 100644 --- a/drivers/net/wireless/rayctl.h +++ b/drivers/net/wireless/rayctl.h @@ -1,4 +1,5 @@ -#ifndef RAYLINK_H +#ifndef _RAYCTL_H_ +#define _RAYCTL_H_ typedef unsigned char UCHAR; @@ -729,4 +730,4 @@ typedef struct snaphdr_t #define RAY_IPX_TYPE 0x8137 #define APPLEARP_TYPE 0x80f3 /*****************************************************************************/ -#endif /* #ifndef RAYLINK_H */ +#endif /* _RAYCTL_H_ */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d2a9a08210be1379b4e56ad266c1695ca6486d17..1a4facd1fbf335625ff092064b3dfa58f4e869d1 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2022,9 +2022,10 @@ static bool rndis_bss_info_update(struct usbnet *usbdev, capability = le16_to_cpu(fixed->capabilities); beacon_interval = le16_to_cpu(fixed->beacon_interval); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, - timestamp, capability, beacon_interval, ie, ie_len, signal, - GFP_KERNEL); + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid->mac, + timestamp, capability, beacon_interval, + ie, ie_len, 
signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); return (bss != NULL); @@ -2711,9 +2712,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, bssid, (u32)timestamp, capability, beacon_period, ie_len, ssid.essid, signal); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, - timestamp, capability, beacon_period, ie_buf, ie_len, - signal, GFP_KERNEL); + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, + timestamp, capability, beacon_period, + ie_buf, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); } diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index a394a9a95919b5dcbaf0f87a19e289e43bafe853..ebd5625d13f1832767f266f932ef012122f72e8e 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -52,6 +52,7 @@ * RF5592 2.4G/5G 2T2R * RF3070 2.4G 1T1R * RF5360 2.4G 1T1R + * RF5362 2.4G 1T1R * RF5370 2.4G 1T1R * RF5390 2.4G 1T1R */ @@ -72,6 +73,7 @@ #define RF3070 0x3070 #define RF3290 0x3290 #define RF5360 0x5360 +#define RF5362 0x5362 #define RF5370 0x5370 #define RF5372 0x5372 #define RF5390 0x5390 @@ -2039,7 +2041,7 @@ struct mac_iveiv_entry { * 2 - drop tx power by 12dBm, * 3 - increase tx power by 6dBm */ -#define BBP1_TX_POWER_CTRL FIELD8(0x07) +#define BBP1_TX_POWER_CTRL FIELD8(0x03) #define BBP1_TX_ANTENNA FIELD8(0x18) /* @@ -2145,7 +2147,7 @@ struct mac_iveiv_entry { /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */ #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70) #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80) -/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */ +/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */ #define RFCSR3_VCOCAL_EN FIELD8(0x80) /* Bits for RF3050 */ #define RFCSR3_BIT1 FIELD8(0x02) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 893c9d5f3d6f09c659710a855fd8425a5475d28a..9f57a2db791cf879fd9780b65926df300e461f92 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3186,6 +3186,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, break; case RF3070: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -3203,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_rf(rt2x00dev, RF3290) || rt2x00_rf(rt2x00dev, RF3322) || rt2x00_rf(rt2x00dev, RF5360) || + rt2x00_rf(rt2x00dev, RF5362) || rt2x00_rf(rt2x00dev, RF5370) || rt2x00_rf(rt2x00dev, RF5372) || rt2x00_rf(rt2x00dev, RF5390) || @@ -4317,6 +4319,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) case RF3070: case RF3290: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7095,6 +7098,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) case RF3320: case RF3322: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7551,6 +7555,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3320: case RF3322: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7680,6 +7685,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3070: case RF3290: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index 026d912f516be07e4fb169e0007ff75113e388d0..ded967aa6ecb6f69fbad672485052b6f55e66423 100644 --- 
a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -189,6 +189,9 @@ static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] = {5, 4, 3, 2, 7}; static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {4, 7}; +/* LNA gain table for rtl8187se */ +static const u8 rtl8187se_lna_gain[4] = {02, 17, 29, 39}; + void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8180_priv *priv = dev->priv; @@ -210,13 +213,14 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) struct rtl8180_priv *priv = dev->priv; struct rtl818x_rx_cmd_desc *cmd_desc; unsigned int count = 32; - u8 agc, sq, signal = 1; + u8 agc, sq; + s8 signal = 1; dma_addr_t mapping; while (count--) { void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz; struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; - u32 flags, flags2; + u32 flags, flags2, flags3 = 0; u64 tsft; if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) { @@ -229,6 +233,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) * the ownership flag */ rmb(); + flags3 = le32_to_cpu(desc->flags3); flags2 = le32_to_cpu(desc->flags2); tsft = le64_to_cpu(desc->tsft); } else { @@ -287,8 +292,21 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) signal = priv->rf->calc_rssi(agc, sq); break; case RTL818X_CHIP_FAMILY_RTL8187SE: - /* TODO: rtl8187se rssi */ - signal = 10; + /* OFDM measure reported by HW is signed, + * in 0.5dBm unit, with zero centered @ -41dBm + * input signal. + */ + if (rx_status.rate_idx > 3) { + signal = (s8)((flags3 >> 16) & 0xff); + signal = signal / 2 - 41; + } else { + int idx, bb; + + idx = (agc & 0x60) >> 5; + bb = (agc & 0x1F) * 2; + /* bias + BB gain + LNA gain */ + signal = 4 - bb - rtl8187se_lna_gain[idx]; + } break; } rx_status.signal = signal; @@ -1835,7 +1853,7 @@ static int rtl8180_probe(struct pci_dev *pdev, pci_try_set_mwi(pdev); } - if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) + if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) dev->flags |= IEEE80211_HW_SIGNAL_DBM; else dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC; diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index bf3cf124e4eaac4e16fe9cbcd9299dbbcb9df250..5cf509d346e8f61f76ee00254c6cebab1f53ac2b 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -5,7 +5,8 @@ menuconfig RTL_CARDS ---help--- This option will enable support for the Realtek mac80211-based wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de, - rtl8723ae, rtl8723be, and rtl8188ae share some common code. + rtl8723ae, rtl8723be, rtl8188ee, rtl8192ee, and rtl8821ae share + some common code. if RTL_CARDS @@ -80,6 +81,30 @@ config RTL8188EE If you choose to build it as a module, it will be called rtl8188ee +config RTL8192EE + tristate "Realtek RTL8192EE Wireless Network Adapter" + depends on PCI + select RTLWIFI + select RTLWIFI_PCI + select RTLBTCOEXIST + ---help--- + This is the driver for Realtek RTL8192EE 802.11n PCIe + wireless network adapters. + + If you choose to build it as a module, it will be called rtl8192ee + +config RTL8821AE + tristate "Realtek RTL8821AE/RTL8812AE Wireless Network Adapter" + depends on PCI + select RTLWIFI + select RTLWIFI_PCI + select RTLBTCOEXIST + ---help--- + This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe + wireless network adapters. 
+ + If you choose to build it as a module, it will be called rtl8821ae + config RTL8192CU tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" depends on USB @@ -123,7 +148,7 @@ config RTL8723_COMMON config RTLBTCOEXIST tristate - depends on RTL8723AE || RTL8723BE + depends on RTL8723AE || RTL8723BE || RTL8821AE || RTL8192EE default y endif diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile index bba36a06abcc4c3cf3d896eab771ef0406611993..ad6d3c52ec572247067544a4a5ca81b09d020d68 100644 --- a/drivers/net/wireless/rtlwifi/Makefile +++ b/drivers/net/wireless/rtlwifi/Makefile @@ -28,5 +28,7 @@ obj-$(CONFIG_RTL8723BE) += rtl8723be/ obj-$(CONFIG_RTL8188EE) += rtl8188ee/ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/ obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/ +obj-$(CONFIG_RTL8821AE) += rtl8821ae/ +obj-$(CONFIG_RTL8192EE) += rtl8192ee/ ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 93bb384eb0014e17e51d12457159f6d9e08781ff..58ba718308862391b108a41e0eab1e7ff7fa226a 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -34,7 +30,7 @@ #include "cam.h" #include "ps.h" #include "regd.h" - +#include "pci.h" #include #include #include @@ -211,7 +207,6 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, *highest supported RX rate */ if (rtlpriv->dm.supp_phymode_switch) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Support phy mode switch\n"); @@ -244,6 +239,83 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, } } +static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw, + struct ieee80211_sta_vht_cap *vht_cap) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + u16 mcs_map; + + vht_cap->vht_supported = true; + vht_cap->cap = + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_TXSTBC | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | + 0; + + mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; + + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.rx_highest = + cpu_to_le16(MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_highest = + cpu_to_le16(MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + u16 mcs_map; + + 
vht_cap->vht_supported = true; + vht_cap->cap = + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_SHORT_GI_80 | + IEEE80211_VHT_CAP_TXSTBC | + IEEE80211_VHT_CAP_RXSTBC_1 | + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | + 0; + + mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; + + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.rx_highest = + cpu_to_le16(MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9); + vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); + vht_cap->vht_mcs.tx_highest = + cpu_to_le16(MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9); + } +} + static void _rtl_init_mac80211(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -252,9 +324,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct ieee80211_supported_band *sband; - - if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && rtlhal->bandset == - BAND_ON_BOTH) { + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && + rtlhal->bandset == BAND_ON_BOTH) { /* 1: 2.4 G bands */ /* <1> use mac->bands as mem for hw->wiphy->bands */ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); @@ -282,6 +353,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; } else { @@ -292,8 +364,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), - &rtl_band_2ghz, - sizeof(struct ieee80211_supported_band)); + &rtl_band_2ghz, + sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); @@ -307,12 +379,13 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] * to default value(1T1R) */ memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), - &rtl_band_5ghz, - sizeof(struct ieee80211_supported_band)); + &rtl_band_5ghz, + sizeof(struct ieee80211_supported_band)); /* <3> init ht cap base on ant_num */ _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + _rtl_init_hw_vht_capab(hw, &sband->vht_cap); /* <4> set mac->sband to wiphy->sband */ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; } else { @@ -326,7 +399,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_CONNECTION_MONITOR | /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */ - IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0; @@ -336,7 +408,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) IEEE80211_HW_PS_NULLFUNC_STACK | /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */ 0; - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | @@ -344,8 +415,10 @@ static void 
_rtl_init_mac80211(struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); - hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + + hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + hw->wiphy->rts_threshold = 2347; hw->queues = AC_MAX; @@ -358,6 +431,21 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) /* hw->max_rates = 1; */ hw->sta_data_size = sizeof(struct rtl_sta_info); +/* wowlan is not supported by kernel if CONFIG_PM is not defined */ +#ifdef CONFIG_PM + if (rtlpriv->psc.wo_wlan_mode) { + if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_MAGIC_PACKET) + rtlpriv->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; + if (rtlpriv->psc.wo_wlan_mode & WAKE_ON_PATTERN_MATCH) { + rtlpriv->wowlan.n_patterns = + MAX_SUPPORT_WOL_PATTERN_NUM; + rtlpriv->wowlan.pattern_min_len = MIN_WOL_PATTERN_SIZE; + rtlpriv->wowlan.pattern_max_len = MAX_WOL_PATTERN_SIZE; + } + hw->wiphy->wowlan = &rtlpriv->wowlan; + } +#endif + /* <6> mac address */ if (is_valid_ether_addr(rtlefuse->dev_addr)) { SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); @@ -366,7 +454,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); SET_IEEE80211_PERM_ADDR(hw, rtlmac1); } - } static void _rtl_init_deferred_work(struct ieee80211_hw *hw) @@ -378,10 +465,9 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) rtl_watch_dog_timer_callback, (unsigned long)hw); setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer, rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); - /* <2> work queue */ rtlpriv->works.hw = hw; - rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); + rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, (void *)rtl_watchdog_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, @@ -424,7 +510,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw) radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); if (valid) { - pr_info("wireless switch is %s\n", + pr_info("rtlwifi: wireless switch is %s\n", rtlpriv->rfkill.rfkill_state ? 
"on" : "off"); rtlpriv->rfkill.rfkill_state = radio_state; @@ -466,22 +552,18 @@ int rtl_init_core(struct ieee80211_hw *hw) /* <4> locks */ mutex_init(&rtlpriv->locks.conf_mutex); - mutex_init(&rtlpriv->locks.ps_mutex); spin_lock_init(&rtlpriv->locks.ips_lock); spin_lock_init(&rtlpriv->locks.irq_th_lock); - spin_lock_init(&rtlpriv->locks.irq_pci_lock); - spin_lock_init(&rtlpriv->locks.tx_lock); spin_lock_init(&rtlpriv->locks.h2c_lock); spin_lock_init(&rtlpriv->locks.rf_ps_lock); spin_lock_init(&rtlpriv->locks.rf_lock); spin_lock_init(&rtlpriv->locks.waitq_lock); spin_lock_init(&rtlpriv->locks.entry_list_lock); - spin_lock_init(&rtlpriv->locks.fw_ps_lock); spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); spin_lock_init(&rtlpriv->locks.check_sendpkt_lock); spin_lock_init(&rtlpriv->locks.fw_ps_lock); spin_lock_init(&rtlpriv->locks.lps_lock); - + spin_lock_init(&rtlpriv->locks.iqk_lock); /* <5> init list */ INIT_LIST_HEAD(&rtlpriv->entry_list); @@ -539,6 +621,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 rate_flag = info->control.rates[0].flags; u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0; + u8 sgi_80 = 0, bw_80 = 0; tcb_desc->use_shortgi = false; if (sta == NULL) @@ -546,24 +629,35 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; + sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; - if (!(sta->ht_cap.ht_supported)) + if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported)) return; if (!sgi_40 && !sgi_20) return; - if (mac->opmode == NL80211_IFTYPE_STATION) + if (mac->opmode == NL80211_IFTYPE_STATION) { bw_40 = mac->bw_40; - else if (mac->opmode == NL80211_IFTYPE_AP || + bw_80 = mac->bw_80; + } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || - mac->opmode == NL80211_IFTYPE_MESH_POINT) - bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_80 = sta->vht_cap.vht_supported; + } - if (bw_40 && sgi_40) - tcb_desc->use_shortgi = true; - else if ((bw_40 == false) && sgi_20) - tcb_desc->use_shortgi = true; + if (bw_80) { + if (sgi_80) + tcb_desc->use_shortgi = true; + else + tcb_desc->use_shortgi = false; + } else { + if (bw_40 && sgi_40) + tcb_desc->use_shortgi = true; + else if (!bw_40 && sgi_20) + tcb_desc->use_shortgi = true; + } if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI)) tcb_desc->use_shortgi = false; @@ -613,7 +707,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_STATION) { tcb_desc->ratr_index = 0; } else if (mac->opmode == NL80211_IFTYPE_ADHOC || - mac->opmode == NL80211_IFTYPE_MESH_POINT) { + mac->opmode == NL80211_IFTYPE_MESH_POINT) { if (tcb_desc->multicast || tcb_desc->broadcast) { tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; @@ -634,7 +728,13 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_MESH_POINT) { tcb_desc->mac_id = 0; - if (mac->mode == WIRELESS_MODE_N_24G) + if (mac->mode == WIRELESS_MODE_AC_5G) + tcb_desc->ratr_index = + RATR_INX_WIRELESS_AC_5N; + else if (mac->mode == WIRELESS_MODE_AC_24G) + tcb_desc->ratr_index = + RATR_INX_WIRELESS_AC_24N; + else if (mac->mode == WIRELESS_MODE_N_24G) tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB; else if (mac->mode == WIRELESS_MODE_N_5G) tcb_desc->ratr_index = RATR_INX_WIRELESS_NG; @@ -644,8 +744,9 @@ 
static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, tcb_desc->ratr_index = RATR_INX_WIRELESS_B; else if (mac->mode & WIRELESS_MODE_A) tcb_desc->ratr_index = RATR_INX_WIRELESS_G; + } else if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) { + mac->opmode == NL80211_IFTYPE_ADHOC) { if (NULL != sta) { if (sta->aid > 0) tcb_desc->mac_id = sta->aid + 1; @@ -671,7 +772,8 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (!(sta->ht_cap.ht_supported) || + !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) return; } else if (mac->opmode == NL80211_IFTYPE_STATION) { if (!mac->bw_40 || !(sta->ht_cap.ht_supported)) @@ -684,16 +786,74 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]) return; - tcb_desc->packet_bw = true; + tcb_desc->packet_bw = HT_CHANNEL_WIDTH_20_40; + + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE || + rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE) { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + if (!(sta->vht_cap.vht_supported)) + return; + } else if (mac->opmode == NL80211_IFTYPE_STATION) { + if (!mac->bw_80 || + !(sta->vht_cap.vht_supported)) + return; + } + if (tcb_desc->hw_rate <= + rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15]) + return; + tcb_desc->packet_bw = HT_CHANNEL_WIDTH_80; + } } -static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw) +static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u8 hw_rate; + u16 tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map); + + if ((get_rf_type(rtlphy) == RF_2T2R) && + (tx_mcs_map & 0x000c) != 0x000c) { + if ((tx_mcs_map & 0x000c) >> 2 == + IEEE80211_VHT_MCS_SUPPORT_0_7) + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS7]; + else if ((tx_mcs_map & 0x000c) >> 2 == + IEEE80211_VHT_MCS_SUPPORT_0_8) + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9]; + else + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9]; + } else { + if ((tx_mcs_map & 0x0003) == + IEEE80211_VHT_MCS_SUPPORT_0_7) + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS7]; + else if ((tx_mcs_map & 0x0003) == + IEEE80211_VHT_MCS_SUPPORT_0_8) + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9]; + else + hw_rate = + rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9]; + } - if (get_rf_type(rtlphy) == RF_2T2R) + return hw_rate; +} + +static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 hw_rate; + + if ((get_rf_type(rtlphy) == RF_2T2R) && + (sta->ht_cap.mcs.rx_mask[1] != 0)) hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15]; else hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7]; @@ -801,9 +961,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, break; } } - } else { - switch (desc_rate) { case DESC92_RATEMCS0: rate_idx = 0; @@ -862,31 +1020,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, } EXPORT_SYMBOL(rtlwifi_rate_mapping); -bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_priv *rtlpriv = rtl_priv(hw); - 
__le16 fc = rtl_get_fc(skb); - - if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } - if (ieee80211_is_auth(fc)) { - RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); - rtl_ips_nic_on(hw); - - mac->link_state = MAC80211_LINKING; - /* Dual mac */ - rtlpriv->phy.need_iqk = true; - } - - return true; -} -EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc); - void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -896,13 +1029,11 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = rtl_get_hdr(skb); struct ieee80211_rate *txrate; - __le16 fc = hdr->frame_control; + __le16 fc = rtl_get_fc(skb); txrate = ieee80211_get_tx_rate(hw, info); if (txrate) tcb_desc->hw_rate = txrate->hw_value; - else - tcb_desc->hw_rate = 0; if (ieee80211_is_data(fc)) { /* @@ -929,15 +1060,21 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, *and N rate will all be controlled by FW *when tcb_desc->use_driver_rate = false */ - if (sta && (sta->ht_cap.ht_supported)) { - tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw); + if (sta && sta->vht_cap.vht_supported) { + tcb_desc->hw_rate = + _rtl_get_vht_highest_n_rate(hw, sta); } else { - if (rtlmac->mode == WIRELESS_MODE_B) { + if (sta && (sta->ht_cap.ht_supported)) { tcb_desc->hw_rate = - rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]; + _rtl_get_highest_n_rate(hw, sta); } else { - tcb_desc->hw_rate = - rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]; + if (rtlmac->mode == WIRELESS_MODE_B) { + tcb_desc->hw_rate = + rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]; + } else { + tcb_desc->hw_rate = + rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]; + } } } } @@ -962,54 +1099,58 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, } EXPORT_SYMBOL(rtl_get_tcb_desc); -static bool addbareq_rx(struct ieee80211_hw *hw, struct sk_buff *skb) +bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) { + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct ieee80211_sta *sta = NULL; - struct ieee80211_hdr *hdr = rtl_get_hdr(skb); - struct rtl_sta_info *sta_entry = NULL; - struct ieee80211_mgmt *mgmt = (void *)skb->data; - u16 capab = 0, tid = 0; - struct rtl_tid_data *tid_data; - struct sk_buff *skb_delba = NULL; - struct ieee80211_rx_status rx_status = { 0 }; + __le16 fc = rtl_get_fc(skb); - rcu_read_lock(); - sta = rtl_find_sta(hw, hdr->addr3); - if (sta == NULL) { - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_EMERG, - "sta is NULL\n"); - rcu_read_unlock(); - return true; + if (rtlpriv->dm.supp_phymode_switch && + mac->link_state < MAC80211_LINKED && + (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { + if (rtlpriv->cfg->ops->chk_switch_dmdp) + rtlpriv->cfg->ops->chk_switch_dmdp(hw); } + if (ieee80211_is_auth(fc)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); + rtl_ips_nic_on(hw); + + mac->link_state = MAC80211_LINKING; + /* Dul mac */ + rtlpriv->phy.need_iqk = true; - sta_entry = (struct rtl_sta_info *)sta->drv_priv; - if (!sta_entry) { - rcu_read_unlock(); - return true; } - capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); - tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - tid_data = &sta_entry->tids[tid]; - if (tid_data->agg.rx_agg_state == RTL_RX_AGG_START) { - skb_delba = rtl_make_del_ba(hw, hdr->addr2, hdr->addr3, tid); - if 
(skb_delba) { - rx_status.freq = hw->conf.chandef.chan->center_freq; - rx_status.band = hw->conf.chandef.chan->band; - rx_status.flag |= RX_FLAG_DECRYPTED; - rx_status.flag |= RX_FLAG_MACTIME_END; - rx_status.rate_idx = 0; - rx_status.signal = 50 + 10; - memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status, - sizeof(rx_status)); - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, - "fake del\n", skb_delba->data, - skb_delba->len); - ieee80211_rx_irqsafe(hw, skb_delba); - } + + return true; +} +EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc); + +struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa, + u8 *bssid, u16 tid); + +static void process_agg_start(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u16 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_rx_status rx_status = { 0 }; + struct sk_buff *skb_delba = NULL; + + skb_delba = rtl_make_del_ba(hw, hdr->addr2, hdr->addr3, tid); + if (skb_delba) { + rx_status.freq = hw->conf.chandef.chan->center_freq; + rx_status.band = hw->conf.chandef.chan->band; + rx_status.flag |= RX_FLAG_DECRYPTED; + rx_status.flag |= RX_FLAG_MACTIME_START; + rx_status.rate_idx = 0; + rx_status.signal = 50 + 10; + memcpy(IEEE80211_SKB_RXCB(skb_delba), + &rx_status, sizeof(rx_status)); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, + "fake del\n", + skb_delba->data, + skb_delba->len); + ieee80211_rx_irqsafe(hw, skb_delba); } - rcu_read_unlock(); - return false; } bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) @@ -1017,8 +1158,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = rtl_get_hdr(skb); struct rtl_priv *rtlpriv = rtl_priv(hw); - __le16 fc = hdr->frame_control; - u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN; + __le16 fc = rtl_get_fc(skb); + u8 *act = (u8 *)(((u8 *)skb->data + MAC80211_3ADDR_LEN)); u8 category; if (!ieee80211_is_action(fc)) @@ -1034,18 +1175,47 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) return false; RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, - "%s ACT_ADDBAREQ From :%pM\n", - is_tx ? "Tx" : "Rx", hdr->addr2); + "%s ACT_ADDBAREQ From :%pM\n", + is_tx ? "Tx" : "Rx", hdr->addr2); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n", - skb->data, skb->len); - if (!is_tx) - if (addbareq_rx(hw, skb)) + skb->data, skb->len); + if (!is_tx) { + struct ieee80211_sta *sta = NULL; + struct rtl_sta_info *sta_entry = NULL; + struct rtl_tid_data *tid_data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u16 capab = 0, tid = 0; + + rcu_read_lock(); + sta = rtl_find_sta(hw, hdr->addr3); + if (sta == NULL) { + RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV, + DBG_DMESG, "sta is NULL\n"); + rcu_read_unlock(); + return true; + } + + sta_entry = + (struct rtl_sta_info *)sta->drv_priv; + if (!sta_entry) { + rcu_read_unlock(); return true; + } + capab = + le16_to_cpu(mgmt->u.action.u.addba_req.capab); + tid = (capab & + IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + tid_data = &sta_entry->tids[tid]; + if (tid_data->agg.rx_agg_state == + RTL_RX_AGG_START) + process_agg_start(hw, hdr, tid); + rcu_read_unlock(); + } break; case ACT_ADDBARSP: RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "%s ACT_ADDBARSP From :%pM\n", - is_tx ? "Tx" : "Rx", hdr->addr2); + is_tx ? 
"Tx" : "Rx", hdr->addr2); break; case ACT_DELBA: RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, @@ -1061,6 +1231,17 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } EXPORT_SYMBOL_GPL(rtl_action_proc); +static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc) +{ + rtlpriv->ra.is_special_data = true; + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_special_packet_notify( + rtlpriv, 1); + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); + ppsc->last_delaylps_stamp_jiffies = jiffies; +} + /*should call before software enc*/ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) { @@ -1069,57 +1250,77 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) __le16 fc = rtl_get_fc(skb); u16 ether_type; u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u8 encrypt_header_len = 0; + u8 offset; const struct iphdr *ip; if (!ieee80211_is_data(fc)) - return false; - - ip = (const struct iphdr *)(skb->data + mac_hdr_len + - SNAP_SIZE + PROTOC_TYPE_SIZE); - ether_type = be16_to_cpup((__be16 *) - (skb->data + mac_hdr_len + SNAP_SIZE)); - - switch (ether_type) { - case ETH_P_IP: { - struct udphdr *udp; - u16 src; - u16 dst; + goto end; - if (ip->protocol != IPPROTO_UDP) - return false; - udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); - src = be16_to_cpu(udp->source); - dst = be16_to_cpu(udp->dest); - - /* If this case involves port 68 (UDP BOOTP client) connecting - * with port 67 (UDP BOOTP server), then return true so that - * the lowest speed is used. - */ - if (!((src == 68 && dst == 67) || (src == 67 && dst == 68))) - return false; - - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, - "dhcp %s !!\n", is_tx ? "Tx" : "Rx"); + switch (rtlpriv->sec.pairwise_enc_algorithm) { + case WEP40_ENCRYPTION: + case WEP104_ENCRYPTION: + encrypt_header_len = 4;/*WEP_IV_LEN*/ break; - } - case ETH_P_ARP: + case TKIP_ENCRYPTION: + encrypt_header_len = 8;/*TKIP_IV_LEN*/ break; - case ETH_P_PAE: - RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, - "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); + case AESCCMP_ENCRYPTION: + encrypt_header_len = 8;/*CCMP_HDR_LEN;*/ break; - case ETH_P_IPV6: - /* TODO: Is this right? */ - return false; default: - return false; + break; } - if (is_tx) { - rtlpriv->enter_ps = false; - schedule_work(&rtlpriv->works.lps_change_work); - ppsc->last_delaylps_stamp_jiffies = jiffies; + + offset = mac_hdr_len + SNAP_SIZE + encrypt_header_len; + ether_type = be16_to_cpup((__be16 *)(skb->data + offset)); + + if (ETH_P_IP == ether_type) { + ip = (struct iphdr *)((u8 *)skb->data + offset + + PROTOC_TYPE_SIZE); + if (IPPROTO_UDP == ip->protocol) { + struct udphdr *udp = (struct udphdr *)((u8 *)ip + + (ip->ihl << 2)); + if (((((u8 *)udp)[1] == 68) && + (((u8 *)udp)[3] == 67)) || + ((((u8 *)udp)[1] == 67) && + (((u8 *)udp)[3] == 68))) { + /* 68 : UDP BOOTP client + * 67 : UDP BOOTP server + */ + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), + DBG_DMESG, "dhcp %s !!\n", + (is_tx) ? "Tx" : "Rx"); + + if (is_tx) + setup_arp_tx(rtlpriv, ppsc); + return true; + } + } + } else if (ETH_P_ARP == ether_type) { + if (is_tx) + setup_arp_tx(rtlpriv, ppsc); + + return true; + } else if (ETH_P_PAE == ether_type) { + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, + "802.1X %s EAPOL pkt!!\n", (is_tx) ? 
"Tx" : "Rx"); + + if (is_tx) { + rtlpriv->ra.is_special_data = true; + rtlpriv->enter_ps = false; + schedule_work(&rtlpriv->works.lps_change_work); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + + return true; + } else if (0x86DD == ether_type) { + return true; } - return true; + +end: + rtlpriv->ra.is_special_data = false; + return false; } EXPORT_SYMBOL_GPL(rtl_is_special_data); @@ -1128,12 +1329,11 @@ EXPORT_SYMBOL_GPL(rtl_is_special_data); * functions called by core.c * *********************************************************/ -int rtl_tx_agg_start(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) +int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tid_data *tid_data; - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_sta_info *sta_entry = NULL; if (sta == NULL) @@ -1147,43 +1347,38 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, return -ENXIO; tid_data = &sta_entry->tids[tid]; - RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d seq:%d\n", - sta->addr, tid, tid_data->seq_number); + RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, + "on ra = %pM tid = %d seq:%d\n", sta->addr, tid, + tid_data->seq_number); *ssn = tid_data->seq_number; tid_data->agg.agg_state = RTL_AGG_START; - ieee80211_start_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid); - + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } -int rtl_tx_agg_stop(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u16 tid) +int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_tid_data *tid_data; struct rtl_sta_info *sta_entry = NULL; if (sta == NULL) return -EINVAL; - if (!sta->addr) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n"); - return -EINVAL; - } - - RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d\n", - sta->addr, tid); + RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, + "on ra = %pM tid = %d\n", sta->addr, tid); if (unlikely(tid >= MAX_TID_COUNT)) return -EINVAL; sta_entry = (struct rtl_sta_info *)sta->drv_priv; + tid_data = &sta_entry->tids[tid]; sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP; - ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid); - + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } @@ -1222,11 +1417,6 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw, if (sta == NULL) return -EINVAL; - if (!sta->addr) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n"); - return -EINVAL; - } - RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d\n", sta->addr, tid); @@ -1238,7 +1428,6 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw, return 0; } - int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid) { @@ -1248,13 +1437,8 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw, if (sta == NULL) return -EINVAL; - if (!sta->addr) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "ra = NULL\n"); - return -EINVAL; - } - - RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "on ra = %pM tid = %d\n", - sta->addr, tid); + RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, + "on ra = %pM tid = %d\n", sta->addr, tid); if (unlikely(tid >= MAX_TID_COUNT)) return -EINVAL; @@ -1292,7 +1476,7 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb) return; /* and only beacons from the associated BSSID, please */ - if 
(!ether_addr_equal_64bits(hdr->addr3, rtlpriv->mac80211.bssid)) + if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) return; rtlpriv->link_info.bcn_rx_inperiod++; @@ -1332,8 +1516,7 @@ void rtl_watchdog_wq_callback(void *data) mac->cnt_after_linked = 0; } - /* - *<2> to check if traffic busy, if + /* <2> to check if traffic busy, if * busytraffic we don't change channel */ if (mac->link_state >= MAC80211_LINKED) { @@ -1381,32 +1564,29 @@ void rtl_watchdog_wq_callback(void *data) for (tid = 0; tid <= 7; tid++) { for (idx = 0; idx <= 2; idx++) rtlpriv->link_info.tidtx_in4period[tid][idx] = - rtlpriv->link_info.tidtx_in4period[tid] - [idx + 1]; + rtlpriv->link_info.tidtx_in4period[tid] + [idx + 1]; rtlpriv->link_info.tidtx_in4period[tid][3] = rtlpriv->link_info.tidtx_inperiod[tid]; for (idx = 0; idx <= 3; idx++) tidtx_inp4eriod[tid] += - rtlpriv->link_info.tidtx_in4period[tid][idx]; + rtlpriv->link_info.tidtx_in4period[tid][idx]; aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4; if (aver_tidtx_inperiod[tid] > 5000) rtlpriv->link_info.higher_busytxtraffic[tid] = - true; + true; else rtlpriv->link_info.higher_busytxtraffic[tid] = - false; + false; } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) - rtlpriv->enter_ps = true; + rtl_lps_enter(hw); else - rtlpriv->enter_ps = false; - - /* LeisurePS only work in infra mode. */ - schedule_work(&rtlpriv->works.lps_change_work); + rtl_lps_leave(hw); } rtlpriv->link_info.num_rx_inperiod = 0; @@ -1421,32 +1601,37 @@ void rtl_watchdog_wq_callback(void *data) rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic; /* <3> DM */ - rtlpriv->cfg->ops->dm_watchdog(hw); + if (!rtlpriv->cfg->mod_params->disable_watchdog) + rtlpriv->cfg->ops->dm_watchdog(hw); /* <4> roaming */ if (mac->link_state == MAC80211_LINKED && mac->opmode == NL80211_IFTYPE_STATION) { if ((rtlpriv->link_info.bcn_rx_inperiod + - rtlpriv->link_info.num_rx_inperiod) == 0) { + rtlpriv->link_info.num_rx_inperiod) == 0) { rtlpriv->link_info.roam_times++; RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "AP off for %d s\n", - (rtlpriv->link_info.roam_times * 2)); + (rtlpriv->link_info.roam_times * 2)); - /* if we can't recv beacon for 6s, we should - * reconnect this AP + /* if we can't recv beacon for 10s, + * we should reconnect this AP */ - if ((rtlpriv->link_info.roam_times >= 3) && - !is_zero_ether_addr(rtlpriv->mac80211.bssid)) { + if (rtlpriv->link_info.roam_times >= 5) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AP off, try to reconnect now\n"); rtlpriv->link_info.roam_times = 0; - ieee80211_connection_loss(rtlpriv->mac80211.vif); + ieee80211_connection_loss( + rtlpriv->mac80211.vif); } } else { rtlpriv->link_info.roam_times = 0; } } + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + rtlpriv->link_info.bcn_rx_inperiod = 0; } @@ -1461,7 +1646,6 @@ void rtl_watch_dog_timer_callback(unsigned long data) mod_timer(&rtlpriv->works.watchdog_timer, jiffies + MSECS(RTL_WATCH_DOG_TIME)); } - void rtl_fwevt_wq_callback(void *data) { struct rtl_works *rtlworks = @@ -1471,7 +1655,6 @@ void rtl_fwevt_wq_callback(void *data) rtlpriv->cfg->ops->c2h_command_handle(hw); } - void rtl_easy_concurrent_retrytimer_callback(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; @@ -1483,7 +1666,6 @@ void rtl_easy_concurrent_retrytimer_callback(unsigned long data) rtlpriv->cfg->ops->dualmac_easy_concurrent(hw); } - 
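
The watchdog hunk above also changes how leisure power save is driven: instead of setting rtlpriv->enter_ps and scheduling lps_change_work, the driver now calls rtl_lps_enter() or rtl_lps_leave() directly from the per-period traffic check. The sketch below restates that threshold test on its own so the heuristic is easier to see. It is illustrative only: the names period_traffic and link_busy_this_period are stand-ins, not symbols from the patch; the thresholds (more than 8 frames in total, or more than 2 received frames, per watchdog period) are the ones used in the hunk, where a "busy" result is fed to rtl_lps_enter(hw) and an idle result to rtl_lps_leave(hw).

	/* Minimal, self-contained sketch of the traffic test used by the
	 * watchdog above.  Names here are hypothetical, not driver symbols.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct period_traffic {
		unsigned int num_rx_inperiod;	/* frames received this watchdog period */
		unsigned int num_tx_inperiod;	/* frames sent this watchdog period */
	};

	static bool link_busy_this_period(const struct period_traffic *t)
	{
		return (t->num_rx_inperiod + t->num_tx_inperiod) > 8 ||
		       t->num_rx_inperiod > 2;
	}

	int main(void)
	{
		struct period_traffic t = { .num_rx_inperiod = 3, .num_tx_inperiod = 1 };

		/* 4 frames total but more than 2 received: counts as busy, prints 1 */
		printf("busy: %d\n", link_busy_this_period(&t));
		return 0;
	}
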
/********************************************************* * * frame process functions @@ -1511,7 +1693,8 @@ u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie) /* when we use 2 rx ants we send IEEE80211_SMPS_OFF */ /* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw, - enum ieee80211_smps_mode smps, u8 *da, u8 *bssid) + enum ieee80211_smps_mode smps, + u8 *da, u8 *bssid) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct sk_buff *skb; @@ -1536,6 +1719,9 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw, case IEEE80211_SMPS_AUTOMATIC:/* 0 */ case IEEE80211_SMPS_NUM_MODES:/* 4 */ WARN_ON(1); + /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it. + * According to Kernel Code, here is right. + */ case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/ action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */ @@ -1554,8 +1740,8 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw, } int rtl_send_smps_action(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - enum ieee80211_smps_mode smps) + struct ieee80211_sta *sta, + enum ieee80211_smps_mode smps) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1590,6 +1776,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = (struct rtl_sta_info *) sta->drv_priv; sta_entry->mimo_ps = smps; + /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */ info->control.rates[0].idx = 0; info->band = hw->conf.chandef.chan->band; @@ -1631,10 +1818,10 @@ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) } EXPORT_SYMBOL(rtl_phy_scan_operation_backup); -/* There seem to be issues in mac80211 regarding when del ba frames can be - * received. As a work around, we make a fake del_ba if we receive a ba_req; - * however, rx_agg was opened to let mac80211 release some ba related - * resources. This del_ba is for tx only. 
+/* because mac80211 have issues when can receive del ba + * so here we just make a fake del_ba if we receive a ba_req + * but rx_agg was opened to let mac80211 release some ba + * related resources, so please this del_ba for tx */ struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa, u8 *bssid, u16 tid) @@ -1660,7 +1847,7 @@ struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, action_frame->u.action.category = WLAN_CATEGORY_BACK; action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA; params = (u16)(1 << 11); /* bit 11 initiator */ - params |= (u16)(tid << 12); /* bit 15:12 TID number */ + params |= (u16)(tid << 12); /* bit 15:12 TID number */ action_frame->u.action.u.delba.params = cpu_to_le16(params); action_frame->u.action.u.delba.reason_code = @@ -1675,7 +1862,7 @@ struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, * *********************************************************/ static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw, - struct octet_string vendor_ie) + struct octet_string vendor_ie) { struct rtl_priv *rtlpriv = rtl_priv(hw); bool matched = false; @@ -1848,11 +2035,13 @@ static ssize_t rtl_store_debug_level(struct device *d, ret = kstrtoul(buf, 0, &val); if (ret) { - printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf); + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, + "%s is not in hex or decimal form.\n", buf); } else { rtlpriv->dbg.global_debuglevel = val; - printk(KERN_DEBUG "debuglevel:%x\n", - rtlpriv->dbg.global_debuglevel); + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, + "debuglevel:%x\n", + rtlpriv->dbg.global_debuglevel); } return strnlen(buf, count); @@ -1892,7 +2081,7 @@ EXPORT_SYMBOL_GPL(rtl_global_var); static int __init rtl_core_module_init(void) { if (rtl_rate_control_register()) - pr_err("Unable to register rtl_rc, use default RC !!\n"); + pr_err("rtl: Unable to register rtl_rc, use default RC !!\n"); /* init some global vars */ INIT_LIST_HEAD(&rtl_global_var.glb_priv_list); diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h index 0cd07420777a702afca02422cab06f16e06a9121..982f2450feeade4c452ff84474ac8b6aaf493c19 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/rtlwifi/base.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -41,7 +37,7 @@ enum ap_peer { PEER_MARV = 7, PEER_AIRGO = 9, PEER_MAX = 10, -} ; +}; #define RTL_DUMMY_OFFSET 0 #define RTL_DUMMY_UNIT 8 @@ -55,6 +51,16 @@ enum ap_peer { #define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */ #define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */ +#define MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS9 867 /* Mbps */ +#define MAX_BIT_RATE_SHORT_GI_2NSS_80MHZ_MCS7 650 /* Mbps */ +#define MAX_BIT_RATE_LONG_GI_2NSS_80MHZ_MCS9 780 /* Mbps */ +#define MAX_BIT_RATE_LONG_GI_2NSS_80MHZ_MCS7 585 /* Mbps */ + +#define MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS9 434 /* Mbps */ +#define MAX_BIT_RATE_SHORT_GI_1NSS_80MHZ_MCS7 325 /* Mbps */ +#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS9 390 /* Mbps */ +#define MAX_BIT_RATE_LONG_GI_1NSS_80MHZ_MCS7 293 /* Mbps */ + #define RTL_RATE_COUNT_LEGACY 12 #define RTL_CHANNEL_COUNT 14 @@ -78,9 +84,9 @@ enum ap_peer { #define SET_80211_PS_POLL_AID(_hdr, _val) \ (*(u16 *)((u8 *)(_hdr) + 2) = _val) #define SET_80211_PS_POLL_BSSID(_hdr, _val) \ - memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) + ether_addr_copy(((u8 *)(_hdr)) + 4, (u8 *)(_val)) #define SET_80211_PS_POLL_TA(_hdr, _val) \ - memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN) + ether_addr_copy(((u8 *)(_hdr))+10, (u8 *)(_val)) #define SET_80211_HDR_DURATION(_hdr, _val) \ (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val)) @@ -113,23 +119,27 @@ void rtl_init_rx_config(struct ieee80211_hw *hw); void rtl_init_rfkill(struct ieee80211_hw *hw); void rtl_deinit_rfkill(struct ieee80211_hw *hw); -void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); +void rtl_watch_dog_timer_callback(unsigned long data); void rtl_deinit_deferred_work(struct ieee80211_hw *hw); bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); +int rtlwifi_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate, bool first_ampdu); +bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); +void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); void rtl_watch_dog_timer_callback(unsigned long data); -int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - u16 tid, u16 *ssn); -int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - u16 tid); -int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - u16 tid); -int rtl_rx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - u16 tid); -int rtl_rx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - u16 tid); +int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int rtl_tx_agg_oper(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); +int rtl_rx_agg_start(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); +int rtl_rx_agg_stop(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); void rtl_watchdog_wq_callback(void *data); void rtl_fwevt_wq_callback(void *data); @@ -139,19 +149,14 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc); int rtl_send_smps_action(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - enum ieee80211_smps_mode smps); + struct ieee80211_sta *sta, + enum ieee80211_smps_mode smps); u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); void rtl_recognize_peer(struct 
ieee80211_hw *hw, u8 *data, unsigned int len); u8 rtl_tid_to_ac(u8 tid); extern struct attribute_group rtl_attribute_group; void rtl_easy_concurrent_retrytimer_callback(unsigned long data); extern struct rtl_global_var rtl_global_var; -int rtlwifi_rate_mapping(struct ieee80211_hw *hw, - bool isht, u8 desc_rate, bool first_ampdu); -bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); -struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, - u8 *sa, u8 *bssid, u16 tid); void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation); #endif diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h index d76684eb24d0913b1fffed06e0a2e1c1654daf27..39b9a3309cfd653f5fe0abbcaeb619b002b77dc2 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h @@ -37,7 +37,13 @@ #include "halbtcoutsrc.h" +#include "halbtc8192e2ant.h" +#include "halbtc8723b1ant.h" #include "halbtc8723b2ant.h" +#include "halbtc8821a2ant.h" +#include "halbtc8821a1ant.h" + +#define GetDefaultAdapter(padapter) padapter #define BIT0 0x00000001 #define BIT1 0x00000002 diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c new file mode 100644 index 0000000000000000000000000000000000000000..53261d6f857810ed1bf40f4d163760d338fa4cb4 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -0,0 +1,3849 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +/************************************************************** + * Description: + * + * This file is for RTL8192E Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. 
+ * + **************************************************************/ + +/************************************************************** + * include files + **************************************************************/ +#include "halbt_precomp.h" +/************************************************************** + * Global variables, these are static variables + **************************************************************/ +static struct coex_dm_8192e_2ant glcoex_dm_8192e_2ant; +static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant; +static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant; +static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant; + +static const char *const GLBtInfoSrc8192e2Ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +static u32 glcoex_ver_date_8192e_2ant = 20130902; +static u32 glcoex_ver_8192e_2ant = 0x34; + +/************************************************************** + * local function proto type if needed + **************************************************************/ +/************************************************************** + * local function start with halbtc8192e2ant_ + **************************************************************/ +static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + int btrssi = 0; + u8 btrssi_state = coex_sta->pre_bt_rssi_state; + + btrssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state = LOW\n"); + if (btrssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to High\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state = HIGH\n"); + if (btrssi < rssi_thresh) { + btrssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state = LOW\n"); + if (btrssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi pre state = MEDIUM\n"); + if (btrssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, 
ALGO_BT_RSSI_STATE, + "BT Rssi state switch to High\n"); + } else if (btrssi < rssi_thresh) { + btrssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Medium\n"); + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state = HIGH\n"); + if (btrssi < rssi_thresh1) { + btrssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = btrssi_state; + + return btrssi_state; +} + +static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + int wifirssi = 0; + u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifirssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); + } + } else { + if (wifirssi < rssi_thresh) { + wifirssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifirssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Medium\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifirssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); + } else if (wifirssi < rssi_thresh) { + wifirssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Medium\n"); + } + } else { + if (wifirssi < rssi_thresh1) { + wifirssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + 
"wifi RSSI state switch to Medium\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifirssi_state; + + return wifirssi_state; +} + +static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled; + static u32 bt_disable_cnt; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled */ + + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + bt_active = false; + + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + bt_active = false; + + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + } + } + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? "disabled" : "enabled")); + pre_bt_disabled = bt_disabled; + } +} + +static u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist, + u8 sstype, u32 ra_masktype) +{ + u32 disra_mask = 0x0; + + switch (ra_masktype) { + case 0: /* normal mode */ + if (sstype == 2) + disra_mask = 0x0; /* enable 2ss */ + else + disra_mask = 0xfff00000;/* disable 2ss */ + break; + case 1: /* disable cck 1/2 */ + if (sstype == 2) + disra_mask = 0x00000003;/* enable 2ss */ + else + disra_mask = 0xfff00003;/* disable 2ss */ + break; + case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */ + if (sstype == 2) + disra_mask = 0x0001f1f7;/* enable 2ss */ + else + disra_mask = 0xfff1f1f7;/* disable 2ss */ + break; + default: + break; + } + + return disra_mask; +} + +static void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_ratemask) +{ + coex_dm->curra_mask = dis_ratemask; + + if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + &coex_dm->curra_mask); + coex_dm->prera_mask = coex_dm->curra_mask; +} + +static void btc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + bool wifi_under_bmode = false; + + coex_dm->cur_arfrtype = type; + + if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) { + switch (coex_dm->cur_arfrtype) { + case 0: /* normal mode */ + btcoexist->btc_write_4byte(btcoexist, 0x430, + coex_dm->backup_arfr_cnt1); + btcoexist->btc_write_4byte(btcoexist, 0x434, + coex_dm->backup_arfr_cnt2); + break; + case 1: + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_bmode); + if (wifi_under_bmode) { + btcoexist->btc_write_4byte(btcoexist, 0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 0x01010101); + } else { + btcoexist->btc_write_4byte(btcoexist, 
0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 0x04030201); + } + break; + default: + break; + } + } + + coex_dm->pre_arfrtype = coex_dm->cur_arfrtype; +} + +static void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_retrylimit_type = type; + + if (force_exec || (coex_dm->pre_retrylimit_type != + coex_dm->cur_retrylimit_type)) { + switch (coex_dm->cur_retrylimit_type) { + case 0: /* normal mode */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retrylimit); + break; + case 1: /* retry limit = 8 */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + 0x0808); + break; + default: + break; + } + } + + coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type; +} + +static void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_ampdutime_type = type; + + if (force_exec || (coex_dm->pre_ampdutime_type != + coex_dm->cur_ampdutime_type)) { + switch (coex_dm->cur_ampdutime_type) { + case 0: /* normal mode */ + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_maxtime); + break; + case 1: /* AMPDU timw = 0x38 * 32us */ + btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); + break; + default: + break; + } + } + + coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type; +} + +static void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_masktype, + u8 arfr_type, u8 retrylimit_type, + u8 ampdutime_type) +{ + u32 disra_mask = 0x0; + + coex_dm->curra_masktype = ra_masktype; + disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, + coex_dm->cur_sstype, + ra_masktype); + halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask); +btc8192e2ant_autorate_fallback_retry(btcoexist, force_exec, arfr_type); + halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type); + halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type); +} + +static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool bt_ctrl_agg_buf_size, + u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; + u8 rx_agg_size = agg_buf_size; + + /********************************************* + * Rx Aggregation related setting + *********************************************/ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, only work + * when BT control Rx aggregation size. 
+ */ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); +} + +static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD)>>16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD)>>16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hson = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
*/ + if (bt_hson) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only */ + if (bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && + bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + bool bt_hson = false; + u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED; + u8 numdiffprofile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + numdiffprofile++; + if (bt_link_info->hid_exist) + numdiffprofile++; + if (bt_link_info->pan_exist) + numdiffprofile++; + if (bt_link_info->a2dp_exist) + numdiffprofile++; + + if (numdiffprofile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "PAN(HS) only\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "PAN(EDR) only\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (numdiffprofile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP ==> SCO\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_SCO_PAN; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + if (stack_info->num_of_hid >= 2) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID*2 + A2DP\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP; + } + } else if (bt_link_info->hid_exist && + 
bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP + PAN(HS)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (numdiffprofile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_SCO_PAN; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP + PAN(HS)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (numdiffprofile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "ErrorSCO+HID+A2DP+PAN(HS)\n"); + + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO+HID+A2DP+PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, + u8 dac_swinglvl) +{ + u8 h2c_parameter[1] = {0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + */ + h2c_parameter[0] = dac_swinglvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, + u8 dec_btpwr_lvl) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = dec_btpwr_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n", + dec_btpwr_lvl, h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, + bool force_exec, u8 dec_btpwr_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power level = %d\n", + (force_exec ? 
"force to" : ""), dec_btpwr_lvl); + coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + } + halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, + bool enable_autoreport) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (enable_autoreport) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_autoreport ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, + bool force_exec, + bool enable_autoreport) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_autoreport) ? "Enabled" : "Disabled")); + coex_dm->cur_bt_auto_report = enable_autoreport; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8192e2ant_set_bt_autoreport(btcoexist, + coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swinglvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swinglvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if (coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + halbtc8192e2ant_setfw_dac_swinglevel(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /* Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffffc); + } else { + /* Resume RF Rx LPF corner + * After initialized, we can use coex_dm->btRf0x1eBackup + */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), + ((rx_rf_shrink_on) ? 
"ON" : "OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + btc8192e2ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, + u32 level) +{ + u8 val = (u8)level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); +} + +static void btc8192e2ant_setsw_full_swing(struct btc_coexist *btcoexist, + bool sw_dac_swingon, + u32 sw_dac_swinglvl) +{ + if (sw_dac_swingon) + halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl); + else + halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18); +} + +static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swingon, + u32 dac_swinglvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n", + (force_exec ? "force to" : ""), + ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl); + coex_dm->cur_dac_swing_on = dac_swingon; + coex_dm->cur_dac_swing_lvl = dac_swinglvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n", + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + btc8192e2ant_setsw_full_swing(btcoexist, dac_swingon, dac_swinglvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, + bool agc_table_en) +{ + /* BB AGC Gain Table */ + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x071D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001); + } +} + +static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, + bool force_exec, bool agc_table_en) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + ((agc_table_en) ? 
"Enable" : "Disable")); + coex_dm->cur_agc_table_en = agc_table_en; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + + if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ", + (force_exec ? "force to" : ""), val0x6c0); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + val0x6c4, val0x6c8, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 1: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 2: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5ffb5ffb, 0xffffff, 0x3); + break; + case 3: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 
0x5fdb5fdb, 0xffffff, 0x3); + break; + case 4: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 0x5ffb5ffb, 0xffffff, 0x3); + break; + default: + break; + } +} + +static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + if (enable) + h2c_parameter[0] |= BIT0; /* function enable */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d ", + coex_dm->pre_ignore_wlan_act); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, + u8 byte2, u8 byte3, u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] = {0}; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +static void btc8192e2ant_sw_mec1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool btlan_constrain) +{ + halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); +} + +static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swinglvl) +{ + halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift); + halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swinglvl); +} + +static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? 
"ON" : "OFF"), type); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + case 1: + default: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 2: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 3: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); + break; + case 4: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, 0x90); + break; + case 5: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 6: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 7: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); + break; + case 8: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); + break; + case 9: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x10); + break; + case 10: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x10); + break; + case 11: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x10); + break; + case 12: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, 0x10); + break; + case 13: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe0, 0x10); + break; + case 14: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe0, 0x10); + break; + case 15: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf0, 0x10); + break; + case 16: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x3, 0xf0, 0x10); + break; + case 17: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20, + 0x03, 0x10, 0x10); + break; + case 18: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 19: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); + break; + case 20: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); + break; + case 21: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 71: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + default: + case 0: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, + 0x0, 0x0); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4); + break; + case 1: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, + 0x8, 0x0); + mdelay(5); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + break; + } + } + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, + u8 sstype) +{ + u8 mimops = BTC_MIMO_PS_DYNAMIC; + u32 disra_mask = 0x0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], REAL set SS Type = %d\n", sstype); + + disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, + coex_dm->curra_masktype); + halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, 
disra_mask); + + if (sstype == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + /* switch ofdm path */ + btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11); + btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1); + btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81111111); + /* switch cck patch */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1); + btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81); + mimops = BTC_MIMO_PS_STATIC; + } else if (sstype == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33); + btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3); + btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x0); + btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x41); + mimops = BTC_MIMO_PS_DYNAMIC; + } + /* set rx 1ss or 2ss */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops); +} + +static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist, + bool force_exec, u8 new_sstype) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], %s Switch SS Type = %d\n", + (force_exec ? "force to" : ""), new_sstype); + coex_dm->cur_sstype = new_sstype; + + if (!force_exec) { + if (coex_dm->pre_sstype == coex_dm->cur_sstype) + return; + } + halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype); + + coex_dm->pre_sstype = coex_dm->cur_sstype; +} + +static void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist) +{ + /* fw all off */ + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + /* sw all off */ + btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + + /* hw all off */ + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0); +} + +static void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism */ + + halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0); + + btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0); + halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2); + + btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); +} + +static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + bool low_pwr_disable = true; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); +} + +static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool common = false, wifi_connected = false, wifi_busy = false; + bool bt_hson = false, low_pwr_disable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + 
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (bt_link_info->sco_exist || bt_link_info->hid_exist) + halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0); + else + halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + + if (!wifi_connected) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); + + if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 2); + btc8192e2ant_coex_tbl_w_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + } else { + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_coex_tbl_w_type(btcoexist, + NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mec1(btcoexist, false, false, false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, false, 0x18); + + common = true; + } else { + if (BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi connected + BT non connected-idle!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 2); + btc8192e2ant_coex_tbl_w_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, + NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + + common = true; + } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (bt_hson) + return false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi connected + BT connected-idle!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 2); + btc8192e2ant_coex_tbl_w_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, + NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + + common = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi Connected-Busy + BT Busy!!\n"); + common = false; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi Connected-Idle + BT Busy!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 1); + btc8192e2ant_coex_tbl_w_type(btcoexist, + NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 21); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, + NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, + NORMAL_EXEC, 0); + btc8192e2ant_sw_mec1(btcoexist, false, + false, false, false); + 
btc8192e2ant_sw_mec2(btcoexist, false, + false, false, 0x18); + common = true; + } + } + } + return common; +} + +static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + + if (coex_dm->cur_ps_tdma == 71) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->tdma_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; 
+ } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->tdma_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } + } + } +} + +static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + 
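+ /* tx paused: remap case 9 to case 14 for the max_interval == 2 handling */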
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + 
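+ /* result == -1 (reduce WiFi duration): step case 2 on to case 3 */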
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } + } + } +} + +static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 
16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + 
coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } +} + +static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) +{ + static int up, dn, m, n, wait_cnt; + /* 0: no change, +1: increase WiFi duration, + * -1: decrease WiFi duration + */ + int result; + u8 retry_cnt = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + if (sco_hid) { + if (tx_pause) { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } else { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } else { + if (tx_pause) { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } + } else { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } + } + } + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_cnt = 0; + } else { + /* accquire the BT TRx retry count from BT_Info byte2 */ + retry_cnt = coex_sta->bt_retry_cnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_cnt = %d\n", retry_cnt); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", + up, dn, m, n, wait_cnt); + result = 0; + wait_cnt++; + /* no retry in the last 2-second duration */ + if (retry_cnt == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_cnt = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex]Increase wifi duration!!\n"); + } + } else if (retry_cnt <= 3) { + 
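+ /* 1~3 BT retries in the last 2-second period: bias the counters toward
+ * a shorter WiFi duration; when dn reaches 2, report result = -1.
+ */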
up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_cnt <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_cnt = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "Reduce wifi duration for retry<3\n"); + } + } else { + if (wait_cnt == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_cnt = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "Decrease wifi duration for retryCounter>3!!\n"); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); + if (max_interval == 1) + btc8192e_int1(btcoexist, tx_pause, result); + else if (max_interval == 2) + btc8192e_int2(btcoexist, tx_pause, result); + else if (max_interval == 3) + btc8192e_int3(btcoexist, tx_pause, result); + } + + /* if current PsTdma not match with + * the recorded one (when scan, dhcp...), + * then we have to adjust it back to the previous record one. + */ + if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { + bool scan = false, link = false, roam = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, "); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (!scan && !link && !roam) + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, + coex_dm->tdma_adj_type); + else + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + } +} + +/* SCO only or SCO+PAN(HS) */ +static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); + + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x6); + 
} + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x6); + } + } +} + +static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4); + + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x6); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x6); + } + } +} + +static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } else if ((btrssi_state == 
BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + bool long_dist = false; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + if ((btrssi_state == BTC_RSSI_STATE_LOW || + btrssi_state == BTC_RSSI_STATE_STAY_LOW) && + (wifirssi_state == BTC_RSSI_STATE_LOW || + wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); + long_dist = true; + } + if (long_dist) { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, + 0x4); + } else { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, + 0x8); + } + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + if (long_dist) + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 0); + else + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + + if (long_dist) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17); + coex_dm->auto_tdma_adjust = false; + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else { + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + true, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if 
((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + true, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + true, 0x6); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + true, 0x6); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + true, 0x6); + } + } +} + +static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == 
BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(HS) only */ +static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(EDR)+A2DP */ +static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, 
NORMAL_EXEC, 2); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, false, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + 
btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + 
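+ /* For the meaning of the btc8192e2ant_sw_mec1()/_sw_mec2() toggles, see + * the SM1[ShRf/ LpRA/ LimDig] and SM2[AgcT/ AdcB/ SwDacSwing(lvl)] fields + * dumped in ex_halbtc8192e2ant_display_coex_info(). */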
if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, true, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, true, false, + false, 0x18); + } else { + btc8192e2ant_sw_mec1(btcoexist, false, true, + false, false); + btc8192e2ant_sw_mec2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], return for Manual CTRL <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + algorithm = halbtc8192e2ant_action_algorithm(btcoexist); + if (coex_sta->c2h_bt_inquiry_page && + (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); + halbtc8192e2ant_action_bt_inquiry(btcoexist); + return; + } + + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + + if (halbtc8192e2ant_is_common_action(btcoexist)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant common.\n"); + coex_dm->auto_tdma_adjust = false; + } else { + if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); + coex_dm->auto_tdma_adjust = false; + } + switch (coex_dm->cur_algorithm) { + case BT_8192E_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = SCO.\n"); + halbtc8192e2ant_action_sco(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = SCO+PAN(EDR).\n"); + halbtc8192e2ant_action_sco_pan(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID.\n"); + halbtc8192e2ant_action_hid(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP.\n"); + halbtc8192e2ant_action_a2dp(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); + halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR).\n"); + halbtc8192e2ant_action_pan_edr(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HS mode.\n"); + halbtc8192e2ant_action_pan_hs(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN+A2DP.\n"); + halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); + 
break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); + halbtc8192e2ant_action_pan_edr_hid(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); + btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP.\n"); + halbtc8192e2ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = unknown!!\n"); + /* halbtc8192e2ant_coex_alloff(btcoexist); */ + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, + bool backup) +{ + u16 u16tmp = 0; + u8 u8tmp = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); + + if (backup) { + /* backup rf 0x1e value */ + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, + 0x1e, 0xfffff); + + coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, + 0x430); + coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist, + 0x434); + coex_dm->backup_retrylimit = btcoexist->btc_read_2byte( + btcoexist, + 0x42a); + coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte( + btcoexist, + 0x456); + } + + /* antenna sw ctrl to bt */ + btcoexist->btc_write_1byte(btcoexist, 0x4f, 0x6); + btcoexist->btc_write_1byte(btcoexist, 0x944, 0x24); + btcoexist->btc_write_4byte(btcoexist, 0x930, 0x700700); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + if (btcoexist->chip_interface == BTC_INTF_USB) + btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30430004); + else + btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004); + + btc8192e2ant_coex_tbl_w_type(btcoexist, FORCE_EXEC, 0); + + /* antenna switch control parameter */ + btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555); + + /* coex parameters */ + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + /* 0x790[5:0] = 0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + /* enable counter statistics */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); + + /* enable PTA */ + btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20); + /* enable mailbox interface */ + u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x40); + u16tmp |= BIT9; + btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp); + + /* enable PTA I2C mailbox */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101); + u8tmp |= BIT4; + btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp); + + /* enable bt clock when wifi is disabled. */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x93); + u8tmp |= BIT0; + btcoexist->btc_write_1byte(btcoexist, 0x93, u8tmp); + /* enable bt clock when suspend. 
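+ * (0x7[0], the suspend counterpart of the 0x93[0] wifi-disabled setting above)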
*/ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x7); + u8tmp |= BIT0; + btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp); +} + +/************************************************************* + * work around function start with wa_halbtc8192e2ant_ + *************************************************************/ + +/************************************************************ + * extern function start with EXhalbtc8192e2ant_ + ************************************************************/ + +void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + halbtc8192e2ant_init_hwconfig(btcoexist, true); +} + +void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + halbtc8192e2ant_init_coex_dm(btcoexist); +} + +void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; + u16 u16tmp[4]; + u32 u32tmp[4]; + bool roam = false, scan = false, link = false, wifi_under_5g = false; + bool bt_hson = false, wifi_busy = false; + int wifirssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[BT Coexist info]============"); + + if (btcoexist->manual_control) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ===========[Under Manual Control]==========="); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n =========================================="); + } + + if (!board_info->bt_exist) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); + return; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsMode(HsChnl)", + wifi_dot11_chnl, bt_hson, wifi_hs_chnl); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], + coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? + "uplink" : "downlink"))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? + ("inquiry/page scan") : + ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? "non-connected idle" : + ((BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? "connected-idle" : "busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", stack_info->sco_exist, + stack_info->hid_exist, stack_info->pan_exist, + stack_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0) ? 
"Basic rate" : "EDR rate"); + + for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x ", + GLBtInfoSrc8192e2Ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3]); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "%02x %02x %02x(%d)", + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + } + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type", + coex_dm->cur_sstype); + + /* Sw mechanism */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Sw mechanism]============"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); + + /* Fw mechanism */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Fw mechanism]============"); + + ps_tdma_case = coex_dm->cur_ps_tdma; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + ps_tdma_case, coex_dm->auto_tdma_adjust); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); + + /* Hw setting */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Hw setting]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit, + coex_dm->backup_ampdu_maxtime); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); + u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778", + u8tmp[0]); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c); + 
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x40/ 0x4f", u8tmp[0], u8tmp[1]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)", + u32tmp[0]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x770(hp rx[31:16]/tx[15:0])", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x774(lp rx[31:16]/tx[15:0])", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); +#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1) + halbtc8192e2ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + +void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + halbtc8192e2ant_coex_alloff(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + } +} + +void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_SCAN_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + else if (BTC_SCAN_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); +} + +void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_ASSOCIATE_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + else if (BTC_ASSOCIATE_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); +} + +void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] = {0}; + u32 wifi_bw; + u8 wifi_center_chnl; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + 
"[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifi_center_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_center_chnl <= 14)) { + h2c_parameter[0] = 0x1; + h2c_parameter[1] = wifi_center_chnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (type == BTC_PACKET_DHCP) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); +} + +void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + bool bt_busy = false, limited_dig = false; + bool wifi_connected = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmp_buf[0] & 0xf; + if (rsp_source >= BT_INFO_SRC_8192E_2ANT_MAX) + rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length-1) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); + } + + if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0] */ + coex_sta->bt_info_c2h[rsp_source][2] & 0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT + * because bt is reset and loss of the info. + */ + if ((coex_sta->bt_info_ext & BIT1)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "bit1, send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (wifi_connected) + ex_halbtc8192e2ant_media_status_notify( + btcoexist, + BTC_MEDIA_CONNECT); + else + ex_halbtc8192e2ant_media_status_notify( + btcoexist, + BTC_MEDIA_DISCONNECT); + } + + if ((coex_sta->bt_info_ext & BIT3)) { + if (!btcoexist->manual_control && + !btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "bit3, BT NOT ignore Wlan active!\n"); + halbtc8192e2ant_IgnoreWlanAct(btcoexist, + FORCE_EXEC, + false); + } + } else { + /* BT already NOT ignore Wlan active, + * do nothing here. 
+ */ + } + +#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) + if ((coex_sta->bt_info_ext & BIT4)) { + /* BT auto report already enabled, do nothing */ + } else { + halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC, + true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan */ + if (bt_info & BT_INFO_8192E_2ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status */ + if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else {/* connection exists */ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8192E_2ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + halbtc8192e2ant_update_btlink_info(btcoexist); + + if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Non-Connected idle!!!\n"); + } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); + } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || + (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); + } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); + } + + if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + bt_busy = true; + limited_dig = true; + } else { + bt_busy = false; + limited_dig = false; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8192e2ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type) +{ +} + +void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); + ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist) +{ + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + 
"=======================Periodical=======================\n"); + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "************************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "************************************************\n"); + } + +#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) + halbtc8192e2ant_querybt_info(btcoexist); + halbtc8192e2ant_monitor_bt_ctr(btcoexist); + btc8192e2ant_monitor_bt_enable_dis(btcoexist); +#else + if (halbtc8192e2ant_iswifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) + halbtc8192e2ant_run_coexist_mechanism(btcoexist); +#endif +} diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h new file mode 100644 index 0000000000000000000000000000000000000000..75e1f7d0db0627ec99a64da62680aec13c61465a --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h @@ -0,0 +1,185 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ +/***************************************************************** + * The following is for 8192E 2Ant BT Co-exist definition + *****************************************************************/ +#define BT_AUTO_REPORT_ONLY_8192E_2ANT 0 + +#define BT_INFO_8192E_2ANT_B_FTP BIT7 +#define BT_INFO_8192E_2ANT_B_A2DP BIT6 +#define BT_INFO_8192E_2ANT_B_HID BIT5 +#define BT_INFO_8192E_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8192E_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8192E_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8192E_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8192E_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT 2 + +enum bt_info_src_8192e_2ant { + BT_INFO_SRC_8192E_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8192E_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8192E_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8192E_2ANT_MAX +}; + +enum bt_8192e_2ant_bt_status { + BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8192E_2ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8192E_2ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8192E_2ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8192E_2ANT_BT_STATUS_MAX +}; + +enum bt_8192e_2ant_coex_algo { + BT_8192E_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8192E_2ANT_COEX_ALGO_SCO = 0x1, + BT_8192E_2ANT_COEX_ALGO_SCO_PAN = 0x2, + BT_8192E_2ANT_COEX_ALGO_HID = 0x3, + BT_8192E_2ANT_COEX_ALGO_A2DP = 0x4, + BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS = 0x5, + BT_8192E_2ANT_COEX_ALGO_PANEDR = 0x6, + BT_8192E_2ANT_COEX_ALGO_PANHS = 0x7, + BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP = 0x8, + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID = 0x9, + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0xa, + BT_8192E_2ANT_COEX_ALGO_HID_A2DP = 0xb, + BT_8192E_2ANT_COEX_ALGO_MAX = 0xc +}; + +struct coex_dm_8192e_2ant { + /* fw mechanism */ + u8 pre_dec_bt_pwr; + u8 cur_dec_bt_pwr; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 tdma_adj_type; + bool reset_tdma_adjust; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */ + u16 backup_retrylimit; + u8 backup_ampdu_maxtime; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u8 pre_sstype; + u8 cur_sstype; + + u32 prera_mask; + u32 curra_mask; + u8 curra_masktype; + u8 pre_arfrtype; + u8 cur_arfrtype; + u8 pre_retrylimit_type; + u8 cur_retrylimit_type; + u8 pre_ampdutime_type; + u8 cur_ampdutime_type; +}; + +struct coex_sta_8192e_2ant { + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 
low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8192E_2ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8192E_2ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/**************************************************************** + * The following is interface which will notify coex module. + ****************************************************************/ +void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist); diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c new file mode 100644 index 0000000000000000000000000000000000000000..c4acd403e5f6e25d0f88a83b54cf38de0945dbab --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -0,0 +1,3170 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +/*************************************************************** + * Description: + * + * This file is for RTL8723B Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. 
+ * + ***************************************************************/ + +/*************************************************************** + * include files + ***************************************************************/ +#include "halbt_precomp.h" +/*************************************************************** + * Global variables, these are static variables + ***************************************************************/ +static struct coex_dm_8723b_1ant glcoex_dm_8723b_1ant; +static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant; +static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant; +static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant; + +static const char *const GLBtInfoSrc8723b1Ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +static u32 glcoex_ver_date_8723b_1ant = 20130918; +static u32 glcoex_ver_8723b_1ant = 0x47; + +/*************************************************************** + * local function proto type if needed + ***************************************************************/ +/*************************************************************** + * local function start with halbtc8723b1ant_ + ***************************************************************/ +static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + s32 bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } else { + bt_rssi_state = 
BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, + BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI 
state stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +static void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_rate_mask) +{ + coex_dm->curra_mask = dis_rate_mask; + + if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + &coex_dm->curra_mask); + + coex_dm->prera_mask = coex_dm->curra_mask; +} + +static void btc8723b1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + bool wifi_under_bmode = false; + + coex_dm->cur_arfr_type = type; + + if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) { + switch (coex_dm->cur_arfr_type) { + case 0: /* normal mode */ + btcoexist->btc_write_4byte(btcoexist, 0x430, + coex_dm->backup_arfr_cnt1); + btcoexist->btc_write_4byte(btcoexist, 0x434, + coex_dm->backup_arfr_cnt2); + break; + case 1: + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_bmode); + if (wifi_under_bmode) { + btcoexist->btc_write_4byte(btcoexist, + 0x430, 0x0); + btcoexist->btc_write_4byte(btcoexist, + 0x434, 0x01010101); + } else { + btcoexist->btc_write_4byte(btcoexist, + 0x430, 0x0); + btcoexist->btc_write_4byte(btcoexist, + 0x434, 0x04030201); + } + break; + default: + break; + } + } + + coex_dm->pre_arfr_type = coex_dm->cur_arfr_type; +} + +static void halbtc8723b1ant_retry_limit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_retry_limit_type = type; + + if (force_exec || (coex_dm->pre_retry_limit_type != + coex_dm->cur_retry_limit_type)) { + switch (coex_dm->cur_retry_limit_type) { + case 0: /* normal mode */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retry_limit); + break; + case 1: /* retry limit = 8 */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); + break; + default: + break; + } + } + + coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type; +} + +static void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_ampdu_time_type = type; + + if (force_exec || (coex_dm->pre_ampdu_time_type != + coex_dm->cur_ampdu_time_type)) { + switch (coex_dm->cur_ampdu_time_type) { + case 0: /* normal mode */ + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_max_time); + break; + case 1: /* AMPDU timw = 0x38 * 32us */ + btcoexist->btc_write_1byte(btcoexist, + 0x456, 0x38); + break; + default: + break; + } + } + + coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type; +} + +static void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_masktype, + u8 arfr_type, u8 retry_limit_type, + u8 ampdu_time_type) +{ + switch (ra_masktype) { + case 0: /* normal mode */ + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0); + break; + case 1: /* disable cck 1/2 */ + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, + 0x00000003); + break; + /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/ + case 2: + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, + 0x0001f1f7); + break; + default: + break; + } + + btc8723b1ant_auto_rate_fb_retry(btcoexist, force_exec, arfr_type); + halbtc8723b1ant_retry_limit(btcoexist, force_exec, retry_limit_type); + halbtc8723b1ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type); +} + +static void halbtc8723b1ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool 
bt_ctrl_agg_buf_size, + u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; + u8 rxaggsize = agg_buf_size; + + /********************************************** + * Rx Aggregation related setting + **********************************************/ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, only work + * when BT control Rx aggregation size. + */ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxaggsize); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); +} + +static void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0; + u32 reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD) >> 16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD) >> 16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +static bool btc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy; + static bool pre_under_4way, pre_bt_hs_on; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + } + + return false; +} + +static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
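+ * An active HS link is counted as an existing PAN link below, so the + * PAN-based coexistence handling also covers HS mode.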
*/ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only */ + if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED; + u8 numdiffprofile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + numdiffprofile++; + if (bt_link_info->hid_exist) + numdiffprofile++; + if (bt_link_info->pan_exist) + numdiffprofile++; + if (bt_link_info->a2dp_exist) + numdiffprofile++; + + if (numdiffprofile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = PAN(HS) only\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = PAN(EDR) only\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (numdiffprofile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else if (bt_link_info->hid_exist && + 
bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (numdiffprofile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (numdiffprofile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] = {0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36 */ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" 
: "OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +static void halbtc8723b1ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + btc8723b1ant_set_sw_pen_tx_rate_adapt(btcoexist, + coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, + u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? "force to" : ""), + val0x6c0, val0x6c4, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8723b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffffff, 0x3); + break; + case 1: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 2: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 3: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 4: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5aaa5aaa, 0xffffff, 0x3); + break; + case 5: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0xaaaa5a5a, 0xffffff, 0x3); + break; + case 6: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaa5a5a, 0xffffff, 0x3); + break; + case 7: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + default: + break; + } +} + +static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + 
if (enable) + h2c_parameter[0] |= BIT0; /* function enable */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? "ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, + u8 byte1, u8 byte2, u8 byte3, + u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] = {0}; + u8 real_byte1 = byte1, real_byte5 = byte5; + bool ap_enable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + if ((byte1 & BIT4) && !(byte1 & BIT5)) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], FW for 1Ant AP mode\n"); + real_byte1 &= ~BIT4; + real_byte1 |= BIT5; + + real_byte5 |= BIT5; + real_byte5 &= ~BIT6; + } + } + + h2c_parameter[0] = real_byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = real_byte5; + + coex_dm->ps_tdma_para[0] = real_byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = real_byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +static void halbtc8723b1ant_set_lps_rpwm(struct btc_coexist *btcoexist, + u8 lps_val, u8 rpwm_val) +{ + u8 lps = lps_val; + u8 rpwm = rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); +} + +static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, + bool force_exec, + u8 lps_val, u8 rpwm_val) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? 
"force to" : ""), lps_val, rpwm_val); + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); + + if ((coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); + + return; + } + } + halbtc8723b1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + +static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + + halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); +} + +static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 fw_ver = 0, u32tmp = 0; + bool pg_ext_switch = false; + bool use_ext_switch = false; + u8 h2c_parameter[2] = {0}; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch); + /* [31:16] = fw ver, [15:0] = fw sub ver */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + + if ((fw_ver < 0xc0000) || pg_ext_switch) + use_ext_switch = true; + + if (init_hw_cfg) { + /*BT select s0/s1 is controlled by WiFi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); + + /*Force GNT_BT to Normal */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + } else if (wifi_off) { + /*Force GNT_BT to High */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); + /*BT select s0/s1 is controlled by BT */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); + + /* 0x4c[24:23] = 00, Set Antenna control by BT_RFE_CTRL + * BT Vendor 0xac = 0xf002 + */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + } + + if (use_ext_switch) { + if (init_hw_cfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 + * Antenna control by WL/BT + */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) { + /* Main Ant to BT for IPS case 0x4c[23] = 1 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x1); + + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /*Aux Ant to BT for IPS case 0x4c[23] = 1 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x0); + + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* fixed internal switch first*/ + /* fixed internal switch S1->WiFi, S0->BT*/ + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + else/* fixed internal switch S0->WiFi, S1->BT*/ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + + /* ext switch setting */ + switch (ant_pos_type) { + case 
BTC_ANT_PATH_WIFI: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + break; + case BTC_ANT_PATH_BT: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + break; + default: + case BTC_ANT_PATH_PTA: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + break; + } + + } else { + if (init_hw_cfg) { + /* 0x4c[23] = 1, 0x4c[24] = 0 Antenna control by 0x64*/ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp |= BIT23; + u32tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) { + /*Main Ant to WiFi for IPS case 0x4c[23] = 1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x0); + + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 0; /*internal switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /*Aux Ant to BT for IPS case 0x4c[23] = 1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x1); + + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 0; /*internal switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* fixed external switch first*/ + /*Main->WiFi, Aux->BT*/ + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x1); + else/*Main->BT, Aux->WiFi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x2); + + /* internal switch setting*/ + switch (ant_pos_type) { + case BTC_ANT_PATH_WIFI: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x0); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x280); + break; + case BTC_ANT_PATH_BT: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x280); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x0); + break; + default: + case BTC_ANT_PATH_PTA: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x200); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x80); + break; + } + } +} + +static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) +{ + bool wifi_busy = false; + u8 rssi_adjust_val = 0; + + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!force_exec) { + if (coex_dm->cur_ps_tdma_on) + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(on, %d) *********\n", + coex_dm->cur_ps_tdma); + else + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(off, %d) ********\n", + coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + default: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a, + 0x1a, 0x0, 0x50); + break; + 
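+		/* Descriptive note (added): the numbered cases below select
+		 * firmware TDMA patterns via H2C 0x60.  The exact meaning of
+		 * the five parameter bytes is firmware-defined; judging from
+		 * the duration-adjust logic further down, the second byte
+		 * tracks the WiFi slot length (a larger value means a longer
+		 * WiFi slot), while the remaining bytes appear to carry the
+		 * BT slot setting and control flags (assumption).
+		 */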
case 1: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x3a, + 0x03, 0x10, 0x50); + + rssi_adjust_val = 11; + break; + case 2: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x2b, + 0x03, 0x10, 0x50); + rssi_adjust_val = 14; + break; + case 3: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d, + 0x1d, 0x0, 0x52); + break; + case 4: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x14, 0x0); + rssi_adjust_val = 17; + break; + case 5: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15, + 0x3, 0x11, 0x10); + break; + case 6: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20, + 0x3, 0x11, 0x13); + break; + case 7: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc, + 0x5, 0x0, 0x0); + break; + case 8: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + break; + case 9: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x50); + rssi_adjust_val = 18; + break; + case 10: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0xa, 0x0, 0x40); + break; + case 11: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15, + 0x03, 0x10, 0x50); + rssi_adjust_val = 20; + break; + case 12: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a, + 0x0a, 0x0, 0x50); + break; + case 13: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15, + 0x15, 0x0, 0x50); + break; + case 14: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x52); + break; + case 15: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0x3, 0x8, 0x0); + break; + case 16: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x10, 0x0); + rssi_adjust_val = 18; + break; + case 18: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + rssi_adjust_val = 14; + break; + case 20: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35, + 0x03, 0x11, 0x10); + break; + case 21: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25, + 0x03, 0x11, 0x11); + break; + case 22: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25, + 0x03, 0x11, 0x10); + break; + case 23: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 24: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 25: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 26: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 27: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x98); + rssi_adjust_val = 22; + break; + case 28: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25, + 0x3, 0x31, 0x0); + break; + case 29: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a, + 0x1a, 0x1, 0x10); + break; + case 30: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14, + 0x3, 0x10, 0x50); + break; + case 31: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a, + 0x1a, 0, 0x58); + break; + case 32: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa, + 0x3, 0x10, 0x0); + break; + case 33: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25, + 0x3, 0x30, 0x90); + break; + case 34: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 35: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 36: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12, + 0x3, 0x14, 0x50); + break; + /* SoftAP only with no 
sta associated, BT disabled,
+		 * TDMA mode for power saving;
+		 * in SoftAP mode with the screen off this costs 70-80 mA on a phone
+		 */
+		case 40:
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18,
+						       0x00, 0x10, 0x24);
+			break;
+		}
+	} else {
+		switch (type) {
+		case 8: /* PTA Control */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA,
+						   false, false);
+			break;
+		case 0:
+		default: /* Software control, Antenna at BT side */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
+						   false, false);
+			break;
+		case 9: /* Software control, Antenna at WiFi side */
+			halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0,
+						       0x0, 0x0, 0x0);
+			halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI,
+						   false, false);
+			break;
+		}
+	}
+	rssi_adjust_val = 0;
+	btcoexist->btc_set(btcoexist,
+			   BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+			   &rssi_adjust_val);
+
+	/* update pre state */
+	coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+	coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	bool common = false, wifi_connected = false;
+	bool wifi_busy = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   &wifi_connected);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+	if (!wifi_connected &&
+	    BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+		halbtc8723b1ant_sw_mechanism(btcoexist, false);
+		common = true;
+	} else if (wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+		halbtc8723b1ant_sw_mechanism(btcoexist, false);
+		common = true;
+	} else if (!wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+		halbtc8723b1ant_sw_mechanism(btcoexist, false);
+		common = true;
+	} else if (wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi connected + BT connected-idle!!\n");
+		halbtc8723b1ant_sw_mechanism(btcoexist, false);
+		common = true;
+	} else if (!wifi_connected &&
+		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
+		    coex_dm->bt_status)) {
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+		halbtc8723b1ant_sw_mechanism(btcoexist, false);
+		common = true;
+	} else {
+		if (wifi_busy)
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+
+		common = false;
+	}
+
+	return common;
+}
+
+static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
+					      u8 wifi_status)
+{
+	static s32 up, dn, m, n, wait_count;
+	/* 0: no change, +1: increase WiFi duration,
+	 * -1: decrease WiFi duration
+	 */
+	s32 result;
+	u8 retry_count = 0, bt_info_ext;
+	bool wifi_busy = false;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+		  "[BTCoex], TdmaDurationAdjustForAcl()\n");
+
+	if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
+		wifi_busy = true;
+	else
+		wifi_busy = 
false; + + if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == + wifi_status) || + (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) || + (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) { + if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } + return; + } + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->tdma_adj_type = 2; + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } else { + /*accquire the BT TRx retry count from BT_Info byte2 */ + retry_count = coex_sta->bt_retry_cnt; + bt_info_ext = coex_sta->bt_info_ext; + result = 0; + wait_count++; + /* no retry in the last 2-second duration */ + if (retry_count == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); + } + } else if (retry_count <= 3) { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_count <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + } + } else { + if (wait_count == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + } + + if (result == -1) { + if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } else if (result == 1) { + if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } + } else { /*no change */ + /*if busy / idle change */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex],********* TDMA(on, %d) ********\n", + coex_dm->cur_ps_tdma); + } + + if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && + 
coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) { + /* recover to previous adjust type */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->tdma_adj_type); + } + } +} + +static void btc8723b1ant_pstdmachkpwrsave(struct btc_coexist *btcoexist, + bool new_ps_state) +{ + u8 lps_mode = 0x0; + + btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); + + if (lps_mode) { /* already under LPS state */ + if (new_ps_state) { + /* keep state under LPS, do nothing. */ + } else { + /* will leave LPS state, turn off psTdma first */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + } + } else { /* NO PS state */ + if (new_ps_state) { + /* will enter LPS state, turn off psTdma first */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + } else { + /* keep state under NO PS state, do nothing. */ + } + } +} + +static void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, + u8 rpwm_val) +{ + bool low_pwr_disable = false; + + switch (ps_type) { + case BTC_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + break; + case BTC_PS_LPS_ON: + btc8723b1ant_pstdmachkpwrsave(btcoexist, true); + halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex force to enter LPS, do not enter 32k low power. */ + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + /* power save must executed before psTdma. */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + break; + case BTC_PS_LPS_OFF: + btc8723b1ant_pstdmachkpwrsave(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + break; + default: + break; + } +} + +/*************************************************** + * + * Software Coex Mechanism start + * + ***************************************************/ +/* SCO only or SCO+PAN(HS) */ +static void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, true); +} + +static void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, true); +} + +/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +static void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, false); +} + +/* PAN(HS) only */ +static void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, false); +} + +/*PAN(EDR)+A2DP */ +static void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, true); +} + +/* HID+A2DP+PAN(EDR) */ +static void btc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, true); +} + +static void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_sw_mechanism(btcoexist, true); +} + 
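+/* Descriptive note (added): every software-mechanism "action" handler above
+ * reduces to the same operation - profiles that include SCO or HID enable
+ * the low-penalty retry rate-adaptive setting, while all other profiles
+ * leave it disabled.
+ */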
+/***************************************************** + * + * Non-Software Coex Mechanism start + * + *****************************************************/ +static void halbtc8723b1ant_action_wifi_multiport(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); +} + +static void halbtc8723b1ant_action_hs(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); +} + +static void halbtc8723b1ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false, ap_enable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + if (!wifi_connected) { + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } else if (bt_link_info->sco_exist || bt_link_info->hid_only) { + /* SCO/HID-only busy */ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else { + if (ap_enable) + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + else + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_LPS_ON, + 0x50, 0x4); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +static void btc8723b1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, + u8 wifi_status) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + /* tdma and coex table */ + + if (bt_link_info->sco_exist) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } else { /* HID */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5); + } +} + +static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( + struct btc_coexist *btcoexist, + u8 wifi_status) +{ + u8 bt_rssi_state; + + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0); + + if (bt_link_info->hid_only) { /*HID */ + btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status); + coex_dm->auto_tdma_adjust = false; + return; + } else if (bt_link_info->a2dp_only) { /*A2DP */ + if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifi_status) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + coex_dm->auto_tdma_adjust = false; + } else if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b1ant_tdma_dur_adj_for_acl(btcoexist, + wifi_status); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else { /*for low BT RSSI */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 
11); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } + } else if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { /*HID+A2DP */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; + } else { /*for low BT RSSI*/ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; + } + + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); + /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */ + } else if (bt_link_info->pan_only || + (bt_link_info->hid_exist && bt_link_info->pan_exist)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); + coex_dm->auto_tdma_adjust = false; + /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/ + } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) || + (bt_link_info->hid_exist && bt_link_info->a2dp_exist && + bt_link_info->pan_exist)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } +} + +static void btc8723b1ant_action_wifi_not_conn(struct btc_coexist *btcoexist) +{ + /* power save state */ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); +} + +static void btc8723b1ant_action_wifi_not_conn_scan(struct btc_coexist *btcoex) +{ + struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info; + + halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { + halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + true, 22); + halbtc8723b1ant_coex_table_with_type(btcoex, + NORMAL_EXEC, 1); + } else if (bt_link_info->pan_only) { + halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + true, 20); + halbtc8723b1ant_coex_table_with_type(btcoex, + NORMAL_EXEC, 2); + } else { + halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, + true, 20); + halbtc8723b1ant_coex_table_with_type(btcoex, + NORMAL_EXEC, 1); + } + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)){ + btc8723b1ant_act_bt_sco_hid_only_busy(btcoex, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 2); + } +} + +static void btc8723b1ant_act_wifi_not_conn_asso_auth(struct btc_coexist *btcoex) +{ + struct btc_bt_link_info *bt_link_info = &btcoex->bt_link_info; + + halbtc8723b1ant_power_save_state(btcoex, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) || + (bt_link_info->sco_exist) || (bt_link_info->hid_only) || + (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) { + halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 7); + } else { + 
halbtc8723b1ant_ps_tdma(btcoex, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoex, NORMAL_EXEC, 1); + } +} + +static void btc8723b1ant_action_wifi_conn_scan(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else if (bt_link_info->pan_only) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } +} + +static void halbtc8723b1ant_action_wifi_connected_special_packet( + struct btc_coexist *btcoexist) +{ + bool hs_connecting = false; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + if ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) || + (bt_link_info->sco_exist) || (bt_link_info->hid_only) || + (bt_link_info->a2dp_only) || (bt_link_info->pan_only)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) +{ + bool wifi_busy = false; + bool scan = false, link = false, roam = false; + bool under_4way = false, ap_enable = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect()===>\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + if (under_4way) { + halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) { + if (scan) + btc8723b1ant_action_wifi_conn_scan(btcoexist); + else + halbtc8723b1ant_action_wifi_connected_special_packet( + btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + /* power save state */ + if (!ap_enable && + BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status && + 
!btcoexist->bt_link_info.hid_only) { + if (!wifi_busy && btcoexist->bt_link_info.a2dp_only) + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + else + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_LPS_ON, + 0x50, 0x4); + } else { + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + } + /* tdma and coex table */ + if (!wifi_busy) { + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } else { + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } +} + +static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + algorithm = halbtc8723b1ant_action_algorithm(btcoexist); + coex_dm->cur_algorithm = algorithm; + + if (!halbtc8723b1ant_is_common_action(btcoexist)) { + switch (coex_dm->cur_algorithm) { + case BT_8723B_1ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = SCO.\n"); + halbtc8723b1ant_action_sco(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID.\n"); + halbtc8723b1ant_action_hid(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP.\n"); + halbtc8723b1ant_action_a2dp(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); + halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR).\n"); + halbtc8723b1ant_action_pan_edr(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HS mode.\n"); + halbtc8723b1ant_action_pan_hs(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN+A2DP.\n"); + halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); + halbtc8723b1ant_action_pan_edr_hid(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); + btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: + 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP.\n"); + halbtc8723b1ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = coexist All Off!!\n"); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false, bt_hs_on = false; + bool increase_scan_dev_num = false; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + return; + } + + if (btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + increase_scan_dev_num = true; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM, + &increase_scan_dev_num); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, + agg_buf_size); + halbtc8723b1ant_action_wifi_multiport(btcoexist); + return; + } + + if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + } else { + if (wifi_connected) { + wifi_rssi_state = + halbtc8723b1ant_wifi_rssi_state(btcoexist, + 1, 2, 30, 0); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, + 1, 1, 1, 1); + } else { + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, + 1, 1, 1, 1); + } + } else { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, + 0, 0, 0, 0); + } + } + + if (bt_link_info->sco_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x3; + } else if (bt_link_info->hid_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x5; + } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x8; + } + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + + btc8723b1ant_run_sw_coex_mech(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (!wifi_connected) { + bool scan = false, link = false, roam = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is non connected-idle !!!\n"); + + 
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) { + if (scan) + btc8723b1ant_action_wifi_not_conn_scan( + btcoexist); + else + btc8723b1ant_act_wifi_not_conn_asso_auth( + btcoexist); + } else { + btc8723b1ant_action_wifi_not_conn(btcoexist); + } + } else { /* wifi LPS/Busy */ + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +static void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* sw all off */ + halbtc8723b1ant_sw_mechanism(btcoexist, false); + + halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); +} + +static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist, + bool backup) +{ + u32 u32tmp = 0; + u8 u8tmp = 0; + u32 cnt_bt_cal_chk = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 1Ant Init HW Config!!\n"); + + if (backup) {/* backup rf 0x1e value */ + coex_dm->backup_arfr_cnt1 = + btcoexist->btc_read_4byte(btcoexist, 0x430); + coex_dm->backup_arfr_cnt2 = + btcoexist->btc_read_4byte(btcoexist, 0x434); + coex_dm->backup_retry_limit = + btcoexist->btc_read_2byte(btcoexist, 0x42a); + coex_dm->backup_ampdu_max_time = + btcoexist->btc_read_1byte(btcoexist, 0x456); + } + + /* WiFi goto standby while GNT_BT 0-->1 */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); + /* BT goto standby while GNT_BT 1-->0 */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500); + + btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); + + /* BT calibration check */ + while (cnt_bt_cal_chk <= 20) { + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d); + cnt_bt_cal_chk++; + if (u32tmp & BIT0) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ########### BT calibration(cnt=%d) ###########\n", + cnt_bt_cal_chk); + mdelay(50); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n", + cnt_bt_cal_chk); + break; + } + } + + /* 0x790[5:0] = 0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + /* Enable counter statistics */ + /*0x76e[3] =1, WLAN_Act control by PTA */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); + + /*Antenna config */ + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false); + /* PTA parameter */ + halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); +} + +static void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist) +{ + /* set wlan_act to low */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); +} + +/************************************************************** + * work around function start with wa_halbtc8723b1ant_ + **************************************************************/ +/************************************************************** + * extern function start with EXhalbtc8723b1ant_ + **************************************************************/ + +void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_init_hw_config(btcoexist, true); +} + +void 
ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + + btcoexist->stop_coex_dm = false; + + halbtc8723b1ant_init_coex_dm(btcoexist); + + halbtc8723b1ant_query_bt_info(btcoexist); +} + +void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0; + u16 u16tmp[4]; + u32 u32tmp[4]; + bool roam = false, scan = false; + bool link = false, wifi_under_5g = false; + bool bt_hs_on = false, wifi_busy = false; + s32 wifi_rssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck, wifi_link_status; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[BT Coexist info]============"); + + if (btcoexist->manual_control) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[Under Manual Control]=========="); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n =========================================="); + } + if (btcoexist->stop_coex_dm) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[Coex is STOPPED]============"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n =========================================="); + } + + if (!board_info->bt_exist) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); + return; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, + fw_ver, bt_patch_ver, bt_patch_ver); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], + coex_dm->wifi_chnl_info[2]); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + + btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G, + &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? + "uplink" : "downlink"))); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d", + "sta/vwifi/hs/p2pGo/p2pGc", + ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0), + ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0)); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : + ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? + "non-connected idle" : + ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? + "connected-idle" : "busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, + bt_link_info->hid_exist, bt_link_info->pan_exist, + bt_link_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? 
"Basic rate" : "EDR rate"); + + for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + GLBtInfoSrc8723b1Ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + if (!btcoexist->manual_control) { + /* Sw mechanism */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Sw mechanism]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/", + "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ", + "DelBA/ BtCtrlAgg/ AggSize", + (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), + (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), + btcoexist->bt_info.agg_buf_size); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", + "Rate Mask", btcoexist->bt_info.ra_mask); + + /* Fw mechanism */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Fw mechanism]============"); + + pstdmacase = coex_dm->cur_ps_tdma; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + pstdmacase, coex_dm->auto_tdma_adjust); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ", + "IgnWlanAct", coex_dm->cur_ignore_wlan_act); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", + "Latest error condition(should be 0)", + coex_dm->error_condition); + } + + /* Hw setting */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Hw setting]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_max_time); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); + u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], + (u32tmp[1] & 0x3e000000) >> 25); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 
0x765", + u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8)>>3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8); + u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + + fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) + + ((u32tmp[1] & 0xffff0000) >> 16) + + (u32tmp[1] & 0xffff) + + (u32tmp[2] & 0xffff) + + ((u32tmp[3] & 0xffff0000) >> 16) + + (u32tmp[3] & 0xffff); + fa_cck = (u8tmp[0] << 8) + u8tmp[1]; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0] & 0xffff, fa_ofdm, fa_cck); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, + coex_sta->high_priority_tx); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); +#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1) + halbtc8723b1ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + +void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, + false, true); + /* set PTA control */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + 
coex_sta->under_ips = false; + + halbtc8723b1ant_init_hw_config(btcoexist, false); + halbtc8723b1ant_init_coex_dm(btcoexist); + halbtc8723b1ant_query_bt_info(btcoexist); + } +} + +void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = false, bt_hs_on = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + halbtc8723b1ant_query_bt_info(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + halbtc8723b1ant_action_wifi_multiport(btcoexist); + return; + } + + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_SCAN_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + if (!wifi_connected) /* non-connected scan */ + btc8723b1ant_action_wifi_not_conn_scan(btcoexist); + else /* wifi is connected */ + btc8723b1ant_action_wifi_conn_scan(btcoexist); + } else if (BTC_SCAN_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); + if (!wifi_connected) /* non-connected scan */ + btc8723b1ant_action_wifi_not_conn(btcoexist); + else + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = false, bt_hs_on = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status>>16; + if (num_of_wifi_link >= 2) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + halbtc8723b1ant_action_wifi_multiport(btcoexist); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_ASSOCIATE_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist); + } else if (BTC_ASSOCIATE_FINISH == type) { + 
BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (!wifi_connected) /* non-connected scan */ + btc8723b1ant_action_wifi_not_conn(btcoexist); + else + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] = {0}; + u32 wifi_bw; + u8 wifiCentralChnl; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifiCentralChnl); + + if ((BTC_MEDIA_CONNECT == type) && + (wifiCentralChnl <= 14)) { + h2c_parameter[0] = 0x0; + h2c_parameter[1] = wifiCentralChnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + bool bt_hs_on = false; + u32 wifi_link_status = 0; + u32 num_of_wifi_link = 0; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS, + &wifi_link_status); + num_of_wifi_link = wifi_link_status >> 16; + if (num_of_wifi_link >= 2) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + halbtc8723b1ant_action_wifi_multiport(btcoexist); + return; + } + + coex_sta->special_pkt_period_cnt = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_PACKET_DHCP == type || + BTC_PACKET_EAPOL == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], special Packet(%d) notify\n", type); + halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); + } +} + +void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + bool wifi_connected = false; + bool bt_busy = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmp_buf[0] & 0xf; + if (rsp_source >= BT_INFO_SRC_8723B_1ANT_MAX) + rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data = [", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length - 1) 
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x]\n", tmp_buf[i]);
+		else
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+				  "0x%02x, ", tmp_buf[i]);
+	}
+
+	if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
+		coex_sta->bt_retry_cnt =	/* [3:0] */
+			coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+		coex_sta->bt_rssi =
+			coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+		coex_sta->bt_info_ext =
+			coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because BT was reset and lost the info.
+		 */
+		if (coex_sta->bt_info_ext & BIT1) {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+				  "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+					   &wifi_connected);
+			if (wifi_connected)
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+								       BTC_MEDIA_CONNECT);
+			else
+				ex_halbtc8723b1ant_media_status_notify(btcoexist,
+								       BTC_MEDIA_DISCONNECT);
+		}
+
+		if (coex_sta->bt_info_ext & BIT3) {
+			if (!btcoexist->manual_control &&
+			    !btcoexist->stop_coex_dm) {
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+					  "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+				halbtc8723b1ant_ignore_wlan_act(btcoexist,
+								FORCE_EXEC,
+								false);
+			}
+		} else {
+			/* BT already does NOT ignore Wlan active, do nothing here. */
+		}
+#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
+		if (coex_sta->bt_info_ext & BIT4) {
+			/* BT auto report already enabled, do nothing */
+		} else {
+			halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC,
+						       true);
+		}
+#endif
+	}
+
+	/* check BIT2 first ==> check if bt is under inquiry or page scan */
+	if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+		coex_sta->c2h_bt_inquiry_page = true;
+	else
+		coex_sta->c2h_bt_inquiry_page = false;
+
+	/* set link exist status */
+	if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+		coex_sta->bt_link_exist = false;
+		coex_sta->pan_exist = false;
+		coex_sta->a2dp_exist = false;
+		coex_sta->hid_exist = false;
+		coex_sta->sco_exist = false;
+	} else { /* connection exists */
+		coex_sta->bt_link_exist = true;
+		if (bt_info & BT_INFO_8723B_1ANT_B_FTP)
+			coex_sta->pan_exist = true;
+		else
+			coex_sta->pan_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_A2DP)
+			coex_sta->a2dp_exist = true;
+		else
+			coex_sta->a2dp_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_HID)
+			coex_sta->hid_exist = true;
+		else
+			coex_sta->hid_exist = false;
+		if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+			coex_sta->sco_exist = true;
+		else
+			coex_sta->sco_exist = false;
+	}
+
+	halbtc8723b1ant_update_bt_link_info(btcoexist);
+
+	if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+	/* connection exists but is not busy */
+	} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+	} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+		   (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+	} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
+		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
+			coex_dm->auto_tdma_adjust = false;
+
+		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
+		BTC_PRINT(BTC_MSG_ALGORITHM,
ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = + BT_8723B_1ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); + } + + if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) + bt_busy = true; + else + bt_busy = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + halbtc8723b1ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + btcoexist->stop_coex_dm = true; + + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true); + + halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); + halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + + ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n"); + + if (BTC_WIFI_PNP_SLEEP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to SLEEP\n"); + btcoexist->stop_coex_dm = true; + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, + true); + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); + } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to WAKE UP\n"); + btcoexist->stop_coex_dm = false; + halbtc8723b1ant_init_hw_config(btcoexist, false); + halbtc8723b1ant_init_coex_dm(btcoexist); + halbtc8723b1ant_query_bt_info(btcoexist); + } +} + +void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], *****************Coex DM Reset****************\n"); + + halbtc8723b1ant_init_hw_config(btcoexist, false); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x0); + halbtc8723b1ant_init_coex_dm(btcoexist); +} + +void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_1ant, + glcoex_ver_8723b_1ant, fw_ver, + bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + } + +#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0) + halbtc8723b1ant_query_bt_info(btcoexist); + halbtc8723b1ant_monitor_bt_ctr(btcoexist); + halbtc8723b1ant_monitor_bt_enable_disable(btcoexist); +#else + if (btc8723b1ant_is_wifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) { + halbtc8723b1ant_run_coexist_mechanism(btcoexist); + } + + coex_sta->special_pkt_period_cnt++; +#endif +} diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h new file mode 100644 index 0000000000000000000000000000000000000000..75f8094b7a34732ad7079e0718a35ee23efbe2fe --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h @@ -0,0 +1,184 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +/********************************************************************** + * The following is for 8723B 1ANT BT Co-exist definition + **********************************************************************/ +#define BT_AUTO_REPORT_ONLY_8723B_1ANT 1 + +#define BT_INFO_8723B_1ANT_B_FTP BIT7 +#define BT_INFO_8723B_1ANT_B_A2DP BIT6 +#define BT_INFO_8723B_1ANT_B_HID BIT5 +#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723B_1ANT_B_CONNECTION BIT0 + +#define BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \ + (((_BT_INFO_EXT_&BIT0)) ? 
true : false) + +#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2 + +enum _BT_INFO_SRC_8723B_1ANT { + BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1, + BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723B_1ANT_MAX +}; + +enum _BT_8723B_1ANT_BT_STATUS { + BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8723B_1ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8723B_1ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8723B_1ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8723B_1ANT_BT_STATUS_MAX +}; + +enum _BT_8723B_1ANT_WIFI_STATUS { + BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5, + BT_8723B_1ANT_WIFI_STATUS_MAX +}; + +enum _BT_8723B_1ANT_COEX_ALGO { + BT_8723B_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723B_1ANT_COEX_ALGO_SCO = 0x1, + BT_8723B_1ANT_COEX_ALGO_HID = 0x2, + BT_8723B_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8723B_1ANT_COEX_ALGO_PANEDR = 0x5, + BT_8723B_1ANT_COEX_ALGO_PANHS = 0x6, + BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8723B_1ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8723B_1ANT_COEX_ALGO_MAX = 0xb, +}; + +struct coex_dm_8723b_1ant { + /* fw mechanism */ + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 tdma_adj_type; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + u8 pre_lps; + u8 cur_lps; + u8 pre_rpwm; + u8 cur_rpwm; + + /* sw mechanism */ + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */ + u16 backup_retry_limit; + u8 backup_ampdu_max_time; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u32 prera_mask; + u32 curra_mask; + u8 pre_arfr_type; + u8 cur_arfr_type; + u8 pre_retry_limit_type; + u8 cur_retry_limit_type; + u8 pre_ampdu_time_type; + u8 cur_ampdu_time_type; + + u8 error_condition; +}; + +struct coex_sta_8723b_1ant { + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 special_pkt_period_cnt; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/************************************************************************* + * The following is interface which will notify coex module. 
+ *************************************************************************/ +void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate); +void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist); diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c index d916ab9f3c387d7cf69b6ac57c7683097bf60525..cefe26991421b8e42c22ba6ef6beb4be0adb9d95 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -49,8 +49,8 @@ static const char *const glbt_info_src_8723b_2ant[] = { "BT Info[bt auto report]", }; -static u32 glcoex_ver_date_8723b_2ant = 20130731; -static u32 glcoex_ver_8723b_2ant = 0x3b; +static u32 glcoex_ver_date_8723b_2ant = 20131113; +static u32 glcoex_ver_8723b_2ant = 0x3f; /************************************************************** * local function proto type if needed @@ -303,6 +303,21 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); } +static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) { static bool pre_wifi_busy; @@ -604,7 +619,7 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) return false; - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); if (wifi_connected) { if (bt_hs_on) { @@ -824,7 +839,6 @@ static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex, btc8723b2ant_set_dac_swing_reg(btcoex, 0x18); } - static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, bool dac_swing_on, u32 dac_swing_lvl) @@ -884,7 +898,6 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001); } - /* RF Gain */ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); if (agc_table_en) { @@ -1160,8 +1173,87 @@ static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist, dac_swing_lvl); } +static void btc8723b2ant_set_ant_path(struct 
btc_coexist *btcoexist, + u8 antpos_type, bool init_hwcfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 fw_ver = 0, u32tmp = 0; + bool pg_ext_switch = false; + bool use_ext_switch = false; + u8 h2c_parameter[2] = {0}; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + + if ((fw_ver < 0xc0000) || pg_ext_switch) + use_ext_switch = true; + + if (init_hwcfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); + + /* Force GNT_BT to low */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + /* tell firmware "no antenna inverse" */ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; /* ext switch type */ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /* tell firmware "antenna inverse" */ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; /* ext switch type */ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* ext switch setting */ + if (use_ext_switch) { + /* fixed internal switch S1->WiFi, S0->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + switch (antpos_type) { + case BTC_ANT_WIFI_AT_MAIN: + /* ext switch main at wifi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x1); + break; + case BTC_ANT_WIFI_AT_AUX: + /* ext switch aux at wifi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, 0x2); + break; + } + } else { /* internal switch */ + /* fixed ext switch */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); + switch (antpos_type) { + case BTC_ANT_WIFI_AT_MAIN: + /* fixed internal switch S1->WiFi, S0->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + break; + case BTC_ANT_WIFI_AT_AUX: + /* fixed internal switch S0->WiFi, S1->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + break; + } + } +} + static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, - bool turn_on, u8 type) + bool turn_on, u8 type) { BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, "[BTCoex], %s turn %s PS TDMA, type=%d\n", @@ -1351,7 +1443,8 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist) coex_dm->need_recover_0x948 = true; coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX, + false, false); } static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) @@ -1520,7 +1613,9 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 9) { + } + + if (coex_dm->cur_ps_tdma == 9) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); coex_dm->tdma_adj_type = 13; @@ -1607,7 +1702,9 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, } else if (coex_dm->cur_ps_tdma == 8) { 
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 13) { + } + + if (coex_dm->cur_ps_tdma == 13) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); coex_dm->tdma_adj_type = 9; } else if (coex_dm->cur_ps_tdma == 14) { @@ -1652,23 +1749,34 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, coex_dm->tdma_adj_type = 12; } } else if (result == 1) { - int tmp = coex_dm->cur_ps_tdma; - switch (tmp) { - case 4: - case 3: - case 2: - case 12: - case 11: - case 10: - btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, tmp - 1); - coex_dm->tdma_adj_type = tmp - 1; - break; - case 1: + if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); coex_dm->tdma_adj_type = 71; - break; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; } } } @@ -1694,7 +1802,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, } else if (coex_dm->cur_ps_tdma == 4) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 9) { + } + if (coex_dm->cur_ps_tdma == 9) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); coex_dm->tdma_adj_type = 14; } else if (coex_dm->cur_ps_tdma == 10) { @@ -1776,7 +1885,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, } else if (coex_dm->cur_ps_tdma == 8) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); coex_dm->tdma_adj_type = 4; - } else if (coex_dm->cur_ps_tdma == 13) { + } + if (coex_dm->cur_ps_tdma == 13) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); coex_dm->tdma_adj_type = 10; } else if (coex_dm->cur_ps_tdma == 14) { @@ -1865,7 +1975,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, } else if (coex_dm->cur_ps_tdma == 4) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); coex_dm->tdma_adj_type = 8; - } else if (coex_dm->cur_ps_tdma == 9) { + } + if (coex_dm->cur_ps_tdma == 9) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); coex_dm->tdma_adj_type = 15; } else if (coex_dm->cur_ps_tdma == 10) { @@ -1935,101 +2046,80 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, "[BTCoex], TxPause = 0\n"); - switch (coex_dm->cur_ps_tdma) { - case 5: + if (coex_dm->cur_ps_tdma == 5) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 6: + } else if (coex_dm->cur_ps_tdma == 6) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 7: + } else if (coex_dm->cur_ps_tdma == 7) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 8: + } else if 
(coex_dm->cur_ps_tdma == 8) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); coex_dm->tdma_adj_type = 4; - break; - case 13: + } + if (coex_dm->cur_ps_tdma == 13) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 14: + } else if (coex_dm->cur_ps_tdma == 14) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 15: + } else if (coex_dm->cur_ps_tdma == 15) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 16: + } else if (coex_dm->cur_ps_tdma == 16) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); coex_dm->tdma_adj_type = 12; - break; } if (result == -1) { - switch (coex_dm->cur_ps_tdma) { - case 1: + if (coex_dm->cur_ps_tdma == 1) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 2: + } else if (coex_dm->cur_ps_tdma == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 3: + } else if (coex_dm->cur_ps_tdma == 3) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); coex_dm->tdma_adj_type = 4; - break; - case 9: + } else if (coex_dm->cur_ps_tdma == 9) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 10: + } else if (coex_dm->cur_ps_tdma == 10) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 11: + } else if (coex_dm->cur_ps_tdma == 11) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); coex_dm->tdma_adj_type = 12; - break; } } else if (result == 1) { - switch (coex_dm->cur_ps_tdma) { - case 4: + if (coex_dm->cur_ps_tdma == 4) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 3: + } else if (coex_dm->cur_ps_tdma == 3) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 2: + } else if (coex_dm->cur_ps_tdma == 2) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); coex_dm->tdma_adj_type = 3; - break; - case 12: + } else if (coex_dm->cur_ps_tdma == 12) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 11: + } else if (coex_dm->cur_ps_tdma == 11) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; - break; - case 10: + } else if (coex_dm->cur_ps_tdma == 10) { btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); coex_dm->tdma_adj_type = 11; @@ -2328,7 +2418,7 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2385,12 +2475,43 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) /*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; + u8 ap_num = 0; wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, + 1, 2, 40, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); + + /* 
define the office environment */ + /* driver don't know AP num in Linux, so we will never enter this if */ + if (ap_num >= 10 && BTC_RSSI_HIGH(wifi_rssi_state1)) { + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); + } + return; + } btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2501,7 +2622,7 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2612,7 +2733,7 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2676,7 +2797,7 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); if (btc8723b_need_dec_pwr(btcoexist)) @@ -2746,7 +2867,7 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2809,8 +2930,8 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) u32 wifi_bw; wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, - 0, 2, 15, 0); - bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2982,7 +3103,15 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) } } - +static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist) +{ + /* set wlan_act to low */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); + /* Force GNT_BT to High */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); + /* BT select s0/s1 is controlled by BT */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); +} /********************************************************************* * work around function start with wa_btc8723b2ant_ @@ -2990,98 +3119,24 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) /********************************************************************* * extern function start 
with EXbtc8723b2ant_ *********************************************************************/ -void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) +void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) { - struct btc_board_info *board_info = &btcoexist->board_info; - u32 u32tmp = 0, fw_ver; u8 u8tmp = 0; - u8 h2c_parameter[2] = {0}; - BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, "[BTCoex], 2Ant Init HW Config!!\n"); + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); - /* backup rf 0x1e value */ - coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist, - BTC_RF_A, 0x1e, - 0xfffff); - - /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */ - u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); - u32tmp &= ~BIT23; - u32tmp |= BIT24; - btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); - - btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); - btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); - - /* Antenna switch control parameter */ - /* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/ - - /*Force GNT_BT to low*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); - btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); - - /* 0x790[5:0]=0x5 */ + /* 0x790[5:0] = 0x5 */ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); u8tmp &= 0xc0; u8tmp |= 0x5; btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); - - /*Antenna config */ - btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - - /*ext switch for fw ver < 0xc */ - if (fw_ver < 0xc00) { - if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, - 0x3, 0x1); - /*Main Ant to BT for IPS case 0x4c[23]=1*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, - 0x1); - - /*tell firmware "no antenna inverse"*/ - h2c_parameter[0] = 0; - h2c_parameter[1] = 1; /* ext switch type */ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, - h2c_parameter); - } else { - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, - 0x3, 0x2); - /*Aux Ant to BT for IPS case 0x4c[23]=1*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, - 0x0); - - /*tell firmware "antenna inverse"*/ - h2c_parameter[0] = 1; - h2c_parameter[1] = 1; /*ext switch type*/ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, - h2c_parameter); - } - } else { - /*ext switch always at s1 (if exist) */ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); - /*Main Ant to BT for IPS case 0x4c[23]=1*/ - btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1); - - if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { - /*tell firmware "no antenna inverse"*/ - h2c_parameter[0] = 0; - h2c_parameter[1] = 0; /*ext switch type*/ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, - h2c_parameter); - } else { - /*tell firmware "antenna inverse"*/ - h2c_parameter[0] = 1; - h2c_parameter[1] = 0; /*ext switch type*/ - btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, - h2c_parameter); - } - } - + /*Antenna config */ + btc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, + true, false); /* PTA parameter */ btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0); @@ -3092,19 +3147,19 @@ void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); } -void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) +void 
ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) { BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, "[BTCoex], Coex Mechanism Init!!\n"); btc8723b2ant_init_coex_dm(btcoexist); } -void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) +void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; - u8 *cli_buf = btcoexist->cli_buf; + struct rtl_priv *rtlpriv = btcoexist->adapter; u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; u32 u32tmp[4]; bool roam = false, scan = false; @@ -3114,106 +3169,93 @@ void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck; u8 wifi_dot11_chnl, wifi_hs_chnl; u32 fw_ver = 0, bt_patch_ver = 0; + u8 ap_num = 0; - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n ============[BT Coexist info]============"); - CL_PRINTF(cli_buf); if (btcoexist->manual_control) { - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n ==========[Under Manual Control]============"); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n =========================================="); - CL_PRINTF(cli_buf); } if (!board_info->bt_exist) { - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); - CL_PRINTF(cli_buf); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); return; } - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", board_info->pg_ant_num, board_info->btdm_ant_num); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", ((stack_info->profile_notified) ? 
"Yes" : "No"), stack_info->hci_version); - CL_PRINTF(cli_buf); btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, fw_ver, bt_patch_ver, bt_patch_ver); - CL_PRINTF(cli_buf); btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot11_chnl); btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); - CL_PRINTF(cli_buf); btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", - "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); - CL_PRINTF(cli_buf); + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d", + "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", link, roam, scan); - CL_PRINTF(cli_buf); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ", "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? "uplink" : "downlink"))); - CL_PRINTF(cli_buf); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, bt_link_info->hid_exist, bt_link_info->pan_exist, bt_link_info->a2dp_exist); - CL_PRINTF(cli_buf); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); bt_info_ext = coex_sta->bt_info_ext; - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", "BT Info A2DP rate", (bt_info_ext&BIT0) ? 
"Basic rate" : "EDR rate"); - CL_PRINTF(cli_buf); for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) { if (coex_sta->bt_info_c2h_cnt[i]) { - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x " "%02x %02x %02x %02x(%d)", glbt_info_src_8723b_2ant[i], @@ -3225,105 +3267,88 @@ void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) coex_sta->bt_info_c2h[i][5], coex_sta->bt_info_c2h[i][6], coex_sta->bt_info_c2h_cnt[i]); - CL_PRINTF(cli_buf); } } - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", "PS state, IPS/LPS", ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); - CL_PRINTF(cli_buf); btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); /* Sw mechanism */ - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", "============[Sw mechanism]============"); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); - CL_PRINTF(cli_buf); /* Fw mechanism */ - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", "============[Fw mechanism]============"); - CL_PRINTF(cli_buf); ps_tdma_case = coex_dm->cur_ps_tdma; - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], ps_tdma_case, coex_dm->auto_tdma_adjust); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); - CL_PRINTF(cli_buf); /* Hw setting */ - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", "============[Hw setting]============"); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); - CL_PRINTF(cli_buf); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", "0x778/0x880[29:25]", u8tmp[0], (u32tmp[0]&0x3e000000) >> 25); - CL_PRINTF(cli_buf); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x948/ 0x67[5] / 0x765", u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]); - CL_PRINTF(cli_buf); 
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3); - CL_PRINTF(cli_buf); - u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x38[11]/0x40/0x4c[24:23]/0x64[0]", ((u8tmp[0] & 0x8)>>3), u8tmp[1], ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1); - CL_PRINTF(cli_buf); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); - CL_PRINTF(cli_buf); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]); - CL_PRINTF(cli_buf); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); @@ -3341,29 +3366,25 @@ void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) (u32tmp[3] & 0xffff); fa_cck = (u8tmp[0] << 8) + u8tmp[1]; - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "OFDM-CCA/OFDM-FA/CCK-FA", u32tmp[0]&0xffff, fa_ofdm, fa_cck); - CL_PRINTF(cli_buf); u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, coex_sta->high_priority_tx); - CL_PRINTF(cli_buf); - CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, coex_sta->low_priority_tx); - CL_PRINTF(cli_buf); #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) btc8723b2ant_monitor_bt_ctr(btcoexist); #endif @@ -3371,22 +3392,26 @@ void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) BTC_DBG_DISP_COEX_STATISTICS); } - -void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_IPS_ENTER == type) { BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], IPS ENTER notify\n"); coex_sta->under_ips = true; + btc8723b2ant_wifioff_hwcfg(btcoexist); + btc8723b2ant_ignore_wlan_act(btcoexist, 
FORCE_EXEC, true); btc8723b2ant_coex_alloff(btcoexist); } else if (BTC_IPS_LEAVE == type) { BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], IPS LEAVE notify\n"); coex_sta->under_ips = false; + ex_btc8723b2ant_init_hwconfig(btcoexist); + btc8723b2ant_init_coex_dm(btcoexist); + btc8723b2ant_query_bt_info(btcoexist); } } -void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_LPS_ENABLE == type) { BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, @@ -3399,7 +3424,7 @@ void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) } } -void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_SCAN_START == type) BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, @@ -3409,7 +3434,7 @@ void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) "[BTCoex], SCAN FINISH notify\n"); } -void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) { if (BTC_ASSOCIATE_START == type) BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, @@ -3419,8 +3444,8 @@ void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) "[BTCoex], CONNECT FINISH notify\n"); } -void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) { u8 h2c_parameter[3] = {0}; u32 wifi_bw; @@ -3460,16 +3485,16 @@ void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); } -void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, - u8 type) +void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) { if (type == BTC_PACKET_DHCP) BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], DHCP Packet notify\n"); } -void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, - u8 *tmpbuf, u8 length) +void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length) { u8 bt_info = 0; u8 i, rsp_source = 0; @@ -3516,7 +3541,7 @@ void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->bt_info_c2h[rsp_source][4]; /* Here we need to resend some wifi info to BT - * because bt is reset and loss of the info. + because bt is reset and loss of the info. 
*/ if ((coex_sta->bt_info_ext & BIT1)) { BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, @@ -3525,11 +3550,13 @@ void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); if (wifi_connected) - btc8723b_med_stat_notify(btcoexist, - BTC_MEDIA_CONNECT); + ex_btc8723b2ant_media_status_notify( + btcoexist, + BTC_MEDIA_CONNECT); else - btc8723b_med_stat_notify(btcoexist, - BTC_MEDIA_DISCONNECT); + ex_btc8723b2ant_media_status_notify( + btcoexist, + BTC_MEDIA_DISCONNECT); } if ((coex_sta->bt_info_ext & BIT3)) { @@ -3564,7 +3591,7 @@ void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_sta->a2dp_exist = false; coex_sta->hid_exist = false; coex_sta->sco_exist = false; - } else { /* connection exists */ + } else { /* connection exists */ coex_sta->bt_link_exist = true; if (bt_info & BT_INFO_8723B_2ANT_B_FTP) coex_sta->pan_exist = true; @@ -3601,7 +3628,7 @@ void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY; BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); - } else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) { + } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) { coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY; BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); @@ -3630,26 +3657,16 @@ void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, btc8723b2ant_run_coexist_mechanism(btcoexist); } -void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist, - u8 type) -{ - if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex],StackOP Inquiry/page/pair start notify\n"); - else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) - BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, - "[BTCoex],StackOP Inquiry/page/pair finish notify\n"); -} - -void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist) +void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist) { BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + btc8723b2ant_wifioff_hwcfg(btcoexist); btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); - btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT); + ex_btc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); } -void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist) +void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist) { struct btc_board_info *board_info = &btcoexist->board_info; struct btc_stack_info *stack_info = &btcoexist->stack_info; @@ -3677,8 +3694,7 @@ void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist) &bt_patch_ver); btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, - "[BTCoex], CoexVer/ FwVer/ PatchVer = " - "%d_%x/ 0x%x/ 0x%x(%d)\n", + "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, fw_ver, bt_patch_ver, bt_patch_ver); BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h index e0ad8e545f82f3af62b26be4a39ae50231f83616..567f354caf95c5e6f1449c1f9296ec9877646fd0 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h @@ -153,21 +153,20 @@ struct coex_sta_8723b_2ant { 
/********************************************************************* * The following is interface which will notify coex module. *********************************************************************/ -void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist); -void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist); -void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); -void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); -void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); -void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); -void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type); -void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, - u8 type); -void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, - u8 *tmpbuf, u8 length); -void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist, - u8 type); -void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist); -void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist); -void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist); +void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist); #endif diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c new file mode 100644 index 0000000000000000000000000000000000000000..b72e5377bdbc5ff6469f8efc6420288f9089c23a --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -0,0 +1,2970 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +/*============================================================ + * Description: + * + * This file is for RTL8821A Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. + * + *============================================================ +*/ +/*============================================================ + * include files + *============================================================ + */ +#include "halbt_precomp.h" +/*============================================================ + * Global variables, these are static variables + *============================================================ + */ +static struct coex_dm_8821a_1ant glcoex_dm_8821a_1ant; +static struct coex_dm_8821a_1ant *coex_dm = &glcoex_dm_8821a_1ant; +static struct coex_sta_8821a_1ant glcoex_sta_8821a_1ant; +static struct coex_sta_8821a_1ant *coex_sta = &glcoex_sta_8821a_1ant; + +static const char *const glbt_info_src_8821a_1ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +static u32 glcoex_ver_date_8821a_1ant = 20130816; +static u32 glcoex_ver_8821a_1ant = 0x41; + +/*============================================================ + * local function proto type if needed + * + * local function start with halbtc8821a1ant_ + *============================================================ + */ +static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + long bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else if (bt_rssi < 
rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist, + u8 index, u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + long wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= + (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= + (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= + (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + 
"[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); + } + } + } + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +static void halbtc8821a1ant_update_ra_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_rate_mask) +{ + coex_dm->cur_ra_mask = dis_rate_mask; + + if (force_exec || + (coex_dm->pre_ra_mask != coex_dm->cur_ra_mask)) { + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + &coex_dm->cur_ra_mask); + } + coex_dm->pre_ra_mask = coex_dm->cur_ra_mask; +} + +static void btc8821a1ant_auto_rate_fb_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + bool wifi_under_b_mode = false; + + coex_dm->cur_arfr_type = type; + + if (force_exec || + (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) { + switch (coex_dm->cur_arfr_type) { + case 0: /* normal mode*/ + btcoexist->btc_write_4byte(btcoexist, 0x430, + coex_dm->backup_arfr_cnt1); + btcoexist->btc_write_4byte(btcoexist, 0x434, + coex_dm->backup_arfr_cnt2); + break; + case 1: + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_b_mode); + if (wifi_under_b_mode) { + btcoexist->btc_write_4byte(btcoexist, 0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 0x01010101); + } else { + btcoexist->btc_write_4byte(btcoexist, 0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 0x04030201); + } + break; + default: + break; + } + } + + coex_dm->pre_arfr_type = coex_dm->cur_arfr_type; +} + +static void halbtc8821a1ant_retry_limit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_retry_limit_type = type; + + if (force_exec || + (coex_dm->pre_retry_limit_type != coex_dm->cur_retry_limit_type)) { + switch (coex_dm->cur_retry_limit_type) { + case 0: /* normal mode*/ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retry_limit); + break; + case 1: /* retry limit = 8*/ + btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); + break; + default: + break; + } + } + coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type; +} + +static void halbtc8821a1ant_ampdu_max_time(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_ampdu_time_type = type; + + if (force_exec || + (coex_dm->pre_ampdu_time_type != coex_dm->cur_ampdu_time_type)) { + switch (coex_dm->cur_ampdu_time_type) { + case 0: /* normal mode*/ + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_max_time); + break; + case 1: /* AMPDU timw = 0x38 * 32us*/ + btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); + break; + default: + break; + } + } + + coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type; +} + +static void halbtc8821a1ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_mask_type, + u8 arfr_type, u8 retry_limit_type, + u8 ampdu_time_type) +{ + switch (ra_mask_type) { + case 0: /* normal mode*/ + halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, 0x0); + break; + case 1: /* disable cck 1/2*/ + halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, + 0x00000003); + break; + case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/ + halbtc8821a1ant_update_ra_mask(btcoexist, force_exec, + 0x0001f1f7); + break; + default: + break; + } + + btc8821a1ant_auto_rate_fb_retry(btcoexist, force_exec, arfr_type); + halbtc8821a1ant_retry_limit(btcoexist, force_exec, retry_limit_type); + 
halbtc8821a1ant_ampdu_max_time(btcoexist, force_exec, ampdu_time_type); +} + +static void halbtc8821a1ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool bt_ctrl_agg_buf_size, + u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = bt_ctrl_agg_buf_size; + u8 rx_agg_size = agg_buf_size; + + /*============================================*/ + /* Rx Aggregation related setting*/ + /*============================================*/ + btcoexist->btc_set(btcoexist, + BTC_SET_BL_TO_REJ_AP_AGG_PKT, &reject_rx_agg); + /* decide BT control aggregation buf size or not*/ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, only work when BT control Rx agg size.*/ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); + /* real update aggregation setting*/ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); +} + +static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_tx_rx = 0x770; + reg_lp_tx_rx = 0x774; + + u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx); + reg_hp_tx = u4_tmp & MASKLWORD; + reg_hp_rx = (u4_tmp & MASKHWORD)>>16; + + u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx); + reg_lp_tx = u4_tmp & MASKLWORD; + reg_lp_rx = (u4_tmp & MASKHWORD)>>16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + /* reset counter*/ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode.*/ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only*/ + if (bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only*/ + if (!bt_link_info->sco_exist && + bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only*/ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only*/ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + 
bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + u8 algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED; + u8 num_of_diff_profile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + num_of_diff_profile++; + if (bt_link_info->hid_exist) + num_of_diff_profile++; + if (bt_link_info->pan_exist) + num_of_diff_profile++; + if (bt_link_info->a2dp_exist) + num_of_diff_profile++; + + if (num_of_diff_profile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO only\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID only\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP only\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = PAN(HS) only\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = PAN(EDR) only\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (num_of_diff_profile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + PAN(EDR)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + PAN(EDR)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP + PAN(EDR)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (num_of_diff_profile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (num_of_diff_profile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n"); + + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + return algorithm; +} + +static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist, + bool enable_auto_report) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (enable_auto_report) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" : "Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist, + bool force_exec, + bool enable_auto_report) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), ((enable_auto_report) ? 
+ "Enabled" : "Disabled")); + coex_dm->cur_bt_auto_report = enable_auto_report; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8821a1ant_set_bt_auto_report(btcoexist, coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] = {0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36*/ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/ + h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/ + h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +static void halbtc8821a1ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + btc8821a1ant_set_sw_pen_tx_rate(btcoexist, coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? 
"force to" : ""), val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + coex_dm->cur_val_0x6c0 = val0x6c0; + coex_dm->cur_val_0x6c4 = val0x6c4; + coex_dm->cur_val_0x6c8 = val0x6c8; + coex_dm->cur_val_0x6cc = val0x6cc; + + if (!force_exec) { + if ((coex_dm->pre_val_0x6c0 == coex_dm->cur_val_0x6c0) && + (coex_dm->pre_val_0x6c4 == coex_dm->cur_val_0x6c4) && + (coex_dm->pre_val_0x6c8 == coex_dm->cur_val_0x6c8) && + (coex_dm->pre_val_0x6cc == coex_dm->cur_val_0x6cc)) + return; + } + halbtc8821a1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val_0x6c0 = coex_dm->cur_val_0x6c0; + coex_dm->pre_val_0x6c4 = coex_dm->cur_val_0x6c4; + coex_dm->pre_val_0x6c8 = coex_dm->cur_val_0x6c8; + coex_dm->pre_val_0x6cc = coex_dm->cur_val_0x6cc; +} + +static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffffff, 0x3); + break; + case 1: + halbtc8821a1ant_coex_table(btcoexist, force_exec, + 0x55555555, 0x5a5a5a5a, + 0xffffff, 0x3); + break; + case 2: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 3: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 4: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0xffffffff, + 0xffffffff, 0xffffff, 0x3); + break; + case 5: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5fff5fff, 0xffffff, 0x3); + break; + case 6: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 7: + halbtc8821a1ant_coex_table(btcoexist, force_exec, 0x5afa5afa, + 0x5afa5afa, 0xffffff, 0x3); + break; + default: + break; + } +} + +static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + if (enable) + h2c_parameter[0] |= BIT0; /* function enable*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + btc8821a1ant_set_fw_ignore_wlan_act(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist, + u8 byte1, u8 byte2, u8 byte3, + u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] = {0}; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1]<<24 | + h2c_parameter[2]<<16 | + h2c_parameter[3]<<8 | + h2c_parameter[4]); + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist, + u8 lps_val, u8 rpwm_val) +{ + u8 lps = lps_val; + u8 rpwm = rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_LPS_VAL, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_RPWM_VAL, &rpwm); +} + +static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist, + bool force_exec, u8 lps_val, u8 rpwm_val) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n", + (force_exec ? "force to" : ""), lps_val, rpwm_val); + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); + + if ((coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); + + return; + } + } + halbtc8821a1ant_set_lps_rpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + +static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra); + + halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); +} + +static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 u4_tmp = 0; + u8 h2c_parameter[2] = {0}; + + if (init_hw_cfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT*/ + u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u4_tmp &= ~BIT23; + u4_tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp); + + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x975, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); + + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + /*tell firmware "antenna inverse" ==> + * WRONG firmware antenna control code.==>need fw to fix + */ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; + btcoexist->btc_fill_h2c(btcoexist, 
0x65, 2, + h2c_parameter); + /*Main Ant to BT for IPS case 0x4c[23] = 1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, + 0x1, 0x1); + } else { + /*tell firmware "no antenna inverse" ==> + * WRONG firmware antenna control code.==>need fw to fix + */ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + /*Aux Ant to BT for IPS case 0x4c[23] = 1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, + 0x1, 0x0); + } + } else if (wifi_off) { + /* 0x4c[24:23] = 00, Set Antenna control + * by BT_RFE_CTRL BT Vendor 0xac = 0xf002 + */ + u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u4_tmp &= ~BIT23; + u4_tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp); + } + + /* ext switch setting*/ + switch (ant_pos_type) { + case BTC_ANT_PATH_WIFI: + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x2); + break; + case BTC_ANT_PATH_BT: + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x2); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x1); + break; + default: + case BTC_ANT_PATH_PTA: + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, + 0x30, 0x2); + break; + } +} + +static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) +{ + u8 rssi_adjust_val = 0; + + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + if (coex_dm->cur_ps_tdma_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(off, %d) **********\n", + coex_dm->cur_ps_tdma); + } + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + default: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1a, + 0x1a, 0x0, 0x50); + break; + case 1: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x3a, + 0x03, 0x10, 0x50); + rssi_adjust_val = 11; + break; + case 2: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x2b, + 0x03, 0x10, 0x50); + rssi_adjust_val = 14; + break; + case 3: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x1d, + 0x1d, 0x0, 0x10); + break; + case 4: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15, + 0x3, 0x14, 0x0); + rssi_adjust_val = 17; + break; + case 5: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15, + 0x3, 0x11, 0x10); + break; + case 6: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, + 0x3, 0x0, 0x0); + break; + case 7: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xc, + 0x5, 0x0, 0x0); + break; + case 8: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + break; + case 9: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x50); + rssi_adjust_val = 18; + break; + case 10: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, + 0xa, 0x0, 0x40); + break; + case 11: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14, + 0x03, 0x10, 0x10); + rssi_adjust_val = 20; + break; + case 12: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 
0x0a, + 0x0a, 0x0, 0x50); + break; + case 13: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x18, + 0x18, 0x0, 0x10); + break; + case 14: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x10); + break; + case 15: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x13, 0xa, + 0x3, 0x8, 0x0); + break; + case 16: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x15, + 0x3, 0x10, 0x0); + rssi_adjust_val = 18; + break; + case 18: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + rssi_adjust_val = 14; + break; + case 20: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x35, + 0x03, 0x11, 0x10); + break; + case 21: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x15, + 0x03, 0x11, 0x10); + break; + case 22: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0x25, + 0x03, 0x11, 0x10); + break; + case 23: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 24: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x15, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 25: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 26: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 27: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x98); + rssi_adjust_val = 22; + break; + case 28: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x69, 0x25, + 0x3, 0x31, 0x0); + break; + case 29: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xab, 0x1a, + 0x1a, 0x1, 0x10); + break; + case 30: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x51, 0x14, + 0x3, 0x10, 0x50); + break; + case 31: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x1a, + 0x1a, 0, 0x58); + break; + case 32: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x61, 0xa, + 0x3, 0x10, 0x0); + break; + case 33: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xa3, 0x25, + 0x3, 0x30, 0x90); + break; + case 34: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x53, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 35: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x63, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 36: + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0xd3, 0x12, + 0x3, 0x14, 0x50); + break; + } + } else { + /* disable PS tdma*/ + switch (type) { + case 8: /*PTA Control*/ + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x8, 0x0, 0x0, + 0x0, 0x0); + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, + false, false); + break; + case 0: + default: /*Software control, Antenna at BT side*/ + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x0, 0x0); + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + false, false); + break; + case 9: /*Software control, Antenna at WiFi side*/ + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x0, 0x0); + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_WIFI, + false, false); + break; + case 10: /* under 5G*/ + halbtc8821a1ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x8, 0x0); + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + false, false); + break; + } + } + rssi_adjust_val = 0; + btcoexist->btc_set(btcoexist, + BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val); + + /* update pre state*/ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist) +{ + bool common = false, wifi_connected = false, wifi_busy = false; + + 
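/*
 * Editorial sketch, not part of the patch: the long switch in
 * halbtc8821a1ant_ps_tdma() above maps a TDMA "type" to the five parameter
 * bytes of H2C command 0x60.  The same mapping could be expressed as a
 * lookup table; the names below are hypothetical, only a few cases are
 * reproduced, and the byte values are copied from the switch above.
 */
struct ps_tdma_param {
	u8 byte[5];
};

static const struct ps_tdma_param ps_tdma_tbl[] = {
	[1] = { .byte = {0x51, 0x3a, 0x03, 0x10, 0x50} },
	[2] = { .byte = {0x51, 0x2b, 0x03, 0x10, 0x50} },
	[5] = { .byte = {0x61, 0x15, 0x03, 0x11, 0x10} },
};

static void ps_tdma_by_table(struct btc_coexist *btcoexist, u8 type)
{
	const struct ps_tdma_param *p = &ps_tdma_tbl[type];

	/* Same effect as the matching case of the switch above. */
	halbtc8821a1ant_set_fw_pstdma(btcoexist, p->byte[0], p->byte[1],
				      p->byte[2], p->byte[3], p->byte[4]);
}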
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!wifi_connected && + BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n"); + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + common = true; + } else if (wifi_connected && + (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + BT non connected-idle!!\n"); + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + common = true; + } else if (!wifi_connected && + (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n"); + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + common = true; + } else if (wifi_connected && + (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + common = true; + } else if (!wifi_connected && + (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE != + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + BT Busy!!\n"); + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + common = true; + } else { + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + BT Busy!!\n"); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + BT Busy!!\n"); + } + + common = false; + } + + return common; +} + +static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist, + u8 wifi_status) +{ + static long up, dn, m, n, wait_count; + /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/ + long result; + u8 retry_count = 0, bt_info_ext; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); + + if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == + wifi_status) || + (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN == + wifi_status) || + (BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == + wifi_status)) { + if (coex_dm->cur_ps_tdma != 1 && + coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 3 && + coex_dm->cur_ps_tdma != 9) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } + return; + } + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->tdma_adj_type = 2; + /*============*/ + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } else { + /*accquire the BT TRx retry count from BT_Info byte2*/ + retry_count = coex_sta->bt_retry_cnt; + bt_info_ext = coex_sta->bt_info_ext; + result = 0; + wait_count++; + + if (retry_count == 0) { + /* no retry in the last 2-second duration*/ + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + /* if (retry count == 0) for 2*n seconds , + * make WiFi duration wider + */ + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], 
Increase wifi duration!!\n"); + } + } else if (retry_count <= 3) { + /* <=3 retry in the last 2-second duration*/ + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + /* if retry count< 3 for 2*2 seconds, + * shrink wifi duration + */ + if (wait_count <= 2) + m++; /* avoid bounce in two levels */ + else + m = 1; + + if (m >= 20) { + /* m max value is 20, max time is 120 s, + * recheck if adjust WiFi duration. + */ + m = 20; + } + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + } + } else { + /* retry count > 3, if retry count > 3 happens once, + * shrink WiFi duration + */ + if (wait_count == 1) + m++; /* avoid bounce in two levels */ + else + m = 1; + /* m max value is 20, max time is 120 second, + * recheck if adjust WiFi duration. + */ + if (m >= 20) + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + } + + if (result == -1) { + if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } else if (result == 1) { + if ((BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } + } else { + /*no change*/ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ********** TDMA(on, %d) **********\n", + coex_dm->cur_ps_tdma); + } + + if (coex_dm->cur_ps_tdma != 1 && + coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 9 && + coex_dm->cur_ps_tdma != 11) { + /* recover to previous adjust type*/ + halbtc8821a1ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, + coex_dm->tdma_adj_type); + } + } +} + +static void btc8821a1ant_ps_tdma_check_for_pwr_save(struct btc_coexist *btcoex, + bool new_ps_state) +{ + u8 lps_mode = 0x0; + + btcoex->btc_get(btcoex, BTC_GET_U1_LPS_MODE, &lps_mode); + + if (lps_mode) { + /* already under LPS state*/ + if (new_ps_state) { + /* keep state under LPS, do nothing.*/ + } else { + /* will leave LPS state, turn off psTdma first*/ + halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); + } + } else { + /* NO PS state*/ + if (new_ps_state) { + /* will enter LPS state, turn off psTdma first*/ + halbtc8821a1ant_ps_tdma(btcoex, NORMAL_EXEC, false, 0); + } else { + /* keep state under NO PS state, do nothing.*/ + } + } +} + +static void halbtc8821a1ant_power_save_state(struct 
btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, + u8 rpwm_val) +{ + bool low_pwr_disable = false; + + switch (ps_type) { + case BTC_PS_WIFI_NATIVE: + /* recover to original 32k low power setting*/ + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + break; + case BTC_PS_LPS_ON: + btc8821a1ant_ps_tdma_check_for_pwr_save(btcoexist, + true); + halbtc8821a1ant_lps_rpwm(btcoexist, + NORMAL_EXEC, lps_val, rpwm_val); + /* when coex force to enter LPS, do not enter 32k low power.*/ + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + /* power save must executed before psTdma.*/ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + break; + case BTC_PS_LPS_OFF: + btc8821a1ant_ps_tdma_check_for_pwr_save(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + break; + default: + break; + } +} + +static void halbtc8821a1ant_coex_under_5g(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8821a1ant_ignore_wlan_act(btcoexist, NORMAL_EXEC, true); + + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 10); + + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + + halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + + halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 5); +} + +static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); +} + +static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled; + static u32 bt_disable_cnt; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled*/ + + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) { + bt_active = false; + } + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) { + bt_active = false; + } + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + halbtc8821a1ant_action_wifi_only(btcoexist); + } + } + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); + pre_bt_disabled = bt_disabled; + if (bt_disabled) { + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, + NULL); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, + NULL); + } + } +} + +/*=============================================*/ +/**/ +/* Software Coex Mechanism start*/ +/**/ +/*=============================================*/ + +/* SCO only or SCO+PAN(HS)*/ +static void halbtc8821a1ant_action_sco(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, true); +} + +static void halbtc8821a1ant_action_hid(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, true); +} + +/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ +static void halbtc8821a1ant_action_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8821a1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8821a1ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, false); +} + +/*PAN(HS) only*/ +static void halbtc8821a1ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, false); +} + +/*PAN(EDR)+A2DP*/ +static void halbtc8821a1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, false); +} + +static void halbtc8821a1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, true); +} + +/* HID+A2DP+PAN(EDR)*/ +static void btc8821a1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, true); +} + +static void halbtc8821a1ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_sw_mechanism(btcoexist, true); +} + +/*=============================================*/ +/**/ +/* Non-Software Coex Mechanism start*/ +/**/ +/*=============================================*/ + +static void halbtc8821a1ant_action_hs(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2); +} + +static void halbtc8821a1ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + + if (!wifi_connected) { + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if ((bt_link_info->sco_exist) || + (bt_link_info->hid_only)) { + /* SCO/HID-only busy*/ + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else { + halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_LPS_ON, + 0x50, 0x4); + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, + u8 wifi_status) { + /* tdma and coex table*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + + if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == + wifi_status) + halbtc8821a1ant_coex_table_with_type(btcoexist, 
NORMAL_EXEC, 1); + else + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); +} + +static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, + u8 wifi_status) +{ + u8 bt_rssi_state; + + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + bt_rssi_state = halbtc8821a1ant_bt_rssi_state(2, 28, 0); + + if (bt_link_info->hid_only) { + /*HID*/ + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + wifi_status); + coex_dm->auto_tdma_adjust = false; + return; + } else if (bt_link_info->a2dp_only) { + /*A2DP*/ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a1ant_tdma_dur_adj(btcoexist, wifi_status); + } else { + /*for low BT RSSI*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->auto_tdma_adjust = false; + } + + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { + /*HID+A2DP*/ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; + } else { + /*for low BT RSSI*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->auto_tdma_adjust = false; + } + + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if ((bt_link_info->pan_only) || + (bt_link_info->hid_exist && bt_link_info->pan_exist)) { + /*PAN(OPP, FTP), HID+PAN(OPP, FTP)*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } else if (((bt_link_info->a2dp_exist) && (bt_link_info->pan_exist)) || + (bt_link_info->hid_exist && bt_link_info->a2dp_exist && + bt_link_info->pan_exist)) { + /*A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP)*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } +} + +static void halbtc8821a1ant_action_wifi_not_connected( + struct btc_coexist *btcoexist) +{ + /* power save state*/ + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + /* tdma and coex table*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); +} + +static void btc8821a1ant_act_wifi_not_conn_scan(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); +} + +static void halbtc8821a1ant_action_wifi_connected_scan( + struct btc_coexist *btcoexist) { + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + /* power save state*/ + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + /* tdma and coex table*/ + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + 
halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } + } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool hs_connecting = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); + + halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table*/ + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 20); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist) +{ + bool wifi_busy = false; + bool scan = false, link = false, roam = false; + bool under_4way = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect()===>\n"); + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way); + if (under_4way) { + btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + if (scan || link || roam) { + halbtc8821a1ant_action_wifi_connected_scan(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n"); + return; + } + + /* power save state*/ + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == + coex_dm->bt_status && !btcoexist->bt_link_info.hid_only) + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_LPS_ON, 0x50, 0x4); + else + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table*/ + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + if (!wifi_busy) { + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + btc8821a1ant_act_wifi_con_bt_acl_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } else { + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + btc8821a1ant_act_wifi_con_bt_acl_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else if ((BT_8821A_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + 
(BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + btc8821a1ant_act_bt_sco_hid_only_busy(btcoexist, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else { + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } +} + +static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + algorithm = halbtc8821a1ant_action_algorithm(btcoexist); + coex_dm->cur_algorithm = algorithm; + + if (!halbtc8821a1ant_is_common_action(btcoexist)) { + switch (coex_dm->cur_algorithm) { + case BT_8821A_1ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = SCO.\n"); + halbtc8821a1ant_action_sco(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID.\n"); + halbtc8821a1ant_action_hid(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP.\n"); + halbtc8821a1ant_action_a2dp(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP+PAN(HS).\n"); + halbtc8821a1ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR).\n"); + halbtc8821a1ant_action_pan_edr(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HS mode.\n"); + halbtc8821a1ant_action_pan_hs(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN+A2DP.\n"); + halbtc8821a1ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR)+HID.\n"); + halbtc8821a1ant_action_pan_edr_hid(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP+PAN.\n"); + btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8821A_1ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP.\n"); + halbtc8821a1ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = coexist All Off!!\n"); + /*halbtc8821a1ant_coex_all_off(btcoexist);*/ + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false, bt_hs_on = false; + bool increase_scan_dev_num = false; + bool bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + bool wifi_under_5g = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"); + return; + } + + if (btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + 
"[BTCoex], wifi is under IPS !!!\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + if (wifi_under_5g) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), return for 5G <===\n"); + halbtc8821a1ant_coex_under_5g(btcoexist); + return; + } + + if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) + increase_scan_dev_num = true; + + btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM, + &increase_scan_dev_num); + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + + if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) { + halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + } else { + if (wifi_connected) { + wifi_rssi_state = + halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2, + 30, 0); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 1, 1); + } else { + halbtc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 1, 1); + } + } else { + halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, + 0, 0, 0, 0); + } + } + + if (bt_link_info->sco_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x3; + } else if (bt_link_info->hid_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x5; + } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) { + bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x8; + } + halbtc8821a1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + bt_ctrl_agg_buf_size, agg_buf_size); + + btc8821a1ant_run_sw_coex_mech(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8821a1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8821a1ant_action_hs(btcoexist); + return; + } + + if (!wifi_connected) { + bool scan = false, link = false, roam = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is non connected-idle !!!\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) + btc8821a1ant_act_wifi_not_conn_scan(btcoexist); + else + halbtc8821a1ant_action_wifi_not_connected(btcoexist); + } else { + /* wifi LPS/Busy*/ + halbtc8821a1ant_action_wifi_connected(btcoexist); + } +} + +static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism*/ + /* sw all off*/ + halbtc8821a1ant_sw_mechanism(btcoexist, false); + + halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8); + halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); +} + +static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist, + bool back_up) +{ + u8 u1_tmp = 0; + bool wifi_under_5g = false; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 1Ant Init HW Config!!\n"); + + if (back_up) { + coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, + 0x430); + coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist, + 0x434); + coex_dm->backup_retry_limit = + btcoexist->btc_read_2byte(btcoexist, 0x42a); + coex_dm->backup_ampdu_max_time = + btcoexist->btc_read_1byte(btcoexist, 0x456); + } + + /* 0x790[5:0] = 0x5*/ + u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + 
u1_tmp &= 0xc0; + u1_tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u1_tmp); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + + /*Antenna config*/ + if (wifi_under_5g) + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, + true, false); + else + halbtc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA, + true, false); + /* PTA parameter*/ + halbtc8821a1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + + /* Enable counter statistics*/ + /*0x76e[3] =1, WLAN_Act control by PTA*/ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); +} + +/*============================================================*/ +/* work around function start with wa_halbtc8821a1ant_*/ +/*============================================================*/ +/*============================================================*/ +/* extern function start with EXhalbtc8821a1ant_*/ +/*============================================================*/ +void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + halbtc8821a1ant_init_hw_config(btcoexist, true); +} + +void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + + btcoexist->stop_coex_dm = false; + + halbtc8821a1ant_init_coex_dm(btcoexist); + + halbtc8821a1ant_query_bt_info(btcoexist); +} + +void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0; + u16 u2_tmp[4]; + u32 u4_tmp[4]; + bool roam = false, scan = false, link = false, wifi_under_5g = false; + bool bt_hs_on = false, wifi_busy = false; + long wifi_rssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[BT Coexist info]============"); + + if (btcoexist->manual_control) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[Under Manual Control]============"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n =========================================="); + } + if (btcoexist->stop_coex_dm) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[Coex is STOPPED]============"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n =========================================="); + } + + if (!board_info->bt_exist) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); + return; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8821a_1ant, + glcoex_ver_8821a_1ant, + fw_ver, bt_patch_ver, + bt_patch_ver); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, + &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, + &wifi_hs_chnl); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], + coex_dm->wifi_chnl_info[2]); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", + (int)wifi_rssi, (int)bt_hs_rssi); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + link, roam, scan); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, + &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, + &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, + &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s / %s/ %s ", "Wifi status", + (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? + "uplink" : "downlink"))); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : + ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? + "non-connected idle" : + ((BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? + "connected-idle" : "busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, + bt_link_info->hid_exist, + bt_link_info->pan_exist, + bt_link_info->a2dp_exist); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0) ? 
+ "Basic rate" : "EDR rate"); + + for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + glbt_info_src_8821a_1ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")), + btcoexist->bt_info.lps_val, + btcoexist->bt_info.rpwm_val); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + if (!btcoexist->manual_control) { + /* Sw mechanism*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s", "============[Sw mechanism]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d", "SM[LowPenaltyRA]", + coex_dm->cur_low_penalty_ra); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s/ %s/ %d ", + "DelBA/ BtCtrlAgg/ AggSize", + (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"), + (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"), + btcoexist->bt_info.agg_buf_size); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); + + /* Fw mechanism*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Fw mechanism]============"); + + ps_tdma_case = coex_dm->cur_ps_tdma; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", + coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], + coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], + coex_dm->ps_tdma_para[4], + ps_tdma_case, + coex_dm->auto_tdma_adjust); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x ", + "Latest error condition(should be 0)", + coex_dm->error_condition); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d ", "IgnWlanAct", + coex_dm->cur_ignore_wlan_act); + } + + /* Hw setting*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s", "============[Hw setting]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", + coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, + coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_max_time); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); + u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); + u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]", + u1_tmp[0], (u4_tmp[0]&0x3e000000) >> 25); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x", "0x8db[6:5]", + ((u1_tmp[0]&0x60)>>5)); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975); + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); + RT_TRACE(rtlpriv, COMP_INIT, 
DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]", + (u4_tmp[0] & 0x30000000)>>28, + u4_tmp[0] & 0xff, + u1_tmp[0] & 0x3); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x40/0x4c[24:23]/0x64[0]", + u1_tmp[0], ((u4_tmp[0]&0x01800000)>>23), u1_tmp[1]&0x1); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", + u4_tmp[0], u1_tmp[0]); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x", "0xc50(dig)", + u4_tmp[0]&0xff); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d); + u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA", + u4_tmp[0], (u1_tmp[0]<<8) + u1_tmp[1]); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); +#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1) + halbtc8821a1ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + +void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + halbtc8821a1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_BT, false, true); + /*set PTA control*/ + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8821a1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + + halbtc8821a1ant_run_coexist_mechanism(btcoexist); + } +} + +void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_Lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_Lps = false; + } +} + +void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = false, bt_hs_on = false; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, + 
BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + + halbtc8821a1ant_query_bt_info(btcoexist); + + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8821a1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8821a1ant_action_hs(btcoexist); + return; + } + + if (BTC_SCAN_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + if (!wifi_connected) { + /* non-connected scan*/ + btc8821a1ant_act_wifi_not_conn_scan(btcoexist); + } else { + /* wifi is connected*/ + halbtc8821a1ant_action_wifi_connected_scan(btcoexist); + } + } else if (BTC_SCAN_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); + if (!wifi_connected) { + /* non-connected scan*/ + halbtc8821a1ant_action_wifi_not_connected(btcoexist); + } else { + halbtc8821a1ant_action_wifi_connected(btcoexist); + } + } +} + +void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = false, bt_hs_on = false; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8821a1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8821a1ant_action_hs(btcoexist); + return; + } + + if (BTC_ASSOCIATE_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + btc8821a1ant_act_wifi_not_conn_scan(btcoexist); + } else if (BTC_ASSOCIATE_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if (!wifi_connected) { + /* non-connected scan*/ + halbtc8821a1ant_action_wifi_not_connected(btcoexist); + } else { + halbtc8821a1ant_action_wifi_connected(btcoexist); + } + } +} + +void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] = {0}; + u32 wifi_bw; + u8 wifi_central_chnl; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + if (BTC_MEDIA_CONNECT == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + } + + /* only 2.4G we need to inform bt the chnl mask*/ + btcoexist->btc_get(btcoexist, + BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifi_central_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_central_chnl <= 14)) { + /*h2c_parameter[0] = 0x1;*/ + h2c_parameter[0] = 0x0; + h2c_parameter[1] = wifi_central_chnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + bool bt_hs_on = false; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) 
+ return; + + coex_sta->special_pkt_period_cnt = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8821a1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8821a1ant_action_hs(btcoexist); + return; + } + + if (BTC_PACKET_DHCP == type || + BTC_PACKET_EAPOL == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], special Packet(%d) notify\n", type); + btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist); + } +} + +void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + bool wifi_connected = false; + bool bt_busy = false; + bool wifi_under_5g = false; + + coex_sta->c2h_bt_info_req_sent = false; + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + + rsp_source = tmp_buf[0]&0xf; + if (rsp_source >= BT_INFO_SRC_8821A_1ANT_MAX) + rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length = %d, hex data = [", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length-1) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); + } + } + + if (BT_INFO_SRC_8821A_1ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0]*/ + coex_sta->bt_info_c2h[rsp_source][2]&0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3]*2+10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT*/ + /* because bt is reset and loss of the info.*/ + if (coex_sta->bt_info_ext & BIT1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (wifi_connected) { + ex_halbtc8821a1ant_media_status_notify(btcoexist, + BTC_MEDIA_CONNECT); + } else { + ex_halbtc8821a1ant_media_status_notify(btcoexist, + BTC_MEDIA_DISCONNECT); + } + } + + if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) { + if (!btcoexist->manual_control && + !btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); + halbtc8821a1ant_ignore_wlan_act(btcoexist, + FORCE_EXEC, + false); + } + } +#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) + if (!(coex_sta->bt_info_ext & BIT4)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n"); + halbtc8821a1ant_bt_auto_report(btcoexist, + FORCE_EXEC, true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + if (bt_info & BT_INFO_8821A_1ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status*/ + if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else { + /* connection exists*/ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8821A_1ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8821A_1ANT_B_A2DP) + 
coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8821A_1ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8821A_1ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + halbtc8821a1ant_update_bt_link_info(btcoexist); + + if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"); + } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) { + /* connection exists but no busy*/ + coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) || + (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) { + coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) { + if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) + coex_dm->auto_tdma_adjust = false; + coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"); + } + + if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) + bt_busy = true; + else + bt_busy = false; + btcoexist->btc_set(btcoexist, + BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + halbtc8821a1ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Halt notify\n"); + + btcoexist->stop_coex_dm = true; + + halbtc8821a1ant_set_ant_path(btcoexist, + BTC_ANT_PATH_BT, false, true); + halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + + halbtc8821a1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + + ex_halbtc8821a1ant_media_status_notify(btcoexist, + BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify\n"); + + if (BTC_WIFI_PNP_SLEEP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to SLEEP\n"); + btcoexist->stop_coex_dm = true; + halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); + } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to WAKE UP\n"); + btcoexist->stop_coex_dm = false; + halbtc8821a1ant_init_hw_config(btcoexist, false); + halbtc8821a1ant_init_coex_dm(btcoexist); + halbtc8821a1ant_query_bt_info(btcoexist); + } +} + +void +ex_halbtc8821a1ant_periodical( + struct btc_coexist *btcoexist) { + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = 
&btcoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8821a_1ant, + glcoex_ver_8821a_1ant, + fw_ver, bt_patch_ver, + bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + } + +#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0) + halbtc8821a1ant_query_bt_info(btcoexist); + halbtc8821a1ant_monitor_bt_ctr(btcoexist); + btc8821a1ant_mon_bt_en_dis(btcoexist); +#else + if (halbtc8821a1ant_Is_wifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) { + if (coex_sta->special_pkt_period_cnt > 2) + halbtc8821a1ant_run_coexist_mechanism(btcoexist); + } + + coex_sta->special_pkt_period_cnt++; +#endif +} diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h new file mode 100644 index 0000000000000000000000000000000000000000..20e904890fc243a175ddd31dc64ca1524e117aca --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h @@ -0,0 +1,188 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +/*=========================================== + * The following is for 8821A 1ANT BT Co-exist definition + *=========================================== + */ +#define BT_AUTO_REPORT_ONLY_8821A_1ANT 0 + +#define BT_INFO_8821A_1ANT_B_FTP BIT7 +#define BT_INFO_8821A_1ANT_B_A2DP BIT6 +#define BT_INFO_8821A_1ANT_B_HID BIT5 +#define BT_INFO_8821A_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8821A_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8821A_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8821A_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8821A_1ANT_B_CONNECTION BIT0 + +#define BT_INFO_8821A_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \ + (((_BT_INFO_EXT_&BIT0)) ? 
true : false) + +#define BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT 2 + +enum _BT_INFO_SRC_8821A_1ANT { + BT_INFO_SRC_8821A_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8821A_1ANT_BT_RSP = 0x1, + BT_INFO_SRC_8821A_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8821A_1ANT_MAX +}; + +enum _BT_8821A_1ANT_BT_STATUS { + BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8821A_1ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8821A_1ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8821A_1ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8821A_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8821A_1ANT_BT_STATUS_MAX +}; + +enum _BT_8821A_1ANT_WIFI_STATUS { + BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4, + BT_8821A_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5, + BT_8821A_1ANT_WIFI_STATUS_MAX +}; + +enum BT_8821A_1ANT_COEX_ALGO { + BT_8821A_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8821A_1ANT_COEX_ALGO_SCO = 0x1, + BT_8821A_1ANT_COEX_ALGO_HID = 0x2, + BT_8821A_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8821A_1ANT_COEX_ALGO_PANEDR = 0x5, + BT_8821A_1ANT_COEX_ALGO_PANHS = 0x6, + BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8821A_1ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8821A_1ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8821A_1ANT_COEX_ALGO_MAX = 0xb, +}; + +struct coex_dm_8821a_1ant { + /* fw mechanism */ + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 tdma_adj_type; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + u8 pre_lps; + u8 cur_lps; + u8 pre_rpwm; + u8 cur_rpwm; + + /* sw mechanism */ + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + u32 pre_val_0x6c0; + u32 cur_val_0x6c0; + u32 pre_val_0x6c4; + u32 cur_val_0x6c4; + u32 pre_val_0x6c8; + u32 cur_val_0x6c8; + u8 pre_val_0x6cc; + u8 cur_val_0x6cc; + /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt1; + /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt2; + u16 backup_retry_limit; + u8 backup_ampdu_max_time; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u32 pre_ra_mask; + u32 cur_ra_mask; + u8 pre_arfr_type; + u8 cur_arfr_type; + u8 pre_retry_limit_type; + u8 cur_retry_limit_type; + u8 pre_ampdu_time_type; + u8 cur_ampdu_time_type; + + u8 error_condition; +}; + +struct coex_sta_8821a_1ant { + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_Lps; + bool under_ips; + u32 special_pkt_period_cnt; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8821A_1ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8821A_1ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/*=========================================== + * The following is interface which will notify coex module. 
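+ * (the ex_*() entry points below are the event notifications, e.g. init/IPS/LPS/scan/connect, delivered to this coex module)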
+ *=========================================== + */ +void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate); +void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist); +void ex_halbtc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code, + u8 op_len, u8 *data); diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c new file mode 100644 index 0000000000000000000000000000000000000000..cf819f02ed23fa944de7c4b418f63995ff769b32 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -0,0 +1,3879 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +/*============================================================ + * Description: + * + * This file is for RTL8821A Co-exist mechanism + * + * History + * 2012/08/22 Cosa first check in. + * 2012/11/14 Cosa Revise for 8821A 2Ant out sourcing. 
+ * + *============================================================ + */ + +/*============================================================ + * include files + *============================================================ +*/ +#include "halbt_precomp.h" +/*============================================================ + * Global variables, these are static variables + *============================================================ + */ +static struct coex_dm_8821a_2ant glcoex_dm_8821a_2ant; +static struct coex_dm_8821a_2ant *coex_dm = &glcoex_dm_8821a_2ant; +static struct coex_sta_8821a_2ant glcoex_sta_8821a_2ant; +static struct coex_sta_8821a_2ant *coex_sta = &glcoex_sta_8821a_2ant; + +static const char *const glbt_info_src_8821a_2ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +static u32 glcoex_ver_date_8821a_2ant = 20130618; +static u32 glcoex_ver_8821a_2ant = 0x5050; + +/*============================================================ + * local function proto type if needed + *============================================================ + *============================================================ + * local function start with halbtc8821a2ant_ + *============================================================ + */ +static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + long bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + long tmp = rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT; + if (bt_rssi >= tmp) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= + (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= + (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to High\n"); + } else if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Low\n"); + } 
else { + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) +{ + long wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= + (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= + (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + 
ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state stay at High\n"); + } + } + } + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled; + static u32 bt_disable_cnt; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled*/ + + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + bt_active = false; + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + bt_active = false; + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters = 0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + } + } + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? "disabled" : "enabled")); + pre_bt_disabled = bt_disabled; + } +} + +static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u4tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u4tmp & MASKLWORD; + reg_hp_rx = (u4tmp & MASKHWORD)>>16; + + u4tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u4tmp & MASKLWORD; + reg_lp_rx = (u4tmp & MASKHWORD)>>16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_stack_info *stack_info = &btcoexist->stack_info; + bool bt_hs_on = false; + u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED; + u8 num_of_diff_profile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + /*for win-8 stack HID report error*/ + /* sync BTInfo with BT firmware and stack */ + if (!stack_info->hid_exist) + stack_info->hid_exist = 
coex_sta->hid_exist; + /* when stack HID report error, here we use the info from bt fw. */ + if (!stack_info->bt_link_exist) + stack_info->bt_link_exist = coex_sta->bt_link_exist; + + if (!coex_sta->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No profile exists!!!\n"); + return algorithm; + } + + if (coex_sta->sco_exist) + num_of_diff_profile++; + if (coex_sta->hid_exist) + num_of_diff_profile++; + if (coex_sta->pan_exist) + num_of_diff_profile++; + if (coex_sta->a2dp_exist) + num_of_diff_profile++; + + if (num_of_diff_profile == 1) { + if (coex_sta->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO only\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; + } else { + if (coex_sta->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID only\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_HID; + } else if (coex_sta->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP only\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP; + } else if (coex_sta->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(HS) only\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(EDR) only\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (num_of_diff_profile == 2) { + if (coex_sta->sco_exist) { + if (coex_sta->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } else if (coex_sta->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP ==> SCO\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } else if (coex_sta->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(EDR)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (coex_sta->hid_exist && + coex_sta->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; + } else if (coex_sta->hid_exist && + coex_sta->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(EDR)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (coex_sta->pan_exist && + coex_sta->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP + PAN(EDR)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (num_of_diff_profile == 3) { + if (coex_sta->sco_exist) { + if (coex_sta->hid_exist && + coex_sta->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } else if (coex_sta->hid_exist && + coex_sta->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + PAN(EDR)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if 
(coex_sta->pan_exist && + coex_sta->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (coex_sta->hid_exist && + coex_sta->pan_exist && + coex_sta->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(HS)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + PAN(EDR)\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (num_of_diff_profile >= 3) { + if (coex_sta->sco_exist) { + if (coex_sta->hid_exist && + coex_sta->pan_exist && + coex_sta->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"); + + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n"); + algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + return algorithm; +} + +static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) +{ + bool ret = false; + bool bt_hs_on = false, wifi_connected = false; + long bt_hs_rssi = 0; + u8 bt_rssi_state; + + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) + return false; + + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + if (wifi_connected) { + if (bt_hs_on) { + if (bt_hs_rssi > 37) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for HS mode!!\n"); + ret = true; + } + } else { + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt power for Wifi is connected!!\n"); + ret = true; + } + } + } + return ret; +} + +static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist, + u8 dac_swing_lvl) +{ + u8 h2c_parameter[1] = {0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + */ + h2c_parameter[0] = dac_swing_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + bool dec_bt_pwr) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (dec_bt_pwr) + h2c_parameter[0] |= BIT1; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n", + (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, bool dec_bt_pwr) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + (force_exec ? "force to" : ""), + ((dec_bt_pwr) ? 
"ON" : "OFF")); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + + if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + return; + } + halbtc8821a2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist, + bool bt_lna_cons_on) +{ + u8 h2c_parameter[2] = {0}; + + h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */ + + if (bt_lna_cons_on) + h2c_parameter[1] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n", + (bt_lna_cons_on ? "ON!!" : "OFF!!"), + h2c_parameter[0]<<8|h2c_parameter[1]); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); +} + +static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist, + bool force_exec, bool bt_lna_cons_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Constrain = %s\n", + (force_exec ? "force" : ""), + ((bt_lna_cons_on) ? "ON" : "OFF")); + coex_dm->cur_bt_lna_constrain = bt_lna_cons_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n", + coex_dm->pre_bt_lna_constrain, + coex_dm->cur_bt_lna_constrain); + + if (coex_dm->pre_bt_lna_constrain == + coex_dm->cur_bt_lna_constrain) + return; + } + btc8821a2ant_set_fw_bt_lna_constr(btcoexist, + coex_dm->cur_bt_lna_constrain); + + coex_dm->pre_bt_lna_constrain = coex_dm->cur_bt_lna_constrain; +} + +static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist, + u8 bt_psd_mode) +{ + u8 h2c_parameter[2] = {0}; + + h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */ + + h2c_parameter[1] = bt_psd_mode; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n", + h2c_parameter[1], + h2c_parameter[0]<<8|h2c_parameter[1]); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter); +} + +static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist, + bool force_exec, u8 bt_psd_mode) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT PSD mode = 0x%x\n", + (force_exec ? "force" : ""), bt_psd_mode); + coex_dm->cur_bt_psd_mode = bt_psd_mode; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n", + coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode); + + if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode) + return; + } + halbtc8821a2ant_set_fw_bt_psd_mode(btcoexist, + coex_dm->cur_bt_psd_mode); + + coex_dm->pre_bt_psd_mode = coex_dm->cur_bt_psd_mode; +} + +static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist, + bool enable_auto_report) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (enable_auto_report) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n", + (enable_auto_report ? "Enabled!!" 
: "Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist, + bool force_exec, + bool enable_auto_report) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec ? "force to" : ""), + ((enable_auto_report) ? "Enabled" : "Disabled")); + coex_dm->cur_bt_auto_report = enable_auto_report; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8821a2ant_set_bt_auto_report(btcoexist, + coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, + u8 fw_dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? "force to" : ""), fw_dac_swing_lvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if (coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + btc8821a2ant_set_fw_dac_swing_lev(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /* Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffffc); + } else { + /* Resume RF Rx LPF corner + * After initialized, we can use coex_dm->bt_rf0x1e_backup + */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, + 0x1e, 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), + ((rx_rf_shrink_on) ? 
"ON" : "OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + btc8821a2ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] = {0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36 */ + h2c_parameter[2] = 0x00; + /*MCS7 or OFDM54 */ + h2c_parameter[3] = 0xf7; + /*MCS6 or OFDM48 */ + h2c_parameter[4] = 0xf8; + /*MCS5 or OFDM36 */ + h2c_parameter[5] = 0xf9; + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + /*return;*/ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), + ((low_penalty_ra) ? "ON" : "OFF")); + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); + + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + btc8821a2ant_SetSwPenTxRateAdapt(btcoexist, + coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, + u32 level) +{ + u8 val = (u8)level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val); +} + +static void btc8821a2ant_set_sw_full_dac_swing(struct btc_coexist *btcoexist, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) +{ + if (sw_dac_swing_on) + halbtc8821a2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); + else + halbtc8821a2ant_set_dac_swing_reg(btcoexist, 0x18); +} + +static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n", + (force_exec ? "force to" : ""), + ((dac_swing_on) ? 
"ON" : "OFF"), + dac_swing_lvl); + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == + coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + btc8821a2ant_set_sw_full_dac_swing(btcoexist, dac_swing_on, + dac_swing_lvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist, + bool adc_back_off) +{ + if (adc_back_off) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level On!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level Off!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); + } +} + +static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist, + bool force_exec, bool adc_back_off) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec ? "force to" : ""), + ((adc_back_off) ? "ON" : "OFF")); + coex_dm->cur_adc_back_off = adc_back_off; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n", + coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off); + + if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) + return; + } + halbtc8821a2ant_set_adc_back_off(btcoexist, coex_dm->cur_adc_back_off); + + coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off; +} + +static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n", + (force_exec ? 
"force to" : ""), + val0x6c0, val0x6c4, val0x6c8, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n", + coex_dm->pre_val0x6c0, + coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, + coex_dm->pre_val0x6cc); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n", + coex_dm->cur_val0x6c0, + coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, + coex_dm->cur_val0x6cc); + + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8821a2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, val0x6c8, + val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + if (enable) + h2c_parameter[0] |= BIT0;/* function enable */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n", + h2c_parameter[0]); + + btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter); +} + +static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8821a2ant_set_fw_ignore_wlan_act(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist, + u8 byte1, u8 byte2, u8 byte3, + u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5]; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1]<<24| + h2c_parameter[2]<<16| + h2c_parameter[3]<<8| + h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +static void btc8821a2ant_sw_mech1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, + bool low_penalty_ra, bool limited_dig, + bool bt_lna_constrain) +{ + u32 wifi_bw; + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 != wifi_bw) { + /*only shrink RF Rx LPF for HT40*/ + if (shrink_rx_lpf) + shrink_rx_lpf = false; + } + + halbtc8821a2ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); + halbtc8821a2ant_low_penalty_ra(btcoexist, + NORMAL_EXEC, low_penalty_ra); + + /* no limited DIG + * btc8821a2_set_bt_lna_const(btcoexist, + NORMAL_EXEC, bBTLNAConstrain); + */ +} + +static void btc8821a2ant_sw_mech2(struct btc_coexist *btcoexist, + bool agc_table_shift, + bool adc_back_off, bool sw_dac_swing, + u32 dac_swing_lvl) +{ + /* halbtc8821a2ant_AgcTable(btcoexist, NORMAL_EXEC, bAGCTableShift); */ + halbtc8821a2ant_adc_back_off(btcoexist, NORMAL_EXEC, adc_back_off); + halbtc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, + sw_dac_swing); +} + +static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 u4tmp = 0; + u8 h2c_parameter[2] = {0}; + + if (init_hw_cfg) { + /* 0x4c[23] = 0, 0x4c[24] = 1 Antenna control by WL/BT */ + u4tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u4tmp &= ~BIT23; + u4tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u4tmp); + + btcoexist->btc_write_4byte(btcoexist, 0x974, 0x3ff); + btcoexist->btc_write_1byte(btcoexist, 0xcb4, 0x77); + + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + /* tell firmware "antenna inverse" ==> + * WRONG firmware antenna control code. + * ==>need fw to fix + */ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /* tell firmware "no antenna inverse" + * ==> WRONG firmware antenna control code. 
+ * ==>need fw to fix + */ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* ext switch setting */ + switch (ant_pos_type) { + case BTC_ANT_WIFI_AT_MAIN: + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, 0x30, 0x1); + break; + case BTC_ANT_WIFI_AT_AUX: + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xcb7, 0x30, 0x2); + break; + } +} + +static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type = %d\n", + (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"), + type); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + case 1: + default: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 2: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 3: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); + break; + case 4: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x10, + 0x03, 0xf1, 0x90); + break; + case 5: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 6: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 7: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); + break; + case 8: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); + break; + case 9: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 10: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 11: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, + 0xa, 0xe1, 0x90); + break; + case 12: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 13: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 14: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, + 0x12, 0x12, 0x60, 0x90); + break; + case 15: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0xa, + 0xa, 0x60, 0x90); + break; + case 16: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, + 0x5, 0x60, 0x90); + break; + case 17: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xa3, 0x2f, + 0x2f, 0x60, 0x90); + break; + case 18: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 19: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); + break; + case 20: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); + break; + case 21: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 71: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + case 0: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + case 1: + 
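/* PS TDMA off, type 1: same as type 0 and default except byte4 is 0x48 instead of 0x40 */ +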
halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x48, 0x0); + break; + default: + halbtc8821a2ant_set_fw_pstdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + } + } + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +static void halbtc8821a2ant_coex_all_off(struct btc_coexist *btcoexist) +{ + /* fw all off */ + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + /* sw all off */ + btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + + /* hw all off */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, + 0x55555555, 0x55555555, 0xffff, 0x3); +} + +static void halbtc8821a2ant_coex_under_5g(struct btc_coexist *btcoexist) +{ + halbtc8821a2ant_coex_all_off(btcoexist); +} + +static void halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism */ + halbtc8821a2ant_coex_table(btcoexist, FORCE_EXEC, 0x55555555, + 0x55555555, 0xffff, 0x3); + + halbtc8821a2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + + btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); +} + +static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist) +{ + bool low_pwr_disable = true; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); +} + +static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist) +{ + bool common = false, wifi_connected = false, wifi_busy = false; + bool low_pwr_disable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + + if (!wifi_connected && + BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi IPS + BT IPS!!\n"); + + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + + common = true; + } else if (wifi_connected && + (BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Busy + BT IPS!!\n"); + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi LPS + BT IPS!!\n"); + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + 
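/* keep all software coex mechanisms off for this idle case */ +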
btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + + common = true; + } else if (!wifi_connected && + (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi IPS + BT LPS!!\n"); + + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8821a2ant_sw_mech1(btcoexist, false, false, false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + common = true; + } else if (wifi_connected && + (BT_8821A_2ANT_BT_STATUS_CON_IDLE == coex_dm->bt_status)) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Busy + BT LPS!!\n"); + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi LPS + BT LPS!!\n"); + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8821a2ant_sw_mech1(btcoexist, true, true, true, true); + btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); + + common = true; + } else if (!wifi_connected && + (BT_8821A_2ANT_BT_STATUS_NON_IDLE == + coex_dm->bt_status)) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi IPS + BT Busy!!\n"); + + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + + common = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Busy + BT Busy!!\n"); + common = false; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi LPS + BT Busy!!\n"); + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, true, 21); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, false); + + common = true; + } + btc8821a2ant_sw_mech1(btcoexist, true, true, true, true); + } + return common; +} + +static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + + if (coex_dm->cur_ps_tdma == 71) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + 
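/* with TxPause set, non-paused type 3 switches to its paused counterpart, type 7 */ +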
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->tdma_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + 
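/* TxPause cleared: paused type 16 returns to its non-paused counterpart, type 12 */ +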
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->tdma_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } + } + } +} + +static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, 
NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, 
NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } + } + } +} + +static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause, + int result) +{ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } 
else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } +} + +static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) +{ + static long up, dn, m, n, wait_count; + /* 0: no change, +1: increase WiFi duration, + * -1: decrease WiFi duration + */ + int result; + u8 retry_count = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); + + if (coex_dm->reset_tdma_adjust) { + coex_dm->reset_tdma_adjust = false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + if 
(sco_hid) { + if (tx_pause) { + if (max_interval == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (max_interval == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (max_interval == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } else { + if (max_interval == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (max_interval == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (max_interval == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } else { + if (tx_pause) { + if (max_interval == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (max_interval == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (max_interval == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } + } else { + if (max_interval == 1) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (max_interval == 2) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (max_interval == 3) { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else { + halbtc8821a2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } + } + } + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } else { + /* accquire the BT TRx retry count from BT_Info byte2 */ + retry_count = coex_sta->bt_retry_cnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_count = %d\n", retry_count); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n", + (int)up, (int)dn, (int)m, (int)n, (int)wait_count); + result = 0; + wait_count++; + + if (retry_count == 0) { + /* no retry in the last 2-second duration */ + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + /* if (retry count == 0) for 2*n seconds, + * make WiFi duration wider + */ + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi duration!!\n"); + } + } else if (retry_count <= 3) { + /* <=3 retry in the last 2-second duration */ + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + /* if retry count< 3 for 2*2 seconds, + * shrink wifi duration + */ + if (wait_count <= 2) + m++; /* avoid bounce in two levels */ + else + m = 1; + /* m max value is 20, max time is 120 second, + * recheck if adjust WiFi duration. 
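+ * (n is set to 3*m below, so after a shrink the 'up' counter
+ * must reach 3*m zero-retry 2-second polls before the WiFi
+ * duration is widened again)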
+ */ + if (m >= 20) + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter<3!!\n"); + } + } else { + /* retry count > 3, if retry count > 3 happens once, + * shrink WiFi duration + */ + if (wait_count == 1) + m++; /* avoid bounce in two levels */ + else + m = 1; + /* m max value is 20, max time is 120 second, + * recheck if adjust WiFi duration. + */ + if (m >= 20) + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration for retryCounter>3!!\n"); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); + if (max_interval == 1) + btc8821a2_int1(btcoexist, tx_pause, result); + else if (max_interval == 2) + btc8821a2_int2(btcoexist, tx_pause, result); + else if (max_interval == 3) + btc8821a2_int3(btcoexist, tx_pause, result); + } + + /* if current PsTdma not match with the recorded one + * (when scan, dhcp...), then we have to adjust it back to + * the previous recorded one. + */ + if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { + bool scan = false, link = false, roam = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (!scan && !link && !roam) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->tdma_adj_type); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n"); + } + } + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 0x6); +} + +/* SCO only or SCO+PAN(HS)*/ +static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for SCO quality at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, + 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3); + } else { + /* for SCO quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, + 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism + * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + */ + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + 
btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism + * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5aea5aea, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = 
halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + /* fw dac swing is called in btc8821a2ant_tdma_dur_adj() + * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + */ + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1); + } else { + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 1); + } else { + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + /*fw dac swing is called in btc8821a2ant_tdma_dur_adj() + *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + */ + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if (bt_info_ext&BIT0) { + /*a2dp basic rate*/ + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2); + } else { + /*a2dp edr rate*/ + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 2); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 1); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + 
btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5aff5aff, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5aff5aff, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(HS) only */ +static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + true); + } else { + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + } + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + 
btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, true); + } else { + halbtc8821a2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, false); + } + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 1); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(EDR)+A2DP */ +static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + true, 3); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + }; + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, false, + true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, 
false, + true, 3); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, false, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + } else { + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == 
wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + } else { + /* for HID quality & wifi performance balance at 11n mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + false, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + false, 3); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, true, + true, 3); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, + 15, 0); + bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0); + + if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) { + /* for HID at 11b/g mode */ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5f5b5f5b, 0xffffff, 0x3); + } else { + /*for HID quality & wifi performance balance at 11n mode*/ + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5f5b5f5b, 0xffffff, 0x3); + } + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } + } else { + if (bt_info_ext&BIT0) { + /* a2dp basic rate 
*/ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, true, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (bt_info_ext&BIT0) { + /* a2dp basic rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + + } else { + /* a2dp edr rate */ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } + } else { + if (bt_info_ext&BIT0) { + /*a2dp basic rate*/ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } else { + /*a2dp edr rate*/ + btc8821a2ant_tdma_dur_adj(btcoexist, + true, true, 2); + } + } + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, true, false, + false, 0x18); + } else { + btc8821a2ant_sw_mech1(btcoexist, false, true, + false, false); + btc8821a2ant_sw_mech2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + bool wifi_under_5g = false; + u8 algorithm = 0; + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Manual control!!!\n"); + return; + } + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + + if (wifi_under_5g) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n"); + halbtc8821a2ant_coex_under_5g(btcoexist); + return; + } + + algorithm = halbtc8821a2ant_action_algorithm(btcoexist); + if (coex_sta->c2h_bt_inquiry_page && + (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); + halbtc8821a2ant_bt_inquiry_page(btcoexist); + return; + } + + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm); + + if (halbtc8821a2ant_is_common_action(btcoexist)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant common.\n"); + coex_dm->reset_tdma_adjust = true; + } else { + if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n", + coex_dm->pre_algorithm, coex_dm->cur_algorithm); + coex_dm->reset_tdma_adjust = true; + } + switch (coex_dm->cur_algorithm) { + case BT_8821A_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); + halbtc8821a2ant_action_sco(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID.\n"); + halbtc8821a2ant_action_hid(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP.\n"); + halbtc8821a2ant_action_a2dp(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS: + 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); + halbtc8821a2ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"); + halbtc8821a2ant_action_pan_edr(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HS mode.\n"); + halbtc8821a2ant_action_pan_hs(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"); + halbtc8821a2ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); + halbtc8821a2ant_action_pan_edr_hid(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); + btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8821A_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"); + halbtc8821a2ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"); + halbtc8821a2ant_coex_all_off(btcoexist); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +/*============================================================ + *work around function start with wa_halbtc8821a2ant_ + *============================================================ + *============================================================ + * extern function start with EXhalbtc8821a2ant_ + *============================================================ + */ +void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + u8 u1tmp = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); + + /* backup rf 0x1e value */ + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); + + /* 0x790[5:0] = 0x5 */ + u1tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u1tmp &= 0xc0; + u1tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u1tmp); + + /*Antenna config */ + halbtc8821a2ant_set_ant_path(btcoexist, + BTC_ANT_WIFI_AT_MAIN, true, false); + + /* PTA parameter */ + halbtc8821a2ant_coex_table(btcoexist, + FORCE_EXEC, 0x55555555, 0x55555555, + 0xffff, 0x3); + + /* Enable counter statistics */ + /*0x76e[3] = 1, WLAN_Act control by PTA*/ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); +} + +void +ex_halbtc8821a2ant_init_coex_dm( + struct btc_coexist *btcoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + + halbtc8821a2ant_init_coex_dm(btcoexist); +} + +void +ex_halbtc8821a2ant_display_coex_info( + struct btc_coexist *btcoexist + ) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0; + u32 u4tmp[4]; + bool roam = false, scan = false, link = false, wifi_under_5g = false; + bool bt_hs_on = false, wifi_busy = false; + long wifi_rssi = 0, 
bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir; + u8 wifi_dot_11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n ============[BT Coexist info]============"); + + if (!board_info->bt_exist) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!"); + return; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); + + if (btcoexist->manual_control) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s", "[Action Manual control]!!"); + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, + BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl); + btcoexist->btc_get(btcoexist, + BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsMode(HsChnl)", + wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", + coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], + coex_dm->wifi_chnl_info[2]); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi", + wifi_rssi, bt_hs_rssi); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", + link, roam, scan); + + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %s / %s/ %s ", "Wifi status", + (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? + "uplink" : "downlink"))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", + ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") : + ((BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status) + ? "idle" : ((BT_8821A_2ANT_BT_STATUS_CON_IDLE == + coex_dm->bt_status) ? 
"connected-idle" : "busy"))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + + if (stack_info->profile_notified) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", + stack_info->sco_exist, stack_info->hid_exist, + stack_info->pan_exist, stack_info->a2dp_exist); + + btcoexist->btc_disp_dbg_msg(btcoexist, + BTC_DBG_DISP_BT_LINK_INFO); + } + + bt_info_ext = coex_sta->bt_info_ext; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); + + for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", + glbt_info_src_8821a_2ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + } + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + /* Sw mechanism*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Sw mechanism]============"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig/ btLna]", + coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, + coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + + /* Fw mechanism*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s", + "============[Fw mechanism]============"); + + if (!btcoexist->manual_control) { + ps_tdma_case = coex_dm->cur_ps_tdma; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", + "PS TDMA", + coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1], + coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3], + coex_dm->ps_tdma_para[4], ps_tdma_case); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); + } + + /* Hw setting*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s", "============[Hw setting]============"); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", + coex_dm->bt_rf0x1e_backup); + + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ", + "0x778 (W_Act)/ 0x6cc (CoTab Sel)", + u1tmp[0], u1tmp[1]); + + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db); + u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x8db(ADC)/0xc5b[29:25](DAC)", + ((u1tmp[0]&0x60)>>5), ((u1tmp[1]&0x3e)>>1)); + + u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)", + u4tmp[0]&0xff, ((u4tmp[0]&0x30000000)>>28)); + + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u4tmp[0] = 
btcoexist->btc_read_4byte(btcoexist, 0x4c); + u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x40/ 0x4c[24:23]/ 0x974", + u1tmp[0], ((u4tmp[0]&0x01800000)>>23), u4tmp[1]); + + u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", + u4tmp[0], u1tmp[0]); + + u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "0xc50(DIG)/0xa0a(CCK-TH)", + u4tmp[0], u1tmp[0]); + + u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); + u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x", + "OFDM-FA/ CCK-FA", + u4tmp[0], (u1tmp[0]<<8) + u1tmp[1]); + + u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8", + u4tmp[0], u4tmp[1], u4tmp[2]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x770 (hi-pri Rx/Tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d", + "0x774(low-pri Rx/Tx)", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); + + /* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/ + u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", + "0x41b (mgntQ hang chk == 0xf)", + u1tmp[0]); + + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + +void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + halbtc8821a2ant_coex_all_off(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + /*halbtc8821a2ant_init_coex_dm(btcoexist);*/ + } +} + +void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_SCAN_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + } else if (BTC_SCAN_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); + } +} + +void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_ASSOCIATE_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + } else if (BTC_ASSOCIATE_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); + } +} + +void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist, 
+ u8 type) +{ + u8 h2c_parameter[3] = {0}; + u32 wifi_bw; + u8 wifi_central_chnl; + + if (BTC_MEDIA_CONNECT == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + } + + /* only 2.4G we need to inform bt the chnl mask*/ + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifi_central_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_central_chnl <= 14)) { + h2c_parameter[0] = 0x1; + h2c_parameter[1] = wifi_central_chnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66 = 0x%x\n", + h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) { + if (type == BTC_PACKET_DHCP) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); + } +} + +void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + static u32 set_bt_lna_cnt, set_bt_psd_mode; + bool bt_busy = false, limited_dig = false; + bool wifi_connected = false, bt_hs_on = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmp_buf[0]&0xf; + if (rsp_source >= BT_INFO_SRC_8821A_2ANT_MAX) + rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length = %d, hex data = [", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length-1) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); + } + } + + if (BT_INFO_SRC_8821A_2ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0]*/ + coex_sta->bt_info_c2h[rsp_source][2]&0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3]*2+10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT*/ + /* because bt is reset and loss of the info.*/ + if ((coex_sta->bt_info_ext & BIT1)) { + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if (wifi_connected) { + ex_halbtc8821a2ant_media_status_notify(btcoexist, + BTC_MEDIA_CONNECT); + } else { + ex_halbtc8821a2ant_media_status_notify(btcoexist, + BTC_MEDIA_DISCONNECT); + } + + set_bt_psd_mode = 0; + } + if (set_bt_psd_mode <= 3) { + halbtc8821a2ant_set_bt_psd_mode(btcoexist, FORCE_EXEC, + 0x0); /*fix CH-BW mode*/ + set_bt_psd_mode++; + } + + if (coex_dm->cur_bt_lna_constrain) { + if (!(coex_sta->bt_info_ext & BIT2)) { + if (set_bt_lna_cnt <= 3) { + btc8821a2_set_bt_lna_const(btcoexist, + FORCE_EXEC, + true); + set_bt_lna_cnt++; + } + } + } else { + set_bt_lna_cnt = 0; + } + + if ((coex_sta->bt_info_ext & BIT3)) { + halbtc8821a2ant_ignore_wlan_act(btcoexist, + FORCE_EXEC, false); + } else { + /* BT already NOT ignore Wlan active, do nothing here.*/ + } + + if 
((coex_sta->bt_info_ext & BIT4)) { + /* BT auto report already enabled, do nothing*/ + } else { + halbtc8821a2ant_bt_auto_report(btcoexist, + FORCE_EXEC, true); + } + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + if (bt_info & BT_INFO_8821A_2ANT_B_INQ_PAGE) { + coex_sta->c2h_bt_inquiry_page = true; + coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE; + } else { + coex_sta->c2h_bt_inquiry_page = false; + if (bt_info == 0x1) { + /* connection exists but not busy*/ + coex_sta->bt_link_exist = true; + coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_CON_IDLE; + } else if (bt_info & BT_INFO_8821A_2ANT_B_CONNECTION) { + /* connection exists and some link is busy*/ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8821A_2ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8821A_2ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8821A_2ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE; + } else { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE; + } + + if (bt_hs_on) + coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_NON_IDLE; + } + + if (BT_8821A_2ANT_BT_STATUS_NON_IDLE == coex_dm->bt_status) + bt_busy = true; + else + bt_busy = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + if (BT_8821A_2ANT_BT_STATUS_IDLE != coex_dm->bt_status) + limited_dig = true; + else + limited_dig = false; + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, + BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8821a2ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Halt notify\n"); + + halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist) +{ + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, + board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************************************************\n"); + } + + halbtc8821a2ant_query_bt_info(btcoexist); + halbtc8821a2ant_monitor_bt_ctr(btcoexist); + btc8821a2ant_mon_bt_en_dis(btcoexist); +} diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h new file mode 100644 index 0000000000000000000000000000000000000000..b4cf1f53d5105845b875f3f5ebb9e408dcb7151d --- /dev/null +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h @@ -0,0 +1,205 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +/*=========================================== + * The following is for 8821A 2Ant BT Co-exist definition + *=========================================== +*/ +#define BT_INFO_8821A_2ANT_B_FTP BIT7 +#define BT_INFO_8821A_2ANT_B_A2DP BIT6 +#define BT_INFO_8821A_2ANT_B_HID BIT5 +#define BT_INFO_8821A_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8821A_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8821A_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8821A_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8821A_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT 2 + +enum _BT_INFO_SRC_8821A_2ANT { + BT_INFO_SRC_8821A_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8821A_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8821A_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8821A_2ANT_MAX +}; + +enum _BT_8821A_2ANT_BT_STATUS { + BT_8821A_2ANT_BT_STATUS_IDLE = 0x0, + BT_8821A_2ANT_BT_STATUS_CON_IDLE = 0x1, + BT_8821A_2ANT_BT_STATUS_NON_IDLE = 0x2, + BT_8821A_2ANT_BT_STATUS_MAX +}; + +enum _BT_8821A_2ANT_COEX_ALGO { + BT_8821A_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8821A_2ANT_COEX_ALGO_SCO = 0x1, + BT_8821A_2ANT_COEX_ALGO_HID = 0x2, + BT_8821A_2ANT_COEX_ALGO_A2DP = 0x3, + BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8821A_2ANT_COEX_ALGO_PANEDR = 0x5, + BT_8821A_2ANT_COEX_ALGO_PANHS = 0x6, + BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8821A_2ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8821A_2ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8821A_2ANT_COEX_ALGO_MAX = 0xb, +}; + +struct coex_dm_8821a_2ant { + /* fw mechanism */ + bool pre_dec_bt_pwr; + bool cur_dec_bt_pwr; + bool pre_bt_lna_constrain; + bool cur_bt_lna_constrain; + u8 pre_bt_psd_mode; + u8 cur_bt_psd_mode; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 tdma_adj_type; + bool reset_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; +}; + +struct coex_sta_8821a_2ant { + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8821A_2ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8821A_2ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/*=========================================== + * The following is interface which will notify coex module. 
+ *=========================================== + */ +void +ex_halbtc8821a2ant_init_hwconfig( + struct btc_coexist *btcoexist + ); +void +ex_halbtc8821a2ant_init_coex_dm( + struct btc_coexist *btcoexist + ); +void +ex_halbtc8821a2ant_ips_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_lps_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_scan_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_connect_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_media_status_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_special_packet_notify( + struct btc_coexist *btcoexist, + u8 type + ); +void +ex_halbtc8821a2ant_bt_info_notify( + struct btc_coexist *btcoexist, + u8 *tmp_buf, + u8 length + ); +void +ex_halbtc8821a2ant_halt_notify( + struct btc_coexist *btcoexist + ); +void +ex_halbtc8821a2ant_periodical( + struct btc_coexist *btcoexist + ); +void +ex_halbtc8821a2ant_display_coex_info( + struct btc_coexist *btcoexist + ); diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c index d4bd550f505c7dcfa59bc60f4c852a5cf6aed39e..b2791c893417b071782dbb317f6baae53c0ecf74 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c @@ -32,7 +32,6 @@ struct btc_coexist gl_bt_coexist; u32 btc_dbg_type[BTC_MSG_MAX]; -static u8 btc_dbg_buf[100]; /*************************************************** * Debug related function @@ -389,7 +388,7 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) btcoexist->bt_info.reject_agg_pkt = *bool_tmp; break; case BTC_SET_BL_BT_CTRL_AGG_SIZE: - btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp; + btcoexist->bt_info.bt_ctrl_buf_size = *bool_tmp; break; case BTC_SET_BL_INC_SCAN_DEV_NUM: btcoexist->bt_info.increase_scan_dev_num = *bool_tmp; @@ -417,10 +416,10 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */ break; case BTC_SET_U1_1ANT_LPS: - btcoexist->bt_info.lps_1ant = *u8_tmp; + btcoexist->bt_info.lps_val = *u8_tmp; break; case BTC_SET_U1_1ANT_RPWM: - btcoexist->bt_info.rpwm_1ant = *u8_tmp; + btcoexist->bt_info.rpwm_val = *u8_tmp; break; /* the following are some action which will be triggered */ case BTC_SET_ACT_LEAVE_LPS: @@ -497,7 +496,7 @@ static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr) return rtl_read_dword(rtlpriv, reg_addr); } -static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data) +static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u32 data) { struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; struct rtl_priv *rtlpriv = btcoexist->adapter; @@ -652,9 +651,7 @@ bool exhalbtc_initlize_variables(struct rtl_priv *adapter) btcoexist->btc_get = halbtc_get; btcoexist->btc_set = halbtc_set; - btcoexist->cli_buf = &btc_dbg_buf[0]; - - btcoexist->bt_info.b_bt_ctrl_buf_size = false; + btcoexist->bt_info.bt_ctrl_buf_size = false; btcoexist->bt_info.agg_buf_size = 5; btcoexist->bt_info.increase_scan_dev_num = false; @@ -672,7 +669,7 @@ void exhalbtc_init_hw_config(struct btc_coexist *btcoexist) btcoexist->statistics.cnt_init_hw_config++; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_init_hwconfig(btcoexist); + ex_btc8723b2ant_init_hwconfig(btcoexist); } void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist) @@ 
-686,7 +683,7 @@ void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist) btcoexist->statistics.cnt_init_coex_dm++; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_init_coex_dm(btcoexist); + ex_btc8723b2ant_init_coex_dm(btcoexist); btcoexist->initilized = true; } @@ -711,7 +708,7 @@ void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type) halbtc_leave_low_power(); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type); + ex_btc8723b2ant_ips_notify(btcoexist, ips_type); halbtc_nomal_low_power(); } @@ -734,7 +731,7 @@ void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type) lps_type = BTC_LPS_ENABLE; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type); + ex_btc8723b2ant_lps_notify(btcoexist, lps_type); } void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type) @@ -757,7 +754,7 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type) halbtc_leave_low_power(); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type); + ex_btc8723b2ant_scan_notify(btcoexist, scan_type); halbtc_nomal_low_power(); } @@ -782,14 +779,12 @@ void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) halbtc_leave_low_power(); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type); + ex_btc8723b2ant_connect_notify(btcoexist, asso_type); } void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, - enum _RT_MEDIA_STATUS media_status) + enum rt_media_status media_status) { - struct rtl_priv *rtlpriv = btcoexist->adapter; - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 status; if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -805,9 +800,6 @@ void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, halbtc_leave_low_power(); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - btc8723b_med_stat_notify(btcoexist, status); - halbtc_nomal_low_power(); } @@ -828,8 +820,8 @@ void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type) halbtc_leave_low_power(); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_special_packet_notify(btcoexist, - packet_type); + ex_btc8723b2ant_special_packet_notify(btcoexist, + packet_type); halbtc_nomal_low_power(); } @@ -844,13 +836,11 @@ void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, btcoexist->statistics.cnt_bt_info_notify++; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length); + ex_btc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length); } void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type) { - struct rtl_priv *rtlpriv = btcoexist->adapter; - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 stack_op_type; if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -863,10 +853,6 @@ void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type) halbtc_leave_low_power(); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_stack_operation_notify(btcoexist, - stack_op_type); - halbtc_nomal_low_power(); } @@ -878,7 +864,7 @@ void exhalbtc_halt_notify(struct btc_coexist *btcoexist) return; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_halt_notify(btcoexist); + ex_btc8723b2ant_halt_notify(btcoexist); } void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) @@ -898,7 +884,7 @@ void exhalbtc_periodical(struct btc_coexist *btcoexist) 
halbtc_leave_low_power(); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_periodical(btcoexist); + ex_btc8723b2ant_periodical(btcoexist); halbtc_nomal_low_power(); } @@ -997,5 +983,5 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist) return; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) - ex_halbtc8723b2ant_display_coex_info(btcoexist); + ex_btc8723b2ant_display_coex_info(btcoexist); } diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h index 049f4c8d98a8675baa69bd993a1bfe268bd08f2f..0a903ea179ef5df67954b1c33c97a57a5c82eebe 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h @@ -55,9 +55,16 @@ #define BTC_RATE_DISABLE 0 #define BTC_RATE_ENABLE 1 +/* single Antenna definition */ #define BTC_ANT_PATH_WIFI 0 #define BTC_ANT_PATH_BT 1 #define BTC_ANT_PATH_PTA 2 +/* dual Antenna definition */ +#define BTC_ANT_WIFI_AT_MAIN 0 +#define BTC_ANT_WIFI_AT_AUX 1 +/* coupler Antenna definition */ +#define BTC_ANT_WIFI_AT_CPL_MAIN 0 +#define BTC_ANT_WIFI_AT_CPL_AUX 1 enum btc_chip_interface { BTC_INTF_UNKNOWN = 0, @@ -68,7 +75,7 @@ enum btc_chip_interface { BTC_INTF_MAX }; -enum BTC_CHIP_TYPE { +enum btc_chip_type { BTC_CHIP_UNDEF = 0, BTC_CHIP_CSR_BC4 = 1, BTC_CHIP_CSR_BC8 = 2, @@ -78,11 +85,12 @@ enum BTC_CHIP_TYPE { BTC_CHIP_MAX }; -enum BTC_MSG_TYPE { +enum btc_msg_type { BTC_MSG_INTERFACE = 0x0, BTC_MSG_ALGORITHM = 0x1, BTC_MSG_MAX }; + extern u32 btc_dbg_type[]; /* following is for BTC_MSG_INTERFACE */ @@ -101,20 +109,12 @@ extern u32 btc_dbg_type[]; #define ALGO_TRACE_SW_DETAIL BIT8 #define ALGO_TRACE_SW_EXEC BIT9 -#define BT_COEX_ANT_TYPE_PG 0 -#define BT_COEX_ANT_TYPE_ANTDIV 1 -#define BT_COEX_ANT_TYPE_DETECTED 2 -#define BTC_MIMO_PS_STATIC 0 -#define BTC_MIMO_PS_DYNAMIC 1 -#define BTC_RATE_DISABLE 0 -#define BTC_RATE_ENABLE 1 -#define BTC_ANT_PATH_WIFI 0 -#define BTC_ANT_PATH_BT 1 -#define BTC_ANT_PATH_PTA 2 - - -#define CL_SPRINTF snprintf -#define CL_PRINTF(buf) printk("%s", buf) +/* following is for wifi link status */ +#define WIFI_STA_CONNECTED BIT0 +#define WIFI_AP_CONNECTED BIT1 +#define WIFI_HS_CONNECTED BIT2 +#define WIFI_P2P_GO_CONNECTED BIT3 +#define WIFI_P2P_GC_CONNECTED BIT4 #define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \ do { \ @@ -123,46 +123,15 @@ extern u32 btc_dbg_type[]; } \ } while (0) -#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \ - do { \ - if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\ - pr_info("%s: ", __func__); \ - printk(printstr, ##__VA_ARGS__); \ - } \ - } while (0) - -#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \ - do { \ - if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \ - int __i; \ - u8 *__ptr = (u8 *)_ptr; \ - printk printstr; \ - for (__i = 0; __i < 6; __i++) \ - printk("%02X%s", __ptr[__i], (__i == 5) ? \ - "" : "-"); \ - pr_info("\n"); \ - } \ - } while (0) - -#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \ - do { \ - if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \ - int __i; \ - u8 *__ptr = (u8 *)_hexdata; \ - printk(_titlestring); \ - for (__i = 0; __i < (int)_hexdatalen; __i++) { \ - printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \ - == 0) ? 
" " : " ");\ - if (((__i + 1) % 16) == 0) \ - printk("\n"); \ - } \ - pr_debug("\n"); \ - } \ - } while (0) - -#define BTC_ANT_PATH_WIFI 0 -#define BTC_ANT_PATH_BT 1 -#define BTC_ANT_PATH_PTA 2 +#define BTC_RSSI_HIGH(_rssi_) \ + ((_rssi_ == BTC_RSSI_STATE_HIGH || \ + _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false) +#define BTC_RSSI_MEDIUM(_rssi_) \ + ((_rssi_ == BTC_RSSI_STATE_MEDIUM || \ + _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false) +#define BTC_RSSI_LOW(_rssi_) \ + ((_rssi_ == BTC_RSSI_STATE_LOW || \ + _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false) enum btc_power_save_type { BTC_PS_WIFI_NATIVE = 0, @@ -224,7 +193,6 @@ enum btc_wifi_pnp { BTC_WIFI_PNP_MAX }; - enum btc_get_type { /* type bool */ BTC_GET_BL_HS_OPERATION, @@ -253,6 +221,7 @@ enum btc_get_type { BTC_GET_U4_WIFI_BW, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, BTC_GET_U4_WIFI_FW_VER, + BTC_GET_U4_WIFI_LINK_STATUS, BTC_GET_U4_BT_PATCH_VER, /* type u1Byte */ @@ -260,6 +229,7 @@ enum btc_get_type { BTC_GET_U1_WIFI_CENTRAL_CHNL, BTC_GET_U1_WIFI_HS_CHNL, BTC_GET_U1_MAC_PHY_MODE, + BTC_GET_U1_AP_NUM, /* for 1Ant */ BTC_GET_U1_LPS_MODE, @@ -270,7 +240,6 @@ enum btc_get_type { BTC_GET_MAX }; - enum btc_set_type { /* type bool */ BTC_SET_BL_BT_DISABLE, @@ -283,7 +252,6 @@ enum btc_set_type { /* type u1Byte */ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, - BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, BTC_SET_UI_SCAN_SIG_COMPENSATION, BTC_SET_U1_AGG_BUF_SIZE, @@ -295,6 +263,9 @@ enum btc_set_type { /* type bool */ BTC_SET_BL_BT_SCO_BUSY, /* type u1Byte */ + BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, + BTC_SET_U1_LPS_VAL, + BTC_SET_U1_RPWM_VAL, BTC_SET_U1_1ANT_LPS, BTC_SET_U1_1ANT_RPWM, /* type trigger some action */ @@ -358,6 +329,20 @@ enum btc_notify_type_special_packet { BTC_PACKET_MAX }; +enum hci_ext_bt_operation { + HCI_BT_OP_NONE = 0x0, + HCI_BT_OP_INQUIRY_START = 0x1, + HCI_BT_OP_INQUIRY_FINISH = 0x2, + HCI_BT_OP_PAGING_START = 0x3, + HCI_BT_OP_PAGING_SUCCESS = 0x4, + HCI_BT_OP_PAGING_UNSUCCESS = 0x5, + HCI_BT_OP_PAIRING_START = 0x6, + HCI_BT_OP_PAIRING_FINISH = 0x7, + HCI_BT_OP_BT_DEV_ENABLE = 0x8, + HCI_BT_OP_BT_DEV_DISABLE = 0x9, + HCI_BT_OP_MAX +}; + enum btc_notify_type_stack_operation { BTC_STACK_OP_NONE = 0x0, BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1, @@ -365,14 +350,13 @@ enum btc_notify_type_stack_operation { BTC_STACK_OP_MAX }; - typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr); typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr); typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr); -typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data); +typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data); typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr, u32 bit_mask, u8 data1b); @@ -413,20 +397,22 @@ struct btc_bt_info { u8 agg_buf_size; bool limited_dig; bool reject_agg_pkt; - bool b_bt_ctrl_buf_size; + bool bt_ctrl_buf_size; bool increase_scan_dev_num; u16 bt_hci_ver; u16 bt_real_fw_ver; u8 bt_fw_ver; + bool bt_disable_low_pwr; + /* the following is for 1Ant solution */ bool bt_ctrl_lps; bool bt_pwr_save_mode; bool bt_lps_on; bool force_to_roam; u8 force_exec_pwr_cmd_cnt; - u8 lps_1ant; - u8 rpwm_1ant; + u8 lps_val; + u8 rpwm_val; u32 ra_mask; }; @@ -457,6 +443,7 @@ struct btc_statistics { u32 cnt_special_packet_notify; u32 cnt_bt_info_notify; u32 cnt_periodical; + u32 cnt_coex_dm_switch; u32 cnt_stack_operation_notify; u32 cnt_dbg_ctrl; }; @@ -493,7 +480,6 @@ struct btc_coexist { bool initilized; bool stop_coex_dm; bool manual_control; - u8 *cli_buf; 
struct btc_statistics statistics; u8 pwr_mode_val[10]; @@ -509,7 +495,6 @@ struct btc_coexist { bfp_btc_set_bb_reg btc_set_bb_reg; bfp_btc_get_bb_reg btc_get_bb_reg; - bfp_btc_set_rf_reg btc_set_rf_reg; bfp_btc_get_rf_reg btc_get_rf_reg; @@ -533,13 +518,14 @@ void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action); void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, - enum _RT_MEDIA_STATUS media_status); + enum rt_media_status media_status); void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type); void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length); void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_halt_notify(struct btc_coexist *btcoexist); void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); +void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist); void exhalbtc_periodical(struct btc_coexist *btcoexist); void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, u8 *data); diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c index 0ab94fe4cbbe000c328a6dbe6bfeb89f3ffdffb0..b9b0cb7af8eace6e357378729e5d8d8628cb282a 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c @@ -22,19 +22,19 @@ * Larry Finger * *****************************************************************************/ - #include "../wifi.h" -#include "rtl_btc.h" -#include "halbt_precomp.h" - #include #include +#include "rtl_btc.h" +#include "halbt_precomp.h" + static struct rtl_btc_ops rtl_btc_operation = { .btc_init_variables = rtl_btc_init_variables, .btc_init_hal_vars = rtl_btc_init_hal_vars, .btc_init_hw_config = rtl_btc_init_hw_config, .btc_ips_notify = rtl_btc_ips_notify, + .btc_lps_notify = rtl_btc_lps_notify, .btc_scan_notify = rtl_btc_scan_notify, .btc_connect_notify = rtl_btc_connect_notify, .btc_mediastatus_notify = rtl_btc_mediastatus_notify, @@ -44,6 +44,7 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_is_limited_dig = rtl_btc_is_limited_dig, .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, + .btc_special_packet_notify = rtl_btc_special_packet_notify, }; void rtl_btc_init_variables(struct rtl_priv *rtlpriv) @@ -85,6 +86,11 @@ void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type) exhalbtc_ips_notify(&gl_bt_coexist, type); } +void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type) +{ + exhalbtc_lps_notify(&gl_bt_coexist, type); +} + void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype) { exhalbtc_scan_notify(&gl_bt_coexist, scantype); @@ -96,13 +102,14 @@ void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action) } void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, - enum _RT_MEDIA_STATUS mstatus) + enum rt_media_status mstatus) { exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus); } void rtl_btc_periodical(struct rtl_priv *rtlpriv) { + /*rtl_bt_dm_monitor();*/ exhalbtc_periodical(&gl_bt_coexist); } @@ -150,12 +157,18 @@ bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) { + /* It seems 'bt_disabled' is never be initialized or set. 
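/*
 * [Editorial example, not part of the patch] The new btc_lps_notify and
 * btc_special_packet_notify entries in rtl_btc_operation above are meant
 * to be reached through rtlpriv->btcoexist.btc_ops, guarded by
 * get_btc_status(), matching the pattern this patch adds to core.c for
 * the scan and media-status notifies.  A hypothetical caller from the
 * power-save path (the real call sites live outside this hunk):
 */
static void sketch_notify_lps_enter(struct rtl_priv *rtlpriv, u8 ps_state)
{
	if (rtlpriv->cfg->ops->get_btc_status())
		rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, ps_state);
}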
*/ if (gl_bt_coexist.bt_info.bt_disabled) return true; else return false; } +void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type) +{ + return exhalbtc_special_packet_notify(&gl_bt_coexist, pkt_type); +} + struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) { return &rtl_btc_operation; @@ -174,11 +187,11 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) return num; } -enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw) +enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT; + enum rt_media_status m_status = RT_MEDIA_DISCONNECT; u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h index 805b22cc8fc8fcbd55f87cf1b5d2ebc5e83c83ea..ccd5a0f91e3b1bf626ca53cef370d7f717c08f03 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h @@ -31,22 +31,24 @@ void rtl_btc_init_variables(struct rtl_priv *rtlpriv); void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv); void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv); void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type); +void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type); void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype); void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action); void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, - enum _RT_MEDIA_STATUS mstatus); + enum rt_media_status mstatus); void rtl_btc_periodical(struct rtl_priv *rtlpriv); void rtl_btc_halt_notify(void); void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); +void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type); struct rtl_btc_ops *rtl_btc_get_ops_pointer(void); u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); -enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw); +enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c index 0276153c72cc2b8f65a8eed5cfeb638b793012f9..8fe8b4cfae6c5aecb7ed58c1043ddc95f08d437f 100644 --- a/drivers/net/wireless/rtlwifi/cam.c +++ b/drivers/net/wireless/rtlwifi/cam.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -26,10 +22,9 @@ * Larry Finger * *****************************************************************************/ - -#include #include "wifi.h" #include "cam.h" +#include void rtl_cam_reset_sec_info(struct ieee80211_hw *hw) { @@ -52,8 +47,8 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, u32 target_content = 0; u8 entry_i; - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "key_cont_128: %6phC\n", - key_cont_128); + RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :", + key_cont_128, 16); for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { target_command = entry_i + CAM_CONTENT_COUNT * entry_no; @@ -68,11 +63,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], target_command); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE %x: %x\n", + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE %x: %x\n", rtlpriv->cfg->maps[WCAMI], target_content); RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The Key ID is %d\n", entry_no); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE %x: %x\n", + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE %x: %x\n", rtlpriv->cfg->maps[RWCAM], target_command); } else if (entry_i == 1) { @@ -87,10 +84,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], target_command); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A4: %x\n", - target_content); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A0: %x\n", - target_command); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE A4: %x\n", target_content); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE A0: %x\n", target_command); } else { @@ -107,15 +104,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, target_command); udelay(100); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A4: %x\n", - target_content); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE A0: %x\n", - target_command); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE A4: %x\n", target_content); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "WRITE A0: %x\n", target_command); } } - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "after set key, usconfig:%x\n", - us_config); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + "after set key, usconfig:%x\n", us_config); } u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, @@ -125,27 +122,26 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, u32 us_config; struct rtl_priv *rtlpriv = rtl_priv(hw); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n", ul_entry_idx, ul_key_id, ul_enc_alg, ul_default_key, mac_addr); if (ul_key_id == TOTAL_CAM_ENTRY) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "<=== ulKeyId exceed!\n"); + "ulKeyId exceed!\n"); return 0; } - if (ul_default_key == 1) { + if (ul_default_key == 1) us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2); - } else { + else us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id; - } rtl_cam_program_entry(hw, ul_entry_idx, mac_addr, - key_content, us_config); + (u8 *)key_content, us_config); - RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n"); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n"); return 1; @@ -289,7 +285,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n"); + RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, + "sta_addr is NULL.\n"); return 
TOTAL_CAM_ENTRY; } /* Does STA already exist? */ @@ -322,7 +319,9 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) u8 i, *addr; if (NULL == sta_addr) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n"); + RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, + "sta_addr is NULL.\n"); + return; } if (is_zero_ether_addr(sta_addr)) { @@ -339,8 +338,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) /* Remove from HW Security CAM */ eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]); rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i); - RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, - "del CAM entry %d\n", i); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "&&&&&&&&&del entry %d\n", i); } } return; diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h index 0105e6c1901ed96fc6be6cfd4900b2285ed0835a..35508087c0c5ed4f80a969dfffd989c5a6d9eeed 100644 --- a/drivers/net/wireless/rtlwifi/cam.h +++ b/drivers/net/wireless/rtlwifi/cam.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -36,15 +32,15 @@ #define CFG_VALID BIT(15) #define PAIRWISE_KEYIDX 0 -#define CAM_PAIRWISE_KEY_POSITION 4 +#define CAM_PAIRWISE_KEY_POSITION 4 #define CAM_CONFIG_USEDK 1 #define CAM_CONFIG_NO_USEDK 0 void rtl_cam_reset_all_entry(struct ieee80211_hw *hw); u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, - u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, - u32 ul_default_key, u8 *key_content); + u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, + u32 ul_default_key, u8 *key_content); int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, u32 ul_key_id); void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index); diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 56e218e0469cd418c29acb072ef8665b91cb5092..f6179bc060869cfdb6dd6d9d9c90d8cc43197904 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
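/*
 * [Editorial note, not part of the patch] Worked example for the CAM
 * config word built in rtl_cam_add_one_entry() in cam.c above, using
 * CFG_VALID = BIT(15) from cam.h.  The algorithm value 0x4 and key index
 * 0x2 are hypothetical, chosen only to show the layout:
 *
 *	non-default key: us_config = 0x8000 | (0x4 << 2) | 0x2 = 0x8012
 *	default key:     us_config = 0x8000 | (0x4 << 2)       = 0x8010
 *
 * i.e. bit 15 marks the entry valid, the encryption algorithm occupies
 * bits 2 and up, and the key index sits in the low bits; for a default
 * key the key index is left out of the word.
 */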
* @@ -31,10 +27,13 @@ #include "core.h" #include "cam.h" #include "base.h" -#include "pci.h" #include "ps.h" +#include "pwrseqcmd.h" +#include "btcoexist/rtl_btc.h" +#include #include +#include void rtl_addr_delay(u32 addr) { @@ -103,7 +102,7 @@ void rtl_fw_cb(const struct firmware *firmware, void *context) int err; RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, - "Firmware callback routine entered!\n"); + "Firmware callback routine entered!\n"); complete(&rtlpriv->firmware_loading_complete); if (!firmware) { if (rtlpriv->cfg->alt_fw_name) { @@ -129,26 +128,13 @@ void rtl_fw_cb(const struct firmware *firmware, void *context) memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size); rtlpriv->rtlhal.fwsize = firmware->size; release_firmware(firmware); - - err = ieee80211_register_hw(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Can't register mac80211 hw\n"); - return; - } else { - rtlpriv->mac80211.mac80211_registered = 1; - } - set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); - - /*init rfkill */ - rtl_init_rfkill(hw); } EXPORT_SYMBOL(rtl_fw_cb); /*mutex for start & stop is must here. */ static int rtl_op_start(struct ieee80211_hw *hw) { - int err; + int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -170,28 +156,33 @@ static void rtl_op_stop(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool support_remote_wakeup = false; if (is_hal_stop(rtlhal)) return; + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wakeup)); /* here is must, because adhoc do stop and start, * but stop with RFOFF may cause something wrong, * like adhoc TP */ - if (unlikely(ppsc->rfpwr_state == ERFOFF)) { + if (unlikely(ppsc->rfpwr_state == ERFOFF)) rtl_ips_nic_on(hw); - } mutex_lock(&rtlpriv->locks.conf_mutex); + /* if wowlan supported, DON'T clear connected info */ + if (!(support_remote_wakeup && + rtlhal->enter_pnp_sleep)) { + mac->link_state = MAC80211_NOLINK; + memset(mac->bssid, 0, 6); + mac->vendor = PEER_UNKNOWN; - mac->link_state = MAC80211_NOLINK; - memset(mac->bssid, 0, ETH_ALEN); - mac->vendor = PEER_UNKNOWN; - - /*reset sec info */ - rtl_cam_reset_sec_info(hw); + /* reset sec info */ + rtl_cam_reset_sec_info(hw); - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw); + } rtlpriv->intf_ops->adapter_stop(hw); mutex_unlock(&rtlpriv->locks.conf_mutex); @@ -215,7 +206,6 @@ static void rtl_op_tx(struct ieee80211_hw *hw, if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb)) rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc); - return; err_free: @@ -229,18 +219,17 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); int err = 0; - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; - if (mac->vif) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "vif has been set!! 
mac->vif = 0x%p\n", mac->vif); return -EOPNOTSUPP; } + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + rtl_ips_nic_on(hw); mutex_lock(&rtlpriv->locks.conf_mutex); - switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_CLIENT: mac->p2p = P2P_ROLE_CLIENT; @@ -251,10 +240,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, "NL80211_IFTYPE_STATION\n"); mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, - rtlpriv->cfg->maps - [RTL_IBSS_INT_MASKS]); + rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]); } - mac->link_state = MAC80211_LINKED; break; case NL80211_IFTYPE_ADHOC: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, @@ -267,7 +254,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, else mac->basic_rates = 0xff0; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, - (u8 *) (&mac->basic_rates)); + (u8 *)(&mac->basic_rates)); break; case NL80211_IFTYPE_P2P_GO: @@ -284,7 +271,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, else mac->basic_rates = 0xff0; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, - (u8 *) (&mac->basic_rates)); + (u8 *)(&mac->basic_rates)); break; case NL80211_IFTYPE_MESH_POINT: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, @@ -301,7 +288,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw, break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "operation mode %d is not supported!\n", vif->type); + "operation mode %d is not support!\n", vif->type); err = -EOPNOTSUPP; goto out; } @@ -339,8 +326,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, if (mac->beacon_enabled == 1) { mac->beacon_enabled = 0; rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, - rtlpriv->cfg->maps - [RTL_IBSS_INT_MASKS]); + rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]); } } @@ -355,12 +341,12 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, mac->vendor = PEER_UNKNOWN; mac->opmode = NL80211_IFTYPE_UNSPECIFIED; rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); + mutex_unlock(&rtlpriv->locks.conf_mutex); } - static int rtl_op_change_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum nl80211_iftype new_type, bool p2p) + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, bool p2p) { struct rtl_priv *rtlpriv = rtl_priv(hw); int ret; @@ -370,10 +356,221 @@ static int rtl_op_change_interface(struct ieee80211_hw *hw, vif->p2p = p2p; ret = rtl_op_add_interface(hw, vif); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, - "p2p %x\n", p2p); + "p2p %x\n", p2p); return ret; } +#ifdef CONFIG_PM +static u16 crc16_ccitt(u8 data, u16 crc) +{ + u8 shift_in, data_bit, crc_bit11, crc_bit4, crc_bit15; + u8 i; + u16 result; + + for (i = 0; i < 8; i++) { + crc_bit15 = ((crc & BIT(15)) ? 1 : 0); + data_bit = (data & (BIT(0) << i) ? 1 : 0); + shift_in = crc_bit15 ^ data_bit; + + result = crc << 1; + if (shift_in == 0) + result &= (~BIT(0)); + else + result |= BIT(0); + + crc_bit11 = ((crc & BIT(11)) ? 1 : 0) ^ shift_in; + if (crc_bit11 == 0) + result &= (~BIT(12)); + else + result |= BIT(12); + + crc_bit4 = ((crc & BIT(4)) ? 
1 : 0) ^ shift_in; + if (crc_bit4 == 0) + result &= (~BIT(5)); + else + result |= BIT(5); + + crc = result; + } + + return crc; +} + +static u16 _calculate_wol_pattern_crc(u8 *pattern, u16 len) +{ + u16 crc = 0xffff; + u32 i; + + for (i = 0; i < len; i++) + crc = crc16_ccitt(pattern[i], crc); + + crc = ~crc; + + return crc; +} + +static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wow) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = &rtlpriv->mac80211; + struct cfg80211_pkt_pattern *patterns = wow->patterns; + struct rtl_wow_pattern rtl_pattern; + const u8 *pattern_os, *mask_os; + u8 mask[MAX_WOL_BIT_MASK_SIZE] = {0}; + u8 content[MAX_WOL_PATTERN_SIZE] = {0}; + u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + u8 multicast_addr1[2] = {0x33, 0x33}; + u8 multicast_addr2[3] = {0x01, 0x00, 0x5e}; + u8 i, mask_len; + u16 j, len; + + for (i = 0; i < wow->n_patterns; i++) { + memset(&rtl_pattern, 0, sizeof(struct rtl_wow_pattern)); + memset(mask, 0, MAX_WOL_BIT_MASK_SIZE); + if (patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING, + "Pattern[%d] is too long\n", i); + continue; + } + pattern_os = patterns[i].pattern; + mask_len = DIV_ROUND_UP(patterns[i].pattern_len, 8); + mask_os = patterns[i].mask; + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "pattern content\n", pattern_os, + patterns[i].pattern_len); + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "mask content\n", mask_os, mask_len); + /* 1. unicast? multicast? or broadcast? */ + if (memcmp(pattern_os, broadcast_addr, 6) == 0) + rtl_pattern.type = BROADCAST_PATTERN; + else if (memcmp(pattern_os, multicast_addr1, 2) == 0 || + memcmp(pattern_os, multicast_addr2, 3) == 0) + rtl_pattern.type = MULTICAST_PATTERN; + else if (memcmp(pattern_os, mac->mac_addr, 6) == 0) + rtl_pattern.type = UNICAST_PATTERN; + else + rtl_pattern.type = UNKNOWN_TYPE; + + /* 2. translate mask_from_os to mask_for_hw */ + +/****************************************************************************** + * pattern from OS uses 'ethenet frame', like this: + + | 6 | 6 | 2 | 20 | Variable | 4 | + |--------+--------+------+-----------+------------+-----| + | 802.3 Mac Header | IP Header | TCP Packet | FCS | + | DA | SA | Type | + + * BUT, packet catched by our HW is in '802.11 frame', begin from LLC, + + | 24 or 30 | 6 | 2 | 20 | Variable | 4 | + |-------------------+--------+------+-----------+------------+-----| + | 802.11 MAC Header | LLC | IP Header | TCP Packet | FCS | + | Others | Tpye | + + * Therefore, we need translate mask_from_OS to mask_to_hw. + * We should left-shift mask by 6 bits, then set the new bit[0~5] = 0, + * because new mask[0~5] means 'SA', but our HW packet begins from LLC, + * bit[0~5] corresponds to first 6 Bytes in LLC, they just don't match. + ******************************************************************************/ + + /* Shift 6 bits */ + for (j = 0; j < mask_len - 1; j++) { + mask[j] = mask_os[j] >> 6; + mask[j] |= (mask_os[j + 1] & 0x3F) << 2; + } + mask[j] = (mask_os[j] >> 6) & 0x3F; + /* Set bit 0-5 to zero */ + mask[0] &= 0xC0; + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "mask to hw\n", mask, mask_len); + for (j = 0; j < (MAX_WOL_BIT_MASK_SIZE + 1) / 4; j++) { + rtl_pattern.mask[j] = mask[j * 4]; + rtl_pattern.mask[j] |= (mask[j * 4 + 1] << 8); + rtl_pattern.mask[j] |= (mask[j * 4 + 2] << 16); + rtl_pattern.mask[j] |= (mask[j * 4 + 3] << 24); + } + + /* To get the wake up pattern from the mask. 
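/*
 * [Editorial example, not part of the patch] Concrete trace of the 6-bit
 * mask shift performed just above, with a hypothetical 3-byte mask from
 * cfg80211:
 *
 *	mask_os = { 0x00, 0xff, 0x01 }   (OS mask bits 8..16 set)
 *
 *	j = 0: mask[0] = 0x00 >> 6 | (0xff & 0x3f) << 2 = 0xfc
 *	j = 1: mask[1] = 0xff >> 6 | (0x01 & 0x3f) << 2 = 0x07
 *	j = 2: mask[2] = (0x01 >> 6) & 0x3f             = 0x00
 *	       mask[0] &= 0xc0                          -> 0xc0
 *
 * Every OS mask bit ends up six positions lower.  Since each mask bit
 * covers one pattern byte, the shift discards the destination-address
 * bytes, and the six bits that would now line up with the 802.3 source
 * address are cleared because the frame the hardware matches starts at
 * the LLC header instead.
 */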
+ * We do not count first 12 bits which means + * DA[6] and SA[6] in the pattern to match HW design. + */ + len = 0; + for (j = 12; j < patterns[i].pattern_len; j++) { + if ((mask_os[j / 8] >> (j % 8)) & 0x01) { + content[len] = pattern_os[j]; + len++; + } + } + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "pattern to hw\n", content, len); + /* 3. calculate crc */ + rtl_pattern.crc = _calculate_wol_pattern_crc(content, len); + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "CRC_Remainder = 0x%x", rtl_pattern.crc); + + /* 4. write crc & mask_for_hw to hw */ + rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i); + } + rtl_write_byte(rtlpriv, 0x698, wow->n_patterns); +} + +static int rtl_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wow) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct timeval ts; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n"); + if (WARN_ON(!wow)) + return -EINVAL; + + /* to resolve s4 can not wake up*/ + do_gettimeofday(&ts); + rtlhal->last_suspend_sec = ts.tv_sec; + + if ((ppsc->wo_wlan_mode & WAKE_ON_PATTERN_MATCH) && wow->n_patterns) + _rtl_add_wowlan_patterns(hw, wow); + + rtlhal->driver_is_goingto_unload = true; + rtlhal->enter_pnp_sleep = true; + + rtl_lps_leave(hw); + rtl_op_stop(hw); + device_set_wakeup_enable(wiphy_dev(hw->wiphy), true); + return 0; +} + +static int rtl_op_resume(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct timeval ts; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n"); + rtlhal->driver_is_goingto_unload = false; + rtlhal->enter_pnp_sleep = false; + rtlhal->wake_from_pnp_sleep = true; + + /* to resovle s4 can not wake up*/ + do_gettimeofday(&ts); + if (ts.tv_sec - rtlhal->last_suspend_sec < 5) + return -1; + + rtl_op_start(hw); + device_set_wakeup_enable(wiphy_dev(hw->wiphy), false); + ieee80211_resume_disconnect(mac->vif); + rtlhal->wake_from_pnp_sleep = false; + return 0; +} +#endif + static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -386,7 +583,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) return 1; mutex_lock(&rtlpriv->locks.conf_mutex); - if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/ + if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"); } @@ -421,8 +618,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) * is worked very well */ if (!rtlpriv->psc.multi_buffered) queue_delayed_work(rtlpriv->works.rtl_wq, - &rtlpriv->works.ps_work, - MSECS(5)); + &rtlpriv->works.ps_work, + MSECS(5)); } else { rtl_swlps_rf_awake(hw); rtlpriv->psc.sw_ps_enabled = false; @@ -436,20 +633,26 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) mac->retry_long = hw->conf.long_frame_max_tx_count; mac->retry_short = hw->conf.long_frame_max_tx_count; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, - (u8 *) (&hw->conf. 
- long_frame_max_tx_count)); + (u8 *)(&hw->conf.long_frame_max_tx_count)); } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + if (changed & IEEE80211_CONF_CHANGE_CHANNEL && + !rtlpriv->proximity.proxim_on) { struct ieee80211_channel *channel = hw->conf.chandef.chan; + enum nl80211_chan_width width = hw->conf.chandef.width; + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; u8 wide_chan = (u8) channel->hw_value; + /* channel_type is for 20&40M */ + if (width < NL80211_CHAN_WIDTH_80) + channel_type = + cfg80211_get_chandef_type(&hw->conf.chandef); if (mac->act_scanning) mac->n_channels++; if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - !mac->act_scanning) { + mac->link_state < MAC80211_LINKED && + !mac->act_scanning) { if (rtlpriv->cfg->ops->chk_switch_dmdp) rtlpriv->cfg->ops->chk_switch_dmdp(hw); } @@ -463,48 +666,98 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) *info for cisco1253 bw20, so we modify *it here based on UPPER & LOWER */ - switch (cfg80211_get_chandef_type(&hw->conf.chandef)) { - case NL80211_CHAN_HT20: - case NL80211_CHAN_NO_HT: - /* SC */ - mac->cur_40_prime_sc = - PRIME_CHNL_OFFSET_DONT_CARE; - rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20; - mac->bw_40 = false; - break; - case NL80211_CHAN_HT40MINUS: - /* SC */ - mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER; - rtlphy->current_chan_bw = - HT_CHANNEL_WIDTH_20_40; - mac->bw_40 = true; - - /*wide channel */ - wide_chan -= 2; - - break; - case NL80211_CHAN_HT40PLUS: - /* SC */ - mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER; - rtlphy->current_chan_bw = - HT_CHANNEL_WIDTH_20_40; - mac->bw_40 = true; - - /*wide channel */ - wide_chan += 2; - - break; - default: - mac->bw_40 = false; - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; + + if (width >= NL80211_CHAN_WIDTH_80) { + if (width == NL80211_CHAN_WIDTH_80) { + u32 center = hw->conf.chandef.center_freq1; + u32 primary = + (u32)hw->conf.chandef.chan->center_freq; + + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_80; + mac->bw_80 = true; + mac->bw_40 = true; + if (center > primary) { + mac->cur_80_prime_sc = + PRIME_CHNL_OFFSET_LOWER; + if (center - primary == 10) { + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_UPPER; + + wide_chan += 2; + } else if (center - primary == 30) { + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_LOWER; + + wide_chan += 6; + } + } else { + mac->cur_80_prime_sc = + PRIME_CHNL_OFFSET_UPPER; + if (primary - center == 10) { + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_LOWER; + + wide_chan -= 2; + } else if (primary - center == 30) { + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_UPPER; + + wide_chan -= 6; + } + } + } + } else { + switch (channel_type) { + case NL80211_CHAN_HT20: + case NL80211_CHAN_NO_HT: + /* SC */ + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_DONT_CARE; + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_20; + mac->bw_40 = false; + mac->bw_80 = false; + break; + case NL80211_CHAN_HT40MINUS: + /* SC */ + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_UPPER; + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_20_40; + mac->bw_40 = true; + mac->bw_80 = false; + + /*wide channel */ + wide_chan -= 2; + + break; + case NL80211_CHAN_HT40PLUS: + /* SC */ + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_LOWER; + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_20_40; + mac->bw_40 = true; + mac->bw_80 = false; + + /*wide channel */ + wide_chan += 2; + + break; + default: + mac->bw_40 = false; + mac->bw_80 = false; + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not 
processed\n"); + break; + } } if (wide_chan <= 0) wide_chan = 1; - /* In scanning, before we go offchannel we may send a ps = 1 + /* In scanning, when before we offchannel we may send a ps=1 * null to AP, and then we may send a ps = 0 null to AP quickly, * but first null may have caused AP to put lots of packet to * hw tx buffer. These packets must be tx'd before we go off @@ -516,12 +769,12 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) rtlpriv->mac80211.offchan_delay = false; mdelay(50); } + rtlphy->current_channel = wide_chan; rtlpriv->cfg->ops->switch_channel(hw); rtlpriv->cfg->ops->set_channel_access(hw); - rtlpriv->cfg->ops->set_bw_mode(hw, - cfg80211_get_chandef_type(&hw->conf.chandef)); + rtlpriv->cfg->ops->set_bw_mode(hw, channel_type); } mutex_unlock(&rtlpriv->locks.conf_mutex); @@ -530,45 +783,25 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) } static void rtl_op_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *new_flags, u64 multicast) + unsigned int changed_flags, + unsigned int *new_flags, u64 multicast) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 rx_conf; *new_flags &= RTL_SUPPORTED_FILTERS; - if (!changed_flags) + if (0 == changed_flags) return; - /* if ssid not set to hw don't check bssid - * here just used for linked scanning, & linked - * and nolink check bssid is set in set network_type */ - if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) && - (mac->link_state >= MAC80211_LINKED)) { - if (mac->opmode != NL80211_IFTYPE_AP && - mac->opmode != NL80211_IFTYPE_MESH_POINT) { - if (*new_flags & FIF_BCN_PRBRESP_PROMISC) { - rtlpriv->cfg->ops->set_chk_bssid(hw, false); - } else { - rtlpriv->cfg->ops->set_chk_bssid(hw, true); - } - } - } - - /* must be called after set_chk_bssid since that function modifies the - * RCR register too. 
*/ - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf)); - /*TODO: we disable broadcase now, so enable here */ if (changed_flags & FIF_ALLMULTI) { if (*new_flags & FIF_ALLMULTI) { - rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | rtlpriv->cfg->maps[MAC_RCR_AB]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Enable receive multicast frame\n"); } else { - rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] | + mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] | rtlpriv->cfg->maps[MAC_RCR_AB]); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive multicast frame\n"); @@ -577,43 +810,55 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, if (changed_flags & FIF_FCSFAIL) { if (*new_flags & FIF_FCSFAIL) { - rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32]; + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Enable receive FCS error frame\n"); } else { - rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32]; + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "Disable receive FCS error frame\n"); } } + /* if ssid not set to hw don't check bssid + * here just used for linked scanning, & linked + * and nolink check bssid is set in set network_type + */ + if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) && + (mac->link_state >= MAC80211_LINKED)) { + if (mac->opmode != NL80211_IFTYPE_AP && + mac->opmode != NL80211_IFTYPE_MESH_POINT) { + if (*new_flags & FIF_BCN_PRBRESP_PROMISC) + rtlpriv->cfg->ops->set_chk_bssid(hw, false); + else + rtlpriv->cfg->ops->set_chk_bssid(hw, true); + } + } if (changed_flags & FIF_CONTROL) { if (*new_flags & FIF_CONTROL) { - rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, - "Enable receive control frame\n"); + "Enable receive control frame.\n"); } else { - rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, - "Disable receive control frame\n"); + "Disable receive control frame.\n"); } } if (changed_flags & FIF_OTHER_BSS) { if (*new_flags & FIF_OTHER_BSS) { - rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP]; + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, - "Enable receive other BSS's frame\n"); + "Enable receive other BSS's frame.\n"); } else { - rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP]; + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP]; RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, - "Disable receive other BSS's frame\n"); + "Disable receive other BSS's frame.\n"); } } - - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf)); } static int rtl_op_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -625,7 +870,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry; if (sta) { - sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_add_tail(&sta_entry->list, &rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); @@ -633,15 +878,17 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw, sta_entry->wireless_mode = WIRELESS_MODE_G; if (sta->supp_rates[0] <= 0xf) sta_entry->wireless_mode = WIRELESS_MODE_B; - if (sta->ht_cap.ht_supported == true) + if (sta->ht_cap.ht_supported) sta_entry->wireless_mode = WIRELESS_MODE_N_24G; if (vif->type == 
NL80211_IFTYPE_ADHOC) sta_entry->wireless_mode = WIRELESS_MODE_G; } else if (rtlhal->current_bandtype == BAND_ON_5G) { sta_entry->wireless_mode = WIRELESS_MODE_A; - if (sta->ht_cap.ht_supported == true) - sta_entry->wireless_mode = WIRELESS_MODE_N_24G; + if (sta->ht_cap.ht_supported) + sta_entry->wireless_mode = WIRELESS_MODE_N_5G; + if (sta->vht_cap.vht_supported) + sta_entry->wireless_mode = WIRELESS_MODE_AC_5G; if (vif->type == NL80211_IFTYPE_ADHOC) sta_entry->wireless_mode = WIRELESS_MODE_A; @@ -652,9 +899,10 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw, memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN); RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, - "Add sta addr is %pM\n", sta->addr); + "Add sta addr is %pM\n", sta->addr); rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); } + return 0; } @@ -667,17 +915,15 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw, if (sta) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "Remove sta addr is %pM\n", sta->addr); - sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; sta_entry->wireless_mode = 0; sta_entry->ratr_index = 0; - spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_del(&sta_entry->list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); } return 0; } - static int _rtl_get_hal_qnum(u16 queue) { int qnum; @@ -707,8 +953,8 @@ static int _rtl_get_hal_qnum(u16 queue) *for rtl819x BE = 0, BK = 1, VI = 2, VO = 3 */ static int rtl_op_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, - const struct ieee80211_tx_queue_params *param) + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *param) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -731,14 +977,14 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, u32 changed) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct ieee80211_sta *sta = NULL; mutex_lock(&rtlpriv->locks.conf_mutex); if ((vif->type == NL80211_IFTYPE_ADHOC) || @@ -756,15 +1002,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->beacon_enabled = 1; rtlpriv->cfg->ops->update_interrupt_mask(hw, rtlpriv->cfg->maps - [RTL_IBSS_INT_MASKS], - 0); + [RTL_IBSS_INT_MASKS], 0); if (rtlpriv->cfg->ops->linked_set_reg) rtlpriv->cfg->ops->linked_set_reg(hw); } } if ((changed & BSS_CHANGED_BEACON_ENABLED && - !bss_conf->enable_beacon)) { + !bss_conf->enable_beacon)) { if (mac->beacon_enabled == 1) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "ADHOC DISABLE BEACON\n"); @@ -785,8 +1030,12 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, /*TODO: reference to enum ieee80211_bss_change */ if (changed & BSS_CHANGED_ASSOC) { + u8 mstatus; if (bss_conf->assoc) { struct ieee80211_sta *sta = NULL; + u8 keep_alive = 10; + + mstatus = RT_MEDIA_CONNECT; /* we should reset all sec info & cam * before set cam after linked, we should not * reset in disassoc, that will cause tkip->wep @@ -804,47 +1053,89 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (rtlpriv->cfg->ops->linked_set_reg) rtlpriv->cfg->ops->linked_set_reg(hw); + rcu_read_lock(); sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); if (!sta) { - 
pr_err("ieee80211_find_sta returned NULL\n"); rcu_read_unlock(); goto out; } - - if (vif->type == NL80211_IFTYPE_STATION && sta) - rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD, "send PS STATIC frame\n"); if (rtlpriv->dm.supp_phymode_switch) { if (sta->ht_cap.ht_supported) rtl_send_smps_action(hw, sta, - IEEE80211_SMPS_STATIC); + IEEE80211_SMPS_STATIC); + } + + if (rtlhal->current_bandtype == BAND_ON_5G) { + mac->mode = WIRELESS_MODE_A; + } else { + if (sta->supp_rates[0] <= 0xf) + mac->mode = WIRELESS_MODE_B; + else + mac->mode = WIRELESS_MODE_G; + } + + if (sta->ht_cap.ht_supported) { + if (rtlhal->current_bandtype == BAND_ON_2_4G) + mac->mode = WIRELESS_MODE_N_24G; + else + mac->mode = WIRELESS_MODE_N_5G; } + + if (sta->vht_cap.vht_supported) { + if (rtlhal->current_bandtype == BAND_ON_5G) + mac->mode = WIRELESS_MODE_AC_5G; + else + mac->mode = WIRELESS_MODE_AC_24G; + } + + if (vif->type == NL80211_IFTYPE_STATION && sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); rcu_read_unlock(); + /* to avoid AP Disassociation caused by inactivity */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_KEEP_ALIVE, + (u8 *)(&keep_alive)); + RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "BSS_CHANGED_ASSOC\n"); } else { + mstatus = RT_MEDIA_DISCONNECT; + if (mac->link_state == MAC80211_LINKED) { rtlpriv->enter_ps = false; schedule_work(&rtlpriv->works.lps_change_work); } - if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); mac->link_state = MAC80211_NOLINK; memset(mac->bssid, 0, ETH_ALEN); mac->vendor = PEER_UNKNOWN; + mac->mode = 0; if (rtlpriv->dm.supp_phymode_switch) { if (rtlpriv->cfg->ops->chk_switch_dmdp) rtlpriv->cfg->ops->chk_switch_dmdp(hw); } - RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "BSS_CHANGED_UN_ASSOC\n"); } + rtlpriv->cfg->ops->set_network_type(hw, vif->type); + /* For FW LPS: + * To tell firmware we have connected or disconnected + */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_JOINBSSRPT, + (u8 *)(&mstatus)); + ppsc->report_linked = (mstatus == RT_MEDIA_CONNECT) ? 
+ true : false; + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify( + rtlpriv, mstatus); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { @@ -856,11 +1147,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_PREAMBLE) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n", - bss_conf->use_short_preamble); + bss_conf->use_short_preamble); mac->short_preamble = bss_conf->use_short_preamble; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE, - &mac->short_preamble); + (u8 *)(&mac->short_preamble)); } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -873,13 +1164,17 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->slot_time = RTL_SLOT_TIME_20; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, - &mac->slot_time); + (u8 *)(&mac->slot_time)); } if (changed & BSS_CHANGED_HT) { - RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "BSS_CHANGED_HT\n"); + struct ieee80211_sta *sta = NULL; + + RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, + "BSS_CHANGED_HT\n"); + rcu_read_lock(); - sta = get_sta(hw, vif, bss_conf->bssid); + sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); if (sta) { if (sta->ht_cap.ampdu_density > mac->current_ampdu_density) @@ -893,7 +1188,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, rcu_read_unlock(); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, - &mac->max_mss_density); + (u8 *)(&mac->max_mss_density)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR, &mac->current_ampdu_factor); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE, @@ -902,19 +1197,19 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) { u32 basic_rates; + struct ieee80211_sta *sta = NULL; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID, - (u8 *) bss_conf->bssid); + (u8 *)bss_conf->bssid); - RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "%pM\n", - bss_conf->bssid); + RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, + "bssid: %pM\n", bss_conf->bssid); mac->vendor = PEER_UNKNOWN; memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN); - rtlpriv->cfg->ops->set_network_type(hw, vif->type); rcu_read_lock(); - sta = get_sta(hw, vif, bss_conf->bssid); + sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); if (!sta) { rcu_read_unlock(); goto out; @@ -936,11 +1231,18 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->mode = WIRELESS_MODE_N_5G; } + if (sta->vht_cap.vht_supported) { + if (rtlhal->current_bandtype == BAND_ON_5G) + mac->mode = WIRELESS_MODE_AC_5G; + else + mac->mode = WIRELESS_MODE_AC_24G; + } + /* just station need it, because ibss & ap mode will * set in sta_add, and will be NULL here */ - if (mac->opmode == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION) { struct rtl_sta_info *sta_entry; - sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; sta_entry->wireless_mode = mac->mode; } @@ -955,6 +1257,9 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, * */ } + if (sta->vht_cap.vht_supported) + mac->vht_enable = true; + if (changed & BSS_CHANGED_BASIC_RATES) { /* for 5G must << RATE_6M_INDEX = 4, * because 5G have no cck rate*/ @@ -969,40 +1274,6 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, } rcu_read_unlock(); } - - /* - * For FW LPS: - * To tell firmware we have connected - * to an AP. For 92SE/CE power save v2. 
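/*
 * [Editorial sketch, not part of the patch] The peer-mode selection that
 * the BSS_CHANGED_ASSOC and BSS_CHANGED_BSSID branches above both spell
 * out, folded into one hypothetical helper for readability; the highest
 * capability present wins:
 */
static u8 sketch_peer_wireless_mode(struct rtl_hal *rtlhal,
				    struct ieee80211_sta *sta)
{
	u8 mode;

	if (rtlhal->current_bandtype == BAND_ON_5G)
		mode = WIRELESS_MODE_A;
	else if (sta->supp_rates[0] <= 0xf)
		mode = WIRELESS_MODE_B;
	else
		mode = WIRELESS_MODE_G;

	if (sta->ht_cap.ht_supported)
		mode = (rtlhal->current_bandtype == BAND_ON_2_4G) ?
		       WIRELESS_MODE_N_24G : WIRELESS_MODE_N_5G;

	if (sta->vht_cap.vht_supported)
		mode = (rtlhal->current_bandtype == BAND_ON_5G) ?
		       WIRELESS_MODE_AC_5G : WIRELESS_MODE_AC_24G;

	return mode;
}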
- */ - if (changed & BSS_CHANGED_ASSOC) { - if (bss_conf->assoc) { - if (ppsc->fwctrl_lps) { - u8 mstatus = RT_MEDIA_CONNECT; - u8 keep_alive = 10; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_KEEP_ALIVE, - &keep_alive); - - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_H2C_FW_JOINBSSRPT, - &mstatus); - ppsc->report_linked = true; - } - } else { - if (ppsc->fwctrl_lps) { - u8 mstatus = RT_MEDIA_DISCONNECT; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_H2C_FW_JOINBSSRPT, - &mstatus); - ppsc->report_linked = false; - } - } - if (rtlpriv->cfg->ops->bt_wifi_media_status_notify) - rtlpriv->cfg->ops->bt_wifi_media_status_notify(hw, - ppsc->report_linked); - } - out: mutex_unlock(&rtlpriv->locks.conf_mutex); } @@ -1012,28 +1283,27 @@ static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct rtl_priv *rtlpriv = rtl_priv(hw); u64 tsf; - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf)); + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *)(&tsf)); return tsf; } -static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u64 tsf) +static void rtl_op_set_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u64 tsf) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; mac->tsf = tsf; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *)(&bibss)); } -static void rtl_op_reset_tsf(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp = 0; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *)(&tmp)); } static void rtl_op_sta_notify(struct ieee80211_hw *hw, @@ -1063,13 +1333,13 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_START: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "IEEE80211_AMPDU_TX_START: TID:%d\n", tid); - return rtl_tx_agg_start(hw, sta, tid, ssn); + return rtl_tx_agg_start(hw, vif, sta, tid, ssn); case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid); - return rtl_tx_agg_stop(hw, sta, tid); + return rtl_tx_agg_stop(hw, vif, sta, tid); case IEEE80211_AMPDU_TX_OPERATIONAL: RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid); @@ -1103,10 +1373,14 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) return; } + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1); + if (rtlpriv->dm.supp_phymode_switch) { if (rtlpriv->cfg->ops->chk_switch_dmdp) rtlpriv->cfg->ops->chk_switch_dmdp(hw); } + if (mac->link_state == MAC80211_LINKED) { rtlpriv->enter_ps = false; schedule_work(&rtlpriv->works.lps_change_work); @@ -1115,11 +1389,11 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) rtl_ips_nic_on(hw); } - /* Dual mac */ + /* Dul mac */ rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false; rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY); - rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP); + rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0); } static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw) @@ -1133,13 +1407,13 @@ static void 
rtl_op_sw_scan_complete(struct ieee80211_hw *hw) if (rtlpriv->link_info.higher_busytraffic) return; - /*p2p will use 1/6/11 to scan */ + /* p2p will use 1/6/11 to scan */ if (mac->n_channels == 3) mac->p2p_in_use = true; else mac->p2p_in_use = false; mac->n_channels = 0; - /* Dual mac */ + /* Dul mac */ rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false; if (mac->link_state == MAC80211_LINKED_SCANNING) { @@ -1151,6 +1425,8 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw) } rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE); + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0); } static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -1158,7 +1434,6 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 key_type = NO_ENCRYPTION; u8 key_idx; bool group_key = false; @@ -1174,13 +1449,13 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } /* To support IBSS, use sw-crypto for GTK */ if (((vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_MESH_POINT)) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + (vif->type == NL80211_IFTYPE_MESH_POINT)) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -ENOSPC; RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "%s hardware based encryption for keyidx: %d, mac: %pM\n", - cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, - sta ? sta->addr : bcast_addr); + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + sta ? sta->addr : bcast_addr); rtlpriv->sec.being_setkey = true; rtl_ips_nic_on(hw); mutex_lock(&rtlpriv->locks.conf_mutex); @@ -1204,21 +1479,23 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n"); break; case WLAN_CIPHER_SUITE_AES_CMAC: - /*HW doesn't support CMAC encryption, use software CMAC */ + /* HW don't support CMAC encryption, + * use software CMAC encryption + */ key_type = AESCMAC_ENCRYPTION; RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n"); RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, - "HW don't support CMAC encryption, use software CMAC\n"); + "HW don't support CMAC encrypiton, use software CMAC encrypiton\n"); err = -EOPNOTSUPP; goto out_unlock; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "alg_err:%x!!!!\n", - key->cipher); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "alg_err:%x!!!!:\n", key->cipher); goto out_unlock; } if (key_type == WEP40_ENCRYPTION || - key_type == WEP104_ENCRYPTION || - mac->opmode == NL80211_IFTYPE_ADHOC) + key_type == WEP104_ENCRYPTION || + vif->type == NL80211_IFTYPE_ADHOC) rtlpriv->sec.use_defaultkey = true; /* <2> get key_idx */ @@ -1232,14 +1509,14 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, * 1) wep only: is just for wep enc, in this condition * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION * will be true & enable_hw_sec will be set when wep - * key setting. + * ke setting. 
* 2) wep(group) + AES(pairwise): some AP like cisco * may use it, in this condition enable_hw_sec will not * be set when wep key setting */ /* we must reset sec_info after lingked before set key, * or some flag will be wrong*/ if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_MESH_POINT) { + vif->type == NL80211_IFTYPE_MESH_POINT) { if (!group_key || key_type == WEP40_ENCRYPTION || key_type == WEP104_ENCRYPTION) { if (group_key) @@ -1247,11 +1524,11 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, rtlpriv->cfg->ops->enable_hw_sec(hw); } } else { - if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) || - rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) { + if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) || + rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) { if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION && - (key_type == WEP40_ENCRYPTION || + (key_type == WEP40_ENCRYPTION || key_type == WEP104_ENCRYPTION)) wep_only = true; rtlpriv->sec.pairwise_enc_algorithm = key_type; @@ -1323,7 +1600,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, "disable key delete one entry\n"); /*set local buf about wep key. */ if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_MESH_POINT) { + vif->type == NL80211_IFTYPE_MESH_POINT) { if (sta) rtl_cam_del_entry(hw, sta->addr); } @@ -1336,13 +1613,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, *or clear all entry here. */ rtl_cam_delete_one_entry(hw, mac_addr, key_idx); - - rtl_cam_reset_sec_info(hw); - break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "cmd_err:%x!!!!\n", cmd); + "cmd_err:%x!!!!:\n", cmd); } out_unlock: mutex_unlock(&rtlpriv->locks.conf_mutex); @@ -1372,7 +1646,7 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "wireless radio switch turned %s\n", - radio_state ? "on" : "off"); + radio_state ? "on" : "off"); blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1; wiphy_rfkill_set_hw_state(hw->wiphy, blocked); @@ -1383,18 +1657,148 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw) } /* this function is called by mac80211 to flush tx buffer - * before switch channel or power save, or tx buffer packet + * before switch channle or power save, or tx buffer packet * maybe send after offchannel or rf sleep, this may cause * dis-association by AP */ -static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) +static void rtl_op_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, + bool drop) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->intf_ops->flush) - rtlpriv->intf_ops->flush(hw, drop); + rtlpriv->intf_ops->flush(hw, queues, drop); } +/* Description: + * This routine deals with the Power Configuration CMD + * parsing for RTL8723/RTL8188E Series IC. + * Assumption: + * We should follow specific format that was released from HW SD. 
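The comment above introduces rtl_hal_pwrseqcmdparsing(), whose body follows: it walks a wlan_pwr_cfg table and, per entry, either reads a register, performs a masked read-modify-write, polls until a masked field reaches a value (giving up after a bounded number of tries), delays, or ends the sequence. The two non-trivial steps reduce to the sketch below; the reg_read_fn/reg_write_fn callbacks are stand-ins for rtl_read_byte()/rtl_write_byte() so the fragment stays self-contained, and none of it is part of the patch itself.

#include <stdbool.h>

/* Illustrative only: the WRITE and POLLING steps of the routine below. */
typedef unsigned char (*reg_read_fn)(unsigned int offset);
typedef void (*reg_write_fn)(unsigned int offset, unsigned char value);

static void pwrseq_write_step(reg_read_fn rd, reg_write_fn wr,
			      unsigned int offset, unsigned char mask,
			      unsigned char value)
{
	unsigned char v = rd(offset);

	v &= ~mask;			/* clear the masked field          */
	v |= (value & mask);		/* then program the requested bits */
	wr(offset, v);
}

static bool pwrseq_poll_step(reg_read_fn rd, unsigned int offset,
			     unsigned char mask, unsigned char value,
			     unsigned int max_tries)
{
	while (max_tries--) {
		if ((rd(offset) & mask) == (value & mask))
			return true;	/* field reached the expected value */
		/* the driver busy-waits udelay(10) between attempts */
	}
	return false;			/* timed out, as the routine below reports */
}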
+ */ +bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, + u8 faversion, u8 interface_type, + struct wlan_pwr_cfg pwrcfgcmd[]) +{ + struct wlan_pwr_cfg cfg_cmd = {0}; + bool polling_bit = false; + u32 ary_idx = 0; + u8 value = 0; + u32 offset = 0; + u32 polling_count = 0; + u32 max_polling_cnt = 5000; + + do { + cfg_cmd = pwrcfgcmd[ary_idx]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n", + GET_PWR_CFG_OFFSET(cfg_cmd), + GET_PWR_CFG_CUT_MASK(cfg_cmd), + GET_PWR_CFG_FAB_MASK(cfg_cmd), + GET_PWR_CFG_INTF_MASK(cfg_cmd), + GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd), + GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd)); + + if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) && + (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) && + (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) { + switch (GET_PWR_CFG_CMD(cfg_cmd)) { + case PWR_CMD_READ: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"); + break; + case PWR_CMD_WRITE: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"); + offset = GET_PWR_CFG_OFFSET(cfg_cmd); + + /*Read the value from system register*/ + value = rtl_read_byte(rtlpriv, offset); + value &= (~(GET_PWR_CFG_MASK(cfg_cmd))); + value |= (GET_PWR_CFG_VALUE(cfg_cmd) & + GET_PWR_CFG_MASK(cfg_cmd)); + + /*Write the value back to sytem register*/ + rtl_write_byte(rtlpriv, offset, value); + break; + case PWR_CMD_POLLING: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"); + polling_bit = false; + offset = GET_PWR_CFG_OFFSET(cfg_cmd); + + do { + value = rtl_read_byte(rtlpriv, offset); + + value &= GET_PWR_CFG_MASK(cfg_cmd); + if (value == + (GET_PWR_CFG_VALUE(cfg_cmd) & + GET_PWR_CFG_MASK(cfg_cmd))) + polling_bit = true; + else + udelay(10); + + if (polling_count++ > max_polling_cnt) + return false; + } while (!polling_bit); + break; + case PWR_CMD_DELAY: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"); + if (GET_PWR_CFG_VALUE(cfg_cmd) == + PWRSEQ_DELAY_US) + udelay(GET_PWR_CFG_OFFSET(cfg_cmd)); + else + mdelay(GET_PWR_CFG_OFFSET(cfg_cmd)); + break; + case PWR_CMD_END: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); + return true; + default: + RT_ASSERT(false, + "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); + break; + } + } + ary_idx++; + } while (1); + + return true; +} +EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing); + +bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + unsigned long flags; + struct sk_buff *pskb = NULL; + + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + /*this is wrong, fill_tx_cmddesc needs update*/ + pdesc = &ring->desc[0]; + + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + + __skb_queue_tail(&ring->queue, skb); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); + + return true; +} +EXPORT_SYMBOL(rtl_cmd_send_packet); const struct ieee80211_ops rtl_ops = { .start = rtl_op_start, .stop = rtl_op_stop, @@ -1402,10 +1806,12 @@ const 
struct ieee80211_ops rtl_ops = { .add_interface = rtl_op_add_interface, .remove_interface = rtl_op_remove_interface, .change_interface = rtl_op_change_interface, +#ifdef CONFIG_PM + .suspend = rtl_op_suspend, + .resume = rtl_op_resume, +#endif .config = rtl_op_config, .configure_filter = rtl_op_configure_filter, - .sta_add = rtl_op_sta_add, - .sta_remove = rtl_op_sta_remove, .set_key = rtl_op_set_key, .conf_tx = rtl_op_conf_tx, .bss_info_changed = rtl_op_bss_info_changed, @@ -1417,6 +1823,8 @@ const struct ieee80211_ops rtl_ops = { .sw_scan_start = rtl_op_sw_scan_start, .sw_scan_complete = rtl_op_sw_scan_complete, .rfkill_poll = rtl_op_rfkill_poll, + .sta_add = rtl_op_sta_add, + .sta_remove = rtl_op_sta_remove, .flush = rtl_op_flush, }; EXPORT_SYMBOL_GPL(rtl_ops); diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h index 027e75374dcc9467c4b01040060d6defaa6d6f55..59cd3b9dca25f895bcd6ee24f43f366867c8e41a 100644 --- a/drivers/net/wireless/rtlwifi/core.h +++ b/drivers/net/wireless/rtlwifi/core.h @@ -2,20 +2,16 @@ * * Copyright(c) 2009-2012 Realtek Corporation. * - * Tmis program is free software; you can redistribute it and/or modify it + * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * - * Tmis program is distributed in the hope that it will be useful, but WITHOUT + * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * tmis program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * Tme full GNU General Public License is included in this distribution in the + * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: @@ -45,5 +41,6 @@ void rtl_addr_delay(u32 addr); void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, u32 mask, u32 data); void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); +bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c index 76e2086e137e5f0356325887ecccc5c768d17f6c..fd25abad2b9eebbe852582db0158e1436c0dd536 100644 --- a/drivers/net/wireless/rtlwifi/debug.c +++ b/drivers/net/wireless/rtlwifi/debug.c @@ -2,20 +2,16 @@ * * Copyright(c) 2009-2012 Realtek Corporation. * - * Tmis program is free software; you can redistribute it and/or modify it + * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * - * Tmis program is distributed in the hope that it will be useful, but WITHOUT + * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * tmis program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * Tme full GNU General Public License is included in this distribution in the + * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h index 6d669364e3d9923865f0a5d6f031647a3cf830bf..fc794b3e9f4afd3f04fd2fee1b9b5c336e6e2805 100644 --- a/drivers/net/wireless/rtlwifi/debug.h +++ b/drivers/net/wireless/rtlwifi/debug.h @@ -2,20 +2,16 @@ * * Copyright(c) 2009-2012 Realtek Corporation. * - * Tmis program is free software; you can redistribute it and/or modify it + * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * - * Tmis program is distributed in the hope that it will be useful, but WITHOUT + * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * tmis program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * Tme full GNU General Public License is included in this distribution in the + * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: @@ -108,6 +104,7 @@ #define COMP_USB BIT(29) #define COMP_EASY_CONCURRENT COMP_USB /* reuse of this bit is OK */ #define COMP_BT_COEXIST BIT(30) +#define COMP_IQK BIT(31) /*-------------------------------------------------------------- Define the rt_print components diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index 2ffc7298f686ec6002ee54dd210ebc7e8b13b757..0b4082c9272a12e36063b3b5e951e48d49944952 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * tmis program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * Tme full GNU General Public License is included in this distribution in the * file called LICENSE. 
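The efuse.c hunks that follow repeatedly decode a one-byte packet header: the high nibble is the section offset and the low nibble is word_en, where a cleared bit means the corresponding 2-byte word is actually stored (that is what efuse_calculate_word_cnts() counts). A self-contained reference sketch of the decode, with illustrative helper names that are not part of the driver:

/* Illustrative only: efuse packet-header decode as used in the hunks below. */
static unsigned char efuse_hdr_offset(unsigned char hdr)
{
	return (hdr >> 4) & 0x0F;	/* section this packet belongs to */
}

static unsigned char efuse_hdr_word_en(unsigned char hdr)
{
	return hdr & 0x0F;		/* a 0 bit marks a stored word    */
}

static unsigned char efuse_hdr_word_cnt(unsigned char word_en)
{
	unsigned char i, cnt = 0;

	for (i = 0; i < 4; i++)
		if (!(word_en & (1u << i)))
			cnt++;
	return cnt;
}

With that convention, efuse_get_current_size() below simply walks headers, skipping word count * 2 data bytes plus the header byte for every packet it finds.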
* @@ -26,10 +22,9 @@ * Larry Finger * *****************************************************************************/ - -#include #include "wifi.h" #include "efuse.h" +#include static const u8 MAX_PGPKT_SIZE = 9; static const u8 PGPKT_DATA_SIZE = 8; @@ -63,21 +58,19 @@ static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset, u16 value); static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset, u32 value); -static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, - u8 *data); static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, - u8 data); + u8 data); static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse); static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, - u8 *data); + u8 *data); static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset, u8 word_en, u8 *data); static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata, u8 *targetdata); -static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, - u16 efuse_addr, u8 word_en, u8 *data); +static u8 enable_efuse_data_write(struct ieee80211_hw *hw, + u16 efuse_addr, u8 word_en, u8 *data); static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, - u8 pwrstate); + u8 pwrstate); static u16 efuse_get_current_size(struct ieee80211_hw *hw); static u8 efuse_calculate_word_cnts(u8 word_en); @@ -258,7 +251,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) } /* allocate memory for efuse_tbl and efuse_word */ - efuse_tbl = kmalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] * + efuse_tbl = kzalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] * sizeof(u8), GFP_ATOMIC); if (!efuse_tbl) return; @@ -266,7 +259,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) if (!efuse_word) goto out; for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { - efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16), + efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16), GFP_ATOMIC); if (!efuse_word[i]) goto done; @@ -413,8 +406,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw) efuse_used = rtlefuse->efuse_usedbytes; if ((totalbytes + efuse_used) >= - (EFUSE_MAX_SIZE - - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) + (EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) result = false; RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, @@ -428,13 +420,14 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset, u32 *value) { if (type == 1) - efuse_shadow_read_1byte(hw, offset, (u8 *) value); + efuse_shadow_read_1byte(hw, offset, (u8 *)value); else if (type == 2) - efuse_shadow_read_2byte(hw, offset, (u16 *) value); + efuse_shadow_read_2byte(hw, offset, (u16 *)value); else if (type == 4) efuse_shadow_read_4byte(hw, offset, value); } +EXPORT_SYMBOL(efuse_shadow_read); void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset, u32 value) @@ -456,7 +449,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) u8 word_en = 0x0F; u8 first_pg = false; - RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "--->\n"); + RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n"); if (!efuse_shadow_update_chk(hw)) { efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); @@ -465,7 +458,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, - "<---efuse out of capacity!!\n"); + "efuse out of capacity!!\n"); return false; } efuse_power_switch(hw, true, true); @@ -477,7 +470,6 @@ bool efuse_shadow_update(struct 
ieee80211_hw *hw) for (i = 0; i < 8; i++) { if (first_pg) { - word_en &= ~(BIT(i / 2)); rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] = @@ -500,7 +492,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], 8); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, - "U-efuse", tmpdata, 8); + "U-efuse\n", tmpdata, 8); if (!efuse_pg_packet_write(hw, (u8) offset, word_en, tmpdata)) { @@ -519,7 +511,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); - RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "<---\n"); + RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n"); return true; } @@ -529,14 +521,14 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); if (rtlefuse->autoload_failflag) - memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, - rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), + 0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); else efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], - &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); } EXPORT_SYMBOL(rtl_efuse_shadow_map_update); @@ -619,7 +611,7 @@ static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, } -static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data) +int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmpidx = 0; @@ -650,14 +642,15 @@ static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data) } return result; } +EXPORT_SYMBOL(efuse_one_byte_read); static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmpidx = 0; - RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr = %x Data=%x\n", - addr, data); + RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, + "Addr = %x Data=%x\n", addr, data); rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff)); @@ -677,11 +670,10 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data) if (tmpidx < 100) return true; - return false; } -static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse) +static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse) { struct rtl_priv *rtlpriv = rtl_priv(hw); efuse_power_switch(hw, false, true); @@ -706,14 +698,14 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, if (hoffset == offset) { for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) { if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx, - &efuse_data)) { + &efuse_data)) { tmpdata[tmpidx] = efuse_data; if (efuse_data != 0xff) - dataempty = true; + dataempty = false; } } - if (dataempty) { + if (!dataempty) { *readstate = PG_STATE_DATA; } else { *efuse_addr = *efuse_addr + (word_cnts * 2) + 1; @@ -729,7 +721,9 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) { u8 readstate = PG_STATE_HEADER; + bool continual = true; + u8 efuse_data, word_cnts = 0; u16 efuse_addr = 0; u8 tmpdata[8]; @@ -747,9 +741,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) && (efuse_data != 0xFF)) 
efuse_read_data_case1(hw, &efuse_addr, - efuse_data, - offset, tmpdata, - &readstate); + efuse_data, offset, + tmpdata, &readstate); else continual = false; } else if (readstate & PG_STATE_DATA) { @@ -771,13 +764,14 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) } static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, - u8 efuse_data, u8 offset, int *continual, - u8 *write_state, struct pgpkt_struct *target_pkt, - int *repeat_times, int *result, u8 word_en) + u8 efuse_data, u8 offset, + int *continual, u8 *write_state, + struct pgpkt_struct *target_pkt, + int *repeat_times, int *result, u8 word_en) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct pgpkt_struct tmp_pkt; - bool dataempty = true; + int dataempty = true; u8 originaldata[8 * sizeof(u8)]; u8 badworden = 0x0F; u8 match_word_en, tmp_word_en; @@ -794,9 +788,10 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, *write_state = PG_STATE_HEADER; } else { for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { - u16 address = *efuse_addr + 1 + tmpindex; - if (efuse_one_byte_read(hw, address, - &efuse_data) && (efuse_data != 0xFF)) + if (efuse_one_byte_read(hw, + (*efuse_addr + 1 + tmpindex), + &efuse_data) && + (efuse_data != 0xFF)) dataempty = false; } @@ -806,33 +801,34 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } else { match_word_en = 0x0F; if (!((target_pkt->word_en & BIT(0)) | - (tmp_pkt.word_en & BIT(0)))) + (tmp_pkt.word_en & BIT(0)))) match_word_en &= (~BIT(0)); if (!((target_pkt->word_en & BIT(1)) | - (tmp_pkt.word_en & BIT(1)))) + (tmp_pkt.word_en & BIT(1)))) match_word_en &= (~BIT(1)); if (!((target_pkt->word_en & BIT(2)) | - (tmp_pkt.word_en & BIT(2)))) + (tmp_pkt.word_en & BIT(2)))) match_word_en &= (~BIT(2)); if (!((target_pkt->word_en & BIT(3)) | - (tmp_pkt.word_en & BIT(3)))) + (tmp_pkt.word_en & BIT(3)))) match_word_en &= (~BIT(3)); if ((match_word_en & 0x0F) != 0x0F) { - badworden = efuse_word_enable_data_write( - hw, *efuse_addr + 1, - tmp_pkt.word_en, - target_pkt->data); + badworden = + enable_efuse_data_write(hw, + *efuse_addr + 1, + tmp_pkt.word_en, + target_pkt->data); - if (0x0F != (badworden & 0x0F)) { + if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = offset; u8 reorg_worden = badworden; efuse_pg_packet_write(hw, reorg_offset, - reorg_worden, - originaldata); + reorg_worden, + originaldata); } tmp_word_en = 0x0F; @@ -845,11 +841,11 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, tmp_word_en &= (~BIT(1)); if ((target_pkt->word_en & BIT(2)) ^ - (match_word_en & BIT(2))) + (match_word_en & BIT(2))) tmp_word_en &= (~BIT(2)); if ((target_pkt->word_en & BIT(3)) ^ - (match_word_en & BIT(3))) + (match_word_en & BIT(3))) tmp_word_en &= (~BIT(3)); if ((tmp_word_en & 0x0F) != 0x0F) { @@ -873,7 +869,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, } } } - RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER-1\n"); + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER-1\n"); } static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, @@ -908,12 +904,13 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); - memset(originaldata, 0xff, 8 * sizeof(u8)); + memset(originaldata, 0xff, 8 * sizeof(u8)); if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { - badworden = efuse_word_enable_data_write(hw, - 
*efuse_addr + 1, tmp_pkt.word_en, - originaldata); + badworden = enable_efuse_data_write(hw, + *efuse_addr + 1, + tmp_pkt.word_en, + originaldata); if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = tmp_pkt.offset; @@ -923,8 +920,8 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, originaldata); *efuse_addr = efuse_get_current_size(hw); } else { - *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) - + 1; + *efuse_addr = *efuse_addr + + (tmp_word_cnts * 2) + 1; } } else { *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; @@ -948,7 +945,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct pgpkt_struct target_pkt; u8 write_state = PG_STATE_HEADER; - int continual = true, result = true; + int continual = true, dataempty = true, result = true; u16 efuse_addr = 0; u8 efuse_data; u8 target_word_cnts = 0; @@ -956,7 +953,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, static int repeat_times; if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE - - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse_pg_packet_write error\n"); return false; @@ -965,17 +962,18 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, target_pkt.offset = offset; target_pkt.word_en = word_en; - memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); + memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); efuse_word_enable_data_read(word_en, data, target_pkt.data); target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); - RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n"); + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse Power ON\n"); while (continual && (efuse_addr < (EFUSE_MAX_SIZE - - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) { + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) { if (write_state == PG_STATE_HEADER) { + dataempty = true; badworden = 0x0F; RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_HEADER\n"); @@ -985,7 +983,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, efuse_write_data_case1(hw, &efuse_addr, efuse_data, offset, &continual, - &write_state, &target_pkt, + &write_state, + &target_pkt, &repeat_times, &result, word_en); else @@ -999,15 +998,17 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, } else if (write_state == PG_STATE_DATA) { RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, "efuse PG_STATE_DATA\n"); + badworden = 0x0f; badworden = - efuse_word_enable_data_write(hw, efuse_addr + 1, - target_pkt.word_en, - target_pkt.data); + enable_efuse_data_write(hw, efuse_addr + 1, + target_pkt.word_en, + target_pkt.data); if ((badworden & 0x0F) == 0x0F) { continual = false; } else { - efuse_addr += (2 * target_word_cnts) + 1; + efuse_addr = + efuse_addr + (2 * target_word_cnts) + 1; target_pkt.offset = offset; target_pkt.word_en = badworden; @@ -1027,7 +1028,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, } if (efuse_addr >= (EFUSE_MAX_SIZE - - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "efuse_addr(%#x) Out of size!!\n", efuse_addr); } @@ -1035,8 +1036,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, return true; } -static void efuse_word_enable_data_read(u8 word_en, - u8 *sourdata, u8 *targetdata) +static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata, + u8 *targetdata) { if (!(word_en & BIT(0))) { targetdata[0] = sourdata[0]; @@ -1059,8 
+1060,8 @@ static void efuse_word_enable_data_read(u8 word_en, } } -static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, - u16 efuse_addr, u8 word_en, u8 *data) +static u8 enable_efuse_data_write(struct ieee80211_hw *hw, + u16 efuse_addr, u8 word_en, u8 *data) { struct rtl_priv *rtlpriv = rtl_priv(hw); u16 tmpaddr; @@ -1069,8 +1070,8 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, u8 tmpdata[8]; memset(tmpdata, 0xff, PGPKT_DATA_SIZE); - RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "word_en = %x efuse_addr=%x\n", - word_en, efuse_addr); + RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, + "word_en = %x efuse_addr=%x\n", word_en, efuse_addr); if (!(word_en & BIT(0))) { tmpaddr = start_addr; @@ -1127,19 +1128,22 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) u16 tmpV16; if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) { - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE) - rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], - 0x69); - tmpV16 = rtl_read_word(rtlpriv, - rtlpriv->cfg->maps[SYS_ISO_CTRL]); - if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { - tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V]; - rtl_write_word(rtlpriv, - rtlpriv->cfg->maps[SYS_ISO_CTRL], - tmpV16); + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE && + rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) { + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69); + } else { + tmpV16 = + rtl_read_word(rtlpriv, + rtlpriv->cfg->maps[SYS_ISO_CTRL]); + if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { + tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V]; + rtl_write_word(rtlpriv, + rtlpriv->cfg->maps[SYS_ISO_CTRL], + tmpV16); + } } - tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]); if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) { @@ -1164,7 +1168,10 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) rtlpriv->cfg->maps[EFUSE_TEST] + 3); - if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + tempval &= ~(BIT(3) | BIT(4) | BIT(5) | BIT(6)); + tempval |= (VOLTAGE_V25 << 3); + } else if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) { tempval &= 0x0F; tempval |= (VOLTAGE_V25 << 4); } @@ -1176,11 +1183,11 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK], - 0x03); + 0x03); } - } else { - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE) + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE && + rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0); @@ -1195,27 +1202,28 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK], - 0x02); + 0x02); } - } - } static u16 efuse_get_current_size(struct ieee80211_hw *hw) { + int continual = true; u16 efuse_addr = 0; - u8 hworden; + u8 hoffset, hworden; u8 efuse_data, word_cnts; - while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) && - efuse_addr < EFUSE_MAX_SIZE) { - if (efuse_data == 0xFF) - break; - - hworden = efuse_data & 0x0F; - word_cnts = efuse_calculate_word_cnts(hworden); - efuse_addr = efuse_addr + (word_cnts * 2) + 1; + while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) && + (efuse_addr < EFUSE_MAX_SIZE)) { + if (efuse_data != 0xFF) { + hoffset = (efuse_data >> 4) & 0x0F; + 
hworden = efuse_data & 0x0F; + word_cnts = efuse_calculate_word_cnts(hworden); + efuse_addr = efuse_addr + (word_cnts * 2) + 1; + } else { + continual = false; + } } return efuse_addr; diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h index 1663b3afd41e90e67512823f0c5a0f7ee02ef80b..fdab8240a5d79da027f641ad52c170a658febdb7 100644 --- a/drivers/net/wireless/rtlwifi/efuse.h +++ b/drivers/net/wireless/rtlwifi/efuse.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -32,7 +28,6 @@ #define EFUSE_IC_ID_OFFSET 506 -#define EFUSE_MAP_LEN 128 #define EFUSE_MAX_WORD_UNIT 4 #define EFUSE_INIT_MAP 0 @@ -107,12 +102,14 @@ struct efuse_priv { void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); void efuse_initialize(struct ieee80211_hw *hw); u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address); +int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data); void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value); -void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf); -void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset, - u32 *value); -void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset, - u32 value); +void read_efuse(struct ieee80211_hw *hw, u16 _offset, + u16 _size_byte, u8 *pbuf); +void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 *value); +void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 value); bool efuse_shadow_update(struct ieee80211_hw *hw); bool efuse_shadow_update_chk(struct ieee80211_hw *hw); void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 67d1ee6edcad6e2748cade61a4127db661cfcadf..667aba81246c9ac094db11a5c455be9c29d273ad 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -33,6 +33,7 @@ #include "base.h" #include "ps.h" #include "efuse.h" +#include #include #include #include @@ -44,10 +45,10 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCI basic driver for rtlwifi"); static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { - PCI_VENDOR_ID_INTEL, - PCI_VENDOR_ID_ATI, - PCI_VENDOR_ID_AMD, - PCI_VENDOR_ID_SI + INTEL_VENDOR_ID, + ATI_VENDOR_ID, + AMD_VENDOR_ID, + SIS_VENDOR_ID }; static const u8 ac_to_hwq[] = { @@ -566,27 +567,25 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; while (skb_queue_len(&ring->queue)) { - struct rtl_tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb; struct ieee80211_tx_info *info; __le16 fc; u8 tid; + u8 *entry; - u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true, - HW_DESC_OWN); + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc[ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); - /*beacon packet will only use the first - *descriptor by defaut, and the own may not - *be cleared by the hardware - */ - if (own) + if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) return; ring->idx = (ring->idx + 1) % ring->entries; skb = 
__skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops-> - get_desc((u8 *) entry, true, + get_desc((u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); @@ -598,7 +597,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n", ring->idx, skb_queue_len(&ring->queue), - *(u16 *) (skb->data + 22)); + *(u16 *)(skb->data + 22)); if (prio == TXCMD_QUEUE) { dev_kfree_skb(skb); @@ -646,7 +645,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) == 2) { RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, - "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n", + "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n", prio, ring->idx, skb_queue_len(&ring->queue)); @@ -666,175 +665,276 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) } } -static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_rx_status rx_status) +static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, + u8 *entry, int rxring_idx, int desc_idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct ieee80211_hdr *hdr = rtl_get_hdr(skb); - __le16 fc = rtl_get_fc(skb); - bool unicast = false; - struct sk_buff *uskb = NULL; - u8 *pdata; - - - memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); - - if (is_broadcast_ether_addr(hdr->addr1)) { - ;/*TODO*/ - } else if (is_multicast_ether_addr(hdr->addr1)) { - ;/*TODO*/ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 bufferaddress; + u8 tmp_one = 1; + struct sk_buff *skb; + + skb = dev_alloc_skb(rtlpci->rxbuffersize); + if (!skb) + return 0; + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; + + /* just set skb->cb to mapping addr for pci_unmap_single use */ + *((dma_addr_t *)skb->cb) = + pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + bufferaddress = *((dma_addr_t *)skb->cb); + if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) + return 0; + if (rtlpriv->use_new_trx_flow) { + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, + HW_DESC_RX_PREPARE, + (u8 *)&bufferaddress); } else { - unicast = true; - rtlpriv->stats.rxbytesunicast += skb->len; + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, + HW_DESC_RXBUFF_ADDR, + (u8 *)&bufferaddress); + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, + HW_DESC_RXPKT_LEN, + (u8 *)&rtlpci->rxbuffersize); + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, + HW_DESC_RXOWN, + (u8 *)&tmp_one); } + return 1; +} - if (ieee80211_is_data(fc)) { - rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); - - if (unicast) - rtlpriv->link_info.num_rx_inperiod++; +/* inorder to receive 8K AMSDU we have set skb to + * 9100bytes in init rx ring, but if this packet is + * not a AMSDU, this large packet will be sent to + * TCP/IP directly, this cause big packet ping fail + * like: "ping -s 65507", so here we will realloc skb + * based on the true size of packet, Mac80211 + * Probably will do it better, but does not yet. + * + * Some platform will fail when alloc skb sometimes. 
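The new _rtl_pci_init_one_rxdesc() above is the single refill point for an RX slot: allocate a receive skb, DMA-map it (stashing the mapping in skb->cb so the completion path can unmap it), and program the descriptor with the buffer address, buffer length and ownership bit (or just the prepare address on the new TRX flow). The shape of that refill, with hypothetical helpers standing in for dev_alloc_skb(), pci_map_single() and ops->set_desc() so the sketch stays self-contained:

/* Illustrative only: one RX slot refill, mirroring _rtl_pci_init_one_rxdesc().
 * The slot struct and every helper declared below are hypothetical stand-ins.
 */
static void *alloc_rx_buf(unsigned int size);
static unsigned long long dma_map_buf(void *buf, unsigned int size);
static void desc_set_buf_addr(void *desc, unsigned long long addr);
static void desc_set_pkt_len(void *desc, unsigned int len);
static void desc_set_own(void *desc, int own);

struct sketch_rx_slot {
	void *buf;			/* receive buffer (an skb in the driver) */
	unsigned long long dma;		/* DMA address kept for the later unmap  */
};

static int refill_rx_slot(struct sketch_rx_slot *slot, void *desc,
			  unsigned int bufsize)
{
	void *buf = alloc_rx_buf(bufsize);

	if (!buf)
		return 0;		/* caller treats 0 as "slot not refilled" */

	slot->buf = buf;
	slot->dma = dma_map_buf(buf, bufsize);

	desc_set_buf_addr(desc, slot->dma);	/* where the NIC may DMA     */
	desc_set_pkt_len(desc, bufsize);	/* how much it may write     */
	desc_set_own(desc, 1);			/* hand the slot to hardware */
	return 1;
}

The RX interrupt path above calls this once per processed packet to hand the slot back to the hardware.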
+ * in this condition, we will send the old skb to + * mac80211 directly, this will not cause any other + * issues, but only this packet will be lost by TCP/IP + */ +static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_rx_status rx_status) +{ + if (unlikely(!rtl_action_proc(hw, skb, false))) { + dev_kfree_skb_any(skb); + } else { + struct sk_buff *uskb = NULL; + u8 *pdata; + + uskb = dev_alloc_skb(skb->len + 128); + if (likely(uskb)) { + memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, + sizeof(rx_status)); + pdata = (u8 *)skb_put(uskb, skb->len); + memcpy(pdata, skb->data, skb->len); + dev_kfree_skb_any(skb); + ieee80211_rx_irqsafe(hw, uskb); + } else { + ieee80211_rx_irqsafe(hw, skb); + } } +} - /* static bcn for roaming */ - rtl_beacon_statistic(hw, skb); - rtl_p2p_info(hw, (void *)skb->data, skb->len); - - /* for sw lps */ - rtl_swlps_beacon(hw, (void *)skb->data, skb->len); - rtl_recognize_peer(hw, (void *)skb->data, skb->len); - if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) && - (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) && - (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc))) - return; - - if (unlikely(!rtl_action_proc(hw, skb, false))) - return; - - uskb = dev_alloc_skb(skb->len + 128); - if (!uskb) - return; /* exit if allocation failed */ - memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status)); - pdata = (u8 *)skb_put(uskb, skb->len); - memcpy(pdata, skb->data, skb->len); +/*hsisr interrupt handler*/ +static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - ieee80211_rx_irqsafe(hw, uskb); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR], + rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) | + rtlpci->sys_irq_mask); } static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE; - + int rxring_idx = RTL_PCI_RX_MPDU_QUEUE; struct ieee80211_rx_status rx_status = { 0 }; unsigned int count = rtlpci->rxringcount; u8 own; u8 tmp_one; - u32 bufferaddress; - + bool unicast = false; + u8 hw_queue = 0; + unsigned int rx_remained_cnt; struct rtl_stats stats = { .signal = 0, .rate = 0, }; - int index = rtlpci->rx_ring[rx_queue_idx].idx; - if (rtlpci->driver_is_goingto_unload) - return; /*RX NORMAL PKT */ while (count--) { - /*rx descriptor */ - struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[ - index]; + struct ieee80211_hdr *hdr; + __le16 fc; + u16 len; + /*rx buffer descriptor */ + struct rtl_rx_buffer_desc *buffer_desc = NULL; + /*if use new trx flow, it means wifi info */ + struct rtl_rx_desc *pdesc = NULL; /*rx pkt */ - struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[ - index]; - struct sk_buff *new_skb = NULL; - - own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, - false, HW_DESC_OWN); - - /*wait data to be filled by hardware */ - if (own) - break; + struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ + rtlpci->rx_ring[rxring_idx].idx]; + + if (rtlpriv->use_new_trx_flow) { + rx_remained_cnt = + rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw, + hw_queue); + if (rx_remained_cnt < 1) + return; + + } else { /* rx descriptor */ + pdesc = &rtlpci->rx_ring[rxring_idx].desc[ + rtlpci->rx_ring[rxring_idx].idx]; + + own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)pdesc, + false, + HW_DESC_OWN); + if (own) /* wait data to be filled by 
hardware */ + return; + } + /* Reaching this point means: data is filled already + * AAAAAAttention !!! + * We can NOT access 'skb' before 'pci_unmap_single' + */ + pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + + if (rtlpriv->use_new_trx_flow) { + buffer_desc = + &rtlpci->rx_ring[rxring_idx].buffer_desc + [rtlpci->rx_ring[rxring_idx].idx]; + /*means rx wifi info*/ + pdesc = (struct rtl_rx_desc *)skb->data; + } + memset(&rx_status , 0 , sizeof(rx_status)); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, - &rx_status, - (u8 *) pdesc, skb); + &rx_status, (u8 *)pdesc, skb); - if (stats.crc || stats.hwerror) - goto done; + if (rtlpriv->use_new_trx_flow) + rtlpriv->cfg->ops->rx_check_dma_ok(hw, + (u8 *)buffer_desc, + hw_queue); - new_skb = dev_alloc_skb(rtlpci->rxbuffersize); - if (unlikely(!new_skb)) { - RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG, - "can't alloc skb for rx\n"); - goto done; - } - kmemleak_not_leak(new_skb); + len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false, + HW_DESC_RXPKT_LEN); - pci_unmap_single(rtlpci->pdev, - *((dma_addr_t *) skb->cb), - rtlpci->rxbuffersize, - PCI_DMA_FROMDEVICE); + if (skb->end - skb->tail > len) { + skb_put(skb, len); + if (rtlpriv->use_new_trx_flow) + skb_reserve(skb, stats.rx_drvinfo_size + + stats.rx_bufshift + 24); + else + skb_reserve(skb, stats.rx_drvinfo_size + + stats.rx_bufshift); - skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false, - HW_DESC_RXPKT_LEN)); - skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "skb->end - skb->tail = %d, len is %d\n", + skb->end - skb->tail, len); + break; + } + /* handle command packet here */ + if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { + dev_kfree_skb_any(skb); + goto end; + } /* * NOTICE This can not be use for mac80211, * this is done in mac80211 code, - * if you done here sec DHCP will fail + * if done here sec DHCP will fail * skb_trim(skb, skb->len - 4); */ - _rtl_receive_one(hw, skb, rx_status); + hdr = rtl_get_hdr(skb); + fc = rtl_get_fc(skb); + + if (!stats.crc && !stats.hwerror) { + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, + sizeof(rx_status)); + + if (is_broadcast_ether_addr(hdr->addr1)) { + ;/*TODO*/ + } else if (is_multicast_ether_addr(hdr->addr1)) { + ;/*TODO*/ + } else { + unicast = true; + rtlpriv->stats.rxbytesunicast += skb->len; + } + rtl_is_special_data(hw, skb, false); + if (ieee80211_is_data(fc)) { + rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); + if (unicast) + rtlpriv->link_info.num_rx_inperiod++; + } + /* static bcn for roaming */ + rtl_beacon_statistic(hw, skb); + rtl_p2p_info(hw, (void *)skb->data, skb->len); + /* for sw lps */ + rtl_swlps_beacon(hw, (void *)skb->data, skb->len); + rtl_recognize_peer(hw, (void *)skb->data, skb->len); + if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) && + (rtlpriv->rtlhal.current_bandtype == + BAND_ON_2_4G) && + (ieee80211_is_beacon(fc) || + ieee80211_is_probe_resp(fc))) { + dev_kfree_skb_any(skb); + } else { + _rtl_pci_rx_to_mac80211(hw, skb, rx_status); + } + } else { + dev_kfree_skb_any(skb); + } + if (rtlpriv->use_new_trx_flow) { + rtlpci->rx_ring[hw_queue].next_rx_rp += 1; + rtlpci->rx_ring[hw_queue].next_rx_rp %= + RTL_PCI_MAX_RX_COUNT; + + rx_remained_cnt--; + rtl_write_word(rtlpriv, 0x3B4, + rtlpci->rx_ring[hw_queue].next_rx_rp); + } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { 
rtlpriv->enter_ps = false; schedule_work(&rtlpriv->works.lps_change_work); } +end: + if (rtlpriv->use_new_trx_flow) { + _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, + rxring_idx, + rtlpci->rx_ring[rxring_idx].idx); + } else { + _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, + rtlpci->rx_ring[rxring_idx].idx); - dev_kfree_skb_any(skb); - skb = new_skb; - - rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb; - *((dma_addr_t *) skb->cb) = - pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), - rtlpci->rxbuffersize, - PCI_DMA_FROMDEVICE); - -done: - bufferaddress = (*((dma_addr_t *)skb->cb)); - if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) - return; - tmp_one = 1; - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, - HW_DESC_RXBUFF_ADDR, - (u8 *)&bufferaddress); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, - HW_DESC_RXPKT_LEN, - (u8 *)&rtlpci->rxbuffersize); - - if (index == rtlpci->rxringcount - 1) - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, - HW_DESC_RXERO, - &tmp_one); - - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN, - &tmp_one); - - index = (index + 1) % rtlpci->rxringcount; + if (rtlpci->rx_ring[rxring_idx].idx == + rtlpci->rxringcount - 1) + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, + false, + HW_DESC_RXERO, + (u8 *)&tmp_one); + } + rtlpci->rx_ring[rxring_idx].idx = + (rtlpci->rx_ring[rxring_idx].idx + 1) % + rtlpci->rxringcount; } - - rtlpci->rx_ring[rx_queue_idx].idx = index; } static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) { struct ieee80211_hw *hw = dev_id; + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); unsigned long flags; @@ -842,16 +942,18 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) u32 intb = 0; irqreturn_t ret = IRQ_HANDLED; - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + if (rtlpci->irq_enabled == 0) + return ret; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock , flags); + rtlpriv->cfg->ops->disable_interrupt(hw); /*read ISR: 4/8bytes */ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); /*Shared IRQ or HW disappared */ - if (!inta || inta == 0xffff) { - ret = IRQ_NONE; + if (!inta || inta == 0xffff) goto done; - } /*<1> beacon related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { @@ -874,8 +976,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet); } - /*<3> Tx related */ - if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) + /*<2> Tx related */ + if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n"); if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) { @@ -932,7 +1034,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) } } - /*<2> Rx related */ + /*<3> Rx related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n"); _rtl_pci_rx_interrupt(hw); @@ -944,12 +1046,12 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) _rtl_pci_rx_interrupt(hw); } - if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { + if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n"); _rtl_pci_rx_interrupt(hw); } - /*fw related*/ + /*<4> fw related*/ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) { if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, @@ -959,10 +1061,26 
@@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) } } + /*<5> hsisr related*/ + /* Only 8188EE & 8723BE Supported. + * If Other ICs Come in, System will corrupt, + * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR] + * are not initialized + */ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE || + rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) { + RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, + "hsisr interrupt!\n"); + _rtl_pci_hs_interrupt(hw); + } + } + if (rtlpriv->rtlhal.earlymode_enable) tasklet_schedule(&rtlpriv->works.irq_tasklet); done: + rtlpriv->cfg->ops->enable_interrupt(hw); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return ret; } @@ -990,13 +1108,8 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); - if (pskb) { - struct rtl_tx_desc *entry = &ring->desc[ring->idx]; - pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc( - (u8 *) entry, true, HW_DESC_TXBUFF_ADDR), - pskb->len, PCI_DMA_TODEVICE); + if (pskb) kfree_skb(pskb); - } /*NB: the beacon data buffer must be 32-bit aligned. */ pskb = ieee80211_beacon_get(hw, mac->vif); @@ -1005,7 +1118,10 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) hdr = rtl_get_hdr(pskb); info = IEEE80211_SKB_CB(pskb); pdesc = &ring->desc[0]; - rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, + if (rtlpriv->use_new_trx_flow) + pbuffer_desc = &ring->buffer_desc[0]; + + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, (u8 *)pbuffer_desc, info, NULL, pskb, BEACON_QUEUE, &tcb_desc); @@ -1020,10 +1136,18 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 i; + u16 desc_num; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) + desc_num = TX_DESC_NUM_92E; + else + desc_num = RT_TXDESC_NUM; for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) - rtlpci->txringcount[i] = RT_TXDESC_NUM; + rtlpci->txringcount[i] = desc_num; /* *we just alloc 2 desc for beacon queue, @@ -1031,12 +1155,12 @@ static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) */ rtlpci->txringcount[BEACON_QUEUE] = 2; - /* - *BE queue need more descriptor for performance + /*BE queue need more descriptor for performance *consideration or, No more tx desc will happen, *and may cause mac80211 mem leakage. 
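The interrupt-handler changes above bracket the whole service routine: bail out early when the driver has not enabled interrupts, mask the device's interrupt sources after taking the irq lock, service every recognized cause (beacon, TX, RX, fw C2H, and now HSISR on 8188EE/8723BE), and re-enable the sources at the common done: exit. As a pattern, with hypothetical device accessors rather than the driver's ops:

/* Illustrative only: the mask/service/unmask bracketing added above.
 * Every helper below is a hypothetical stand-in (e.g. dev_mask_irqs() for
 * ops->disable_interrupt(), dev_read_causes() for ops->interrupt_recognized()).
 */
static int dev_irqs_enabled(void *dev);
static void dev_mask_irqs(void *dev);
static void dev_unmask_irqs(void *dev);
static unsigned int dev_read_causes(void *dev);
static void service_causes(void *dev, unsigned int causes);

static int sketch_isr(void *dev)
{
	unsigned int causes;

	if (!dev_irqs_enabled(dev))	/* device not ready on a shared line */
		return 0;

	dev_mask_irqs(dev);
	causes = dev_read_causes(dev);
	if (causes && causes != 0xffff)	/* 0 / 0xffff: shared IRQ or HW gone */
		service_causes(dev, causes);
	dev_unmask_irqs(dev);
	return 1;
}

In the driver both the normal path and the "shared IRQ or HW disappeared" path funnel through done:, so once the sources have been masked the re-enable always runs.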
*/ - rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE; + if (!rtl_priv(hw)->use_new_trx_flow) + rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE; rtlpci->rxbuffersize = 9100; /*2048/1024; */ rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */ @@ -1087,113 +1211,124 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_tx_desc *ring; - dma_addr_t dma; + struct rtl_tx_buffer_desc *buffer_desc; + struct rtl_tx_desc *desc; + dma_addr_t buffer_desc_dma, desc_dma; u32 nextdescaddress; int i; - ring = pci_zalloc_consistent(rtlpci->pdev, sizeof(*ring) * entries, - &dma); - if (!ring || (unsigned long)ring & 0xFF) { + /* alloc tx buffer desc for new trx flow*/ + if (rtlpriv->use_new_trx_flow) { + buffer_desc = + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*buffer_desc) * entries, + &buffer_desc_dma); + + if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Cannot allocate TX ring (prio = %d)\n", + prio); + return -ENOMEM; + } + + rtlpci->tx_ring[prio].buffer_desc = buffer_desc; + rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma; + + rtlpci->tx_ring[prio].cur_tx_rp = 0; + rtlpci->tx_ring[prio].cur_tx_wp = 0; + rtlpci->tx_ring[prio].avl_desc = entries; + } + + /* alloc dma for this ring */ + desc = pci_zalloc_consistent(rtlpci->pdev, + sizeof(*desc) * entries, &desc_dma); + + if (!desc || (unsigned long)desc & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } - rtlpci->tx_ring[prio].desc = ring; - rtlpci->tx_ring[prio].dma = dma; + rtlpci->tx_ring[prio].desc = desc; + rtlpci->tx_ring[prio].dma = desc_dma; + rtlpci->tx_ring[prio].idx = 0; rtlpci->tx_ring[prio].entries = entries; skb_queue_head_init(&rtlpci->tx_ring[prio].queue); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n", - prio, ring); - - for (i = 0; i < entries; i++) { - nextdescaddress = (u32) dma + - ((i + 1) % entries) * - sizeof(*ring); - - rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]), - true, HW_DESC_TX_NEXTDESC_ADDR, - (u8 *)&nextdescaddress); + prio, desc); + + /* init every desc in this ring */ + if (!rtlpriv->use_new_trx_flow) { + for (i = 0; i < entries; i++) { + nextdescaddress = (u32)desc_dma + + ((i + 1) % entries) * + sizeof(*desc); + + rtlpriv->cfg->ops->set_desc(hw, (u8 *)&desc[i], + true, + HW_DESC_TX_NEXTDESC_ADDR, + (u8 *)&nextdescaddress); + } } - return 0; } -static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) +static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_rx_desc *entry = NULL; - int i, rx_queue_idx; - u8 tmp_one = 1; + int i; - /* - *rx_queue_idx 0:RX_MPDU_QUEUE - *rx_queue_idx 1:RX_CMD_QUEUE - */ - for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE; - rx_queue_idx++) { - rtlpci->rx_ring[rx_queue_idx].desc = - pci_zalloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) * rtlpci->rxringcount, - &rtlpci->rx_ring[rx_queue_idx].dma); - - if (!rtlpci->rx_ring[rx_queue_idx].desc || - (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) { + if (rtlpriv->use_new_trx_flow) { + struct rtl_rx_buffer_desc *entry = NULL; + /* alloc dma for this ring */ + rtlpci->rx_ring[rxring_idx].buffer_desc = + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx]. 
+ buffer_desc) * + rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); + if (!rtlpci->rx_ring[rxring_idx].buffer_desc || + (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate RX ring\n"); return -ENOMEM; } - rtlpci->rx_ring[rx_queue_idx].idx = 0; + /* init every desc in this ring */ + rtlpci->rx_ring[rxring_idx].idx = 0; + for (i = 0; i < rtlpci->rxringcount; i++) { + entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; + if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + rxring_idx, i)) + return -ENOMEM; + } + } else { + struct rtl_rx_desc *entry = NULL; + u8 tmp_one = 1; + /* alloc dma for this ring */ + rtlpci->rx_ring[rxring_idx].desc = + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx]. + desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); + if (!rtlpci->rx_ring[rxring_idx].desc || + (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Cannot allocate RX ring\n"); + return -ENOMEM; + } - /* If amsdu_8k is disabled, set buffersize to 4096. This - * change will reduce memory fragmentation. - */ - if (rtlpci->rxbuffersize > 4096 && - rtlpriv->rtlhal.disable_amsdu_8k) - rtlpci->rxbuffersize = 4096; + /* init every desc in this ring */ + rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { - struct sk_buff *skb = - dev_alloc_skb(rtlpci->rxbuffersize); - u32 bufferaddress; - if (!skb) - return 0; - kmemleak_not_leak(skb); - entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; - - /*skb->dev = dev; */ - - rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb; - - /* - *just set skb->cb to mapping addr - *for pci_unmap_single use - */ - *((dma_addr_t *) skb->cb) = - pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), - rtlpci->rxbuffersize, - PCI_DMA_FROMDEVICE); - - bufferaddress = (*((dma_addr_t *)skb->cb)); - if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) { - dev_kfree_skb_any(skb); - return 1; - } - rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, - HW_DESC_RXBUFF_ADDR, - (u8 *)&bufferaddress); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, - HW_DESC_RXPKT_LEN, - (u8 *)&rtlpci-> - rxbuffersize); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, - HW_DESC_RXOWN, - &tmp_one); + entry = &rtlpci->rx_ring[rxring_idx].desc[i]; + if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + rxring_idx, i)) + return -ENOMEM; } rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, @@ -1209,56 +1344,70 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; + /* free every desc in this ring */ while (skb_queue_len(&ring->queue)) { - struct rtl_tx_desc *entry = &ring->desc[ring->idx]; + u8 *entry; struct sk_buff *skb = __skb_dequeue(&ring->queue); + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc[ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); + pci_unmap_single(rtlpci->pdev, rtlpriv->cfg-> - ops->get_desc((u8 *) entry, true, + ops->get_desc((u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } - if (ring->desc) { + /* free dma of this ring */ + pci_free_consistent(rtlpci->pdev, + sizeof(*ring->desc) * ring->entries, + ring->desc, ring->dma); + ring->desc = NULL; + if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, sizeof(*ring->desc) * ring->entries, - ring->desc, ring->dma); + 
ring->buffer_desc, ring->buffer_desc_dma); ring->desc = NULL; } } -static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci) +static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx) { - int i, rx_queue_idx; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int i; - /*rx_queue_idx 0:RX_MPDU_QUEUE */ - /*rx_queue_idx 1:RX_CMD_QUEUE */ - for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE; - rx_queue_idx++) { - for (i = 0; i < rtlpci->rxringcount; i++) { - struct sk_buff *skb = - rtlpci->rx_ring[rx_queue_idx].rx_buf[i]; - if (!skb) - continue; - - pci_unmap_single(rtlpci->pdev, - *((dma_addr_t *) skb->cb), - rtlpci->rxbuffersize, - PCI_DMA_FROMDEVICE); - kfree_skb(skb); - } + /* free every desc in this ring */ + for (i = 0; i < rtlpci->rxringcount; i++) { + struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i]; - if (rtlpci->rx_ring[rx_queue_idx].desc) { - pci_free_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rx_queue_idx]. - desc) * rtlpci->rxringcount, - rtlpci->rx_ring[rx_queue_idx].desc, - rtlpci->rx_ring[rx_queue_idx].dma); - rtlpci->rx_ring[rx_queue_idx].desc = NULL; - } + if (!skb) + continue; + pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + kfree_skb(skb); + } + + /* free dma of this ring */ + if (rtlpriv->use_new_trx_flow) { + pci_free_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx]. + buffer_desc) * rtlpci->rxringcount, + rtlpci->rx_ring[rxring_idx].buffer_desc, + rtlpci->rx_ring[rxring_idx].dma); + rtlpci->rx_ring[rxring_idx].buffer_desc = NULL; + } else { + pci_free_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].desc) * + rtlpci->rxringcount, + rtlpci->rx_ring[rxring_idx].desc, + rtlpci->rx_ring[rxring_idx].dma); + rtlpci->rx_ring[rxring_idx].desc = NULL; } } @@ -1266,11 +1415,16 @@ static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int ret; - int i; + int i, rxring_idx; - ret = _rtl_pci_init_rx_ring(hw); - if (ret) - return ret; + /* rxring_idx 0:RX_MPDU_QUEUE + * rxring_idx 1:RX_CMD_QUEUE + */ + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { + ret = _rtl_pci_init_rx_ring(hw, rxring_idx); + if (ret) + return ret; + } for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { ret = _rtl_pci_init_tx_ring(hw, i, @@ -1282,10 +1436,12 @@ static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw) return 0; err_free_rings: - _rtl_pci_free_rx_ring(rtlpci); + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) + _rtl_pci_free_rx_ring(hw, rxring_idx); for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) - if (rtlpci->tx_ring[i].desc) + if (rtlpci->tx_ring[i].desc || + rtlpci->tx_ring[i].buffer_desc) _rtl_pci_free_tx_ring(hw, i); return 1; @@ -1293,11 +1449,11 @@ static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw) static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw) { - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - u32 i; + u32 i, rxring_idx; /*free rx rings */ - _rtl_pci_free_rx_ring(rtlpci); + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) + _rtl_pci_free_rx_ring(hw, rxring_idx); /*free tx rings */ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) @@ -1310,48 +1466,76 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - int i, rx_queue_idx; + int i, rxring_idx; unsigned long 
flags; u8 tmp_one = 1; - - /*rx_queue_idx 0:RX_MPDU_QUEUE */ - /*rx_queue_idx 1:RX_CMD_QUEUE */ - for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE; - rx_queue_idx++) { - /* - *force the rx_ring[RX_MPDU_QUEUE/ - *RX_CMD_QUEUE].idx to the first one - */ - if (rtlpci->rx_ring[rx_queue_idx].desc) { + u32 bufferaddress; + /* rxring_idx 0:RX_MPDU_QUEUE */ + /* rxring_idx 1:RX_CMD_QUEUE */ + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { + /* force the rx_ring[RX_MPDU_QUEUE/ + * RX_CMD_QUEUE].idx to the first one + *new trx flow, do nothing + */ + if (!rtlpriv->use_new_trx_flow && + rtlpci->rx_ring[rxring_idx].desc) { struct rtl_rx_desc *entry = NULL; + rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { - entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; - rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, - false, - HW_DESC_RXOWN, - &tmp_one); + entry = &rtlpci->rx_ring[rxring_idx].desc[i]; + bufferaddress = + rtlpriv->cfg->ops->get_desc((u8 *)entry, + false , HW_DESC_RXBUFF_ADDR); + memset((u8 *)entry , 0 , + sizeof(*rtlpci->rx_ring + [rxring_idx].desc));/*clear one entry*/ + if (rtlpriv->use_new_trx_flow) { + rtlpriv->cfg->ops->set_desc(hw, + (u8 *)entry, false, + HW_DESC_RX_PREPARE, + (u8 *)&bufferaddress); + } else { + rtlpriv->cfg->ops->set_desc(hw, + (u8 *)entry, false, + HW_DESC_RXBUFF_ADDR, + (u8 *)&bufferaddress); + rtlpriv->cfg->ops->set_desc(hw, + (u8 *)entry, false, + HW_DESC_RXPKT_LEN, + (u8 *)&rtlpci->rxbuffersize); + rtlpriv->cfg->ops->set_desc(hw, + (u8 *)entry, false, + HW_DESC_RXOWN, + (u8 *)&tmp_one); + } } - rtlpci->rx_ring[rx_queue_idx].idx = 0; + rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, + HW_DESC_RXERO, (u8 *)&tmp_one); } + rtlpci->rx_ring[rxring_idx].idx = 0; } /* *after reset, release previous pending packet, *and force the tx idx to the first one */ + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { - if (rtlpci->tx_ring[i].desc) { + if (rtlpci->tx_ring[i].desc || + rtlpci->tx_ring[i].buffer_desc) { struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i]; while (skb_queue_len(&ring->queue)) { - struct rtl_tx_desc *entry; - struct sk_buff *skb; + u8 *entry; + struct sk_buff *skb = + __skb_dequeue(&ring->queue); + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc + [ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, - flags); - entry = &ring->desc[ring->idx]; - skb = __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops-> get_desc((u8 *) @@ -1360,13 +1544,13 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); ring->idx = (ring->idx + 1) % ring->entries; - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, - flags); kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; } ring->idx = 0; } } + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return 0; } @@ -1421,7 +1605,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; struct rtl_tx_buffer_desc *ptx_bd_desc = NULL; - u8 idx; + u16 idx; u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb); unsigned long flags; struct ieee80211_hdr *hdr = rtl_get_hdr(skb); @@ -1454,11 +1638,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); ring = &rtlpci->tx_ring[hw_queue]; - if (hw_queue != BEACON_QUEUE) - idx = (ring->idx + 
skb_queue_len(&ring->queue)) % - ring->entries; - else + if (hw_queue != BEACON_QUEUE) { + if (rtlpriv->use_new_trx_flow) + idx = ring->cur_tx_wp; + else + idx = (ring->idx + skb_queue_len(&ring->queue)) % + ring->entries; + } else { idx = 0; + } pdesc = &ring->desc[idx]; if (rtlpriv->use_new_trx_flow) { @@ -1469,7 +1657,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, if ((own == 1) && (hw_queue != BEACON_QUEUE)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n", + "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n", hw_queue, ring->idx, idx, skb_queue_len(&ring->queue)); @@ -1511,7 +1699,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && hw_queue != BEACON_QUEUE) { RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, - "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n", + "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n", hw_queue, ring->idx, idx, skb_queue_len(&ring->queue)); @@ -1525,7 +1713,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, return 0; } -static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop) +static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); @@ -1540,6 +1728,11 @@ static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop) for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) { u32 queue_len; + + if (((queues >> queue_id) & 0x1) == 0) { + queue_id--; + continue; + } ring = &pcipriv->dev.tx_ring[queue_id]; queue_len = skb_queue_len(&ring->queue); if (queue_len == 0 || queue_id == BEACON_QUEUE || @@ -1603,6 +1796,10 @@ static int rtl_pci_start(struct ieee80211_hw *hw) rtl_pci_reset_trx_ring(hw); rtlpci->driver_is_goingto_unload = false; + if (rtlpriv->cfg->ops->get_btc_status()) { + rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); + rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); + } err = rtlpriv->cfg->ops->hw_init(hw); if (err) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, @@ -1622,7 +1819,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw) rtlpci->up_first_time = false; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "rtl_pci_start OK\n"); return 0; } @@ -1635,6 +1832,9 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) unsigned long flags; u8 RFInProgressTimeOut = 0; + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_halt_notify(); + /* *should be before disable interrupt&adapter *and will do it immediately. 
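The rtl_pci_tx() hunk above picks the TX slot from ring->cur_tx_wp when use_new_trx_flow is set, instead of deriving it from ring->idx plus the queue length. A minimal user-space sketch of that write-point/read-point bookkeeping, using invented demo_* names and a fixed ring size rather than the driver's structures:

	#include <stdio.h>
	#include <stdint.h>

	#define RING_ENTRIES 8	/* illustrative; the driver uses e.g. TX_DESC_NUM_92E */

	struct demo_ring {
		uint16_t cur_tx_wp;	/* next slot software will fill */
		uint16_t cur_tx_rp;	/* next slot hardware will consume */
		uint16_t entries;
	};

	/* Free descriptors between the write and read points. */
	static uint16_t demo_avail(const struct demo_ring *r)
	{
		if (r->cur_tx_wp >= r->cur_tx_rp)
			return r->entries - (r->cur_tx_wp - r->cur_tx_rp) - 1;
		return r->cur_tx_rp - r->cur_tx_wp - 1;
	}

	/* Software queues a frame: claim the slot at the write point. */
	static int demo_tx(struct demo_ring *r)
	{
		if (!demo_avail(r))
			return -1;	/* ring full */
		r->cur_tx_wp = (r->cur_tx_wp + 1) % r->entries;
		return 0;
	}

	/* Completion: advance the read point if the ring is not empty. */
	static void demo_tx_done(struct demo_ring *r)
	{
		if (r->cur_tx_rp != r->cur_tx_wp)
			r->cur_tx_rp = (r->cur_tx_rp + 1) % r->entries;
	}

	int main(void)
	{
		struct demo_ring r = { .entries = RING_ENTRIES };
		int i;

		for (i = 0; i < 5; i++)
			demo_tx(&r);
		demo_tx_done(&r);
		demo_tx_done(&r);
		printf("wp=%u rp=%u avail=%u\n", (unsigned)r.cur_tx_wp,
		       (unsigned)r.cur_tx_rp, (unsigned)demo_avail(&r));
		return 0;
	}

Keeping one slot unused is one common way to tell a full ring from an empty one; the driver additionally stops the mac80211 queue when few descriptors remain, as in the "less desc left, stop skb_queue" path above.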
@@ -1753,6 +1953,22 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Find adapter, Hardware type is 8188EE\n"); + } else if (deviceid == RTL_PCI_8723BE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE; + RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, + "Find adapter, Hardware type is 8723BE\n"); + } else if (deviceid == RTL_PCI_8192EE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE; + RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, + "Find adapter, Hardware type is 8192EE\n"); + } else if (deviceid == RTL_PCI_8821AE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE; + RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, + "Find adapter, Hardware type is 8821AE\n"); + } else if (deviceid == RTL_PCI_8812AE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE; + RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, + "Find adapter, Hardware type is 8812AE\n"); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Err: Unknown device - vid/did=%x/%x\n", @@ -1779,11 +1995,20 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, rtlhal->interfaceindex = 0; } } + + /* 92ee use new trx flow */ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) + rtlpriv->use_new_trx_flow = true; + else + rtlpriv->use_new_trx_flow = false; + /*find bus info */ pcipriv->ndis_adapter.busnumber = pdev->bus->number; pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); + /*find bridge info */ + pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN; /* some ARM have no bridge_pdev and will crash here * so we should check if bridge_pdev is NULL */ @@ -1951,6 +2176,11 @@ int rtl_pci_probe(struct pci_dev *pdev, pcipriv = (void *)rtlpriv->priv; pcipriv->dev.pdev = pdev; init_completion(&rtlpriv->firmware_loading_complete); + /*proximity init here*/ + rtlpriv->proximity.proxim_on = false; + + pcipriv = (void *)rtlpriv->priv; + pcipriv->dev.pdev = pdev; /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_PCI; @@ -2013,9 +2243,6 @@ int rtl_pci_probe(struct pci_dev *pdev, /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); - /*aspm */ - rtl_pci_init_aspm(hw); - /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { @@ -2036,9 +2263,20 @@ int rtl_pci_probe(struct pci_dev *pdev, err = -ENODEV; goto fail3; } - rtlpriv->cfg->ops->init_sw_leds(hw); + /*aspm */ + rtl_pci_init_aspm(hw); + + err = ieee80211_register_hw(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't register mac80211 hw.\n"); + err = -ENODEV; + goto fail3; + } + rtlpriv->mac80211.mac80211_registered = 1; + err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -2046,6 +2284,9 @@ int rtl_pci_probe(struct pci_dev *pdev, goto fail3; } + /*init rfkill */ + rtl_init_rfkill(hw); /* Init PCI sw */ + rtlpci = rtl_pcidev(pcipriv); err = rtl_pci_intr_mode_decide(hw); if (err) { @@ -2056,9 +2297,11 @@ int rtl_pci_probe(struct pci_dev *pdev, } rtlpci->irq_alloc = 1; + set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; fail3: + pci_set_drvdata(pdev, NULL); rtl_deinit_core(hw); if (rtlpriv->io.pci_mem_start != 0) @@ -2128,6 +2371,8 @@ void rtl_pci_disconnect(struct pci_dev *pdev) rtl_pci_disable_aspm(hw); + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtl_pci_disconnect); diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h index 
90174a814a6d26fc6e384171afc54324b98a0abd..5e832306dba956c7b29fffc9cf9d1b3c9a66b2e8 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ b/drivers/net/wireless/rtlwifi/pci.h @@ -39,10 +39,11 @@ #define RTL_PCI_RX_CMD_QUEUE 1 #define RTL_PCI_MAX_RX_QUEUE 2 -#define RTL_PCI_MAX_RX_COUNT 64 +#define RTL_PCI_MAX_RX_COUNT 512/*64*/ #define RTL_PCI_MAX_TX_QUEUE_COUNT 9 #define RT_TXDESC_NUM 128 +#define TX_DESC_NUM_92E 512 #define RT_TXDESC_NUM_BE_QUEUE 256 #define BK_QUEUE 0 @@ -62,6 +63,12 @@ .subdevice = PCI_ANY_ID,\ .driver_data = (kernel_ulong_t)&(cfg) +#define INTEL_VENDOR_ID 0x8086 +#define SIS_VENDOR_ID 0x1039 +#define ATI_VENDOR_ID 0x1002 +#define ATI_DEVICE_ID 0x7914 +#define AMD_VENDOR_ID 0x1022 + #define PCI_MAX_BRIDGE_NUMBER 255 #define PCI_MAX_DEVICES 32 #define PCI_MAX_FUNCTION 8 @@ -69,6 +76,11 @@ #define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */ #define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */ +#define PCI_CLASS_BRIDGE_DEV 0x06 +#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04 +#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10 +#define PCI_CAP_ID_EXP 0x10 + #define U1DONTCARE 0xFF #define U2DONTCARE 0xFFFF #define U4DONTCARE 0xFFFFFFFF @@ -87,6 +99,7 @@ #define RTL_PCI_700F_DID 0x700F #define RTL_PCI_701F_DID 0x701F #define RTL_PCI_DLINK_DID 0x3304 +#define RTL_PCI_8723AE_DID 0x8723 /*8723e */ #define RTL_PCI_8192CET_DID 0x8191 /*8192ce */ #define RTL_PCI_8192CE_DID 0x8178 /*8192ce */ #define RTL_PCI_8191CE_DID 0x8177 /*8192ce */ @@ -95,6 +108,10 @@ #define RTL_PCI_8192DE_DID 0x8193 /*8192de */ #define RTL_PCI_8192DE_DID2 0x002B /*92DE*/ #define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/ +#define RTL_PCI_8723BE_DID 0xB723 /*8723be*/ +#define RTL_PCI_8192EE_DID 0x818B /*8192ee*/ +#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/ +#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/ /*8192 support 16 pages of IO registers*/ #define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000 @@ -125,24 +142,34 @@ struct rtl_pci_capabilities_header { u8 next; }; -struct rtl_rx_desc { - u32 dword[8]; +/* In new TRX flow, Buffer_desc is new concept + * But TX wifi info == TX descriptor in old flow + * RX wifi info == RX descriptor in old flow + */ +struct rtl_tx_buffer_desc { +#if (RTL8192EE_SEG_NUM == 2) + u32 dword[2*(DMA_IS_64BIT + 1)*8]; /*seg = 8*/ +#elif (RTL8192EE_SEG_NUM == 1) + u32 dword[2*(DMA_IS_64BIT + 1)*4]; /*seg = 4*/ +#elif (RTL8192EE_SEG_NUM == 0) + u32 dword[2*(DMA_IS_64BIT + 1)*2]; /*seg = 2*/ +#endif } __packed; struct rtl_tx_desc { u32 dword[16]; } __packed; -struct rtl_tx_cmd_desc { - u32 dword[16]; +struct rtl_rx_buffer_desc { /*rx buffer desc*/ + u32 dword[2]; } __packed; -/* In new TRX flow, Buffer_desc is new concept - * But TX wifi info == TX descriptor in old flow - * RX wifi info == RX descriptor in old flow - */ -struct rtl_tx_buffer_desc { - u32 dword[8]; /*seg = 4*/ +struct rtl_rx_desc { /*old: rx desc new: rx wifi info*/ + u32 dword[8]; +} __packed; + +struct rtl_tx_cmd_desc { + u32 dword[16]; } __packed; struct rtl8192_tx_ring { @@ -153,6 +180,10 @@ struct rtl8192_tx_ring { struct sk_buff_head queue; /*add for new trx flow*/ struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/ + dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/ + u16 avl_desc; /* available_desc_to_write */ + u16 cur_tx_wp; /* current_tx_write_point */ + u16 cur_tx_rp; /* current_tx_read_point */ }; struct rtl8192_rx_ring { @@ -160,6 +191,9 @@ struct rtl8192_rx_ring { dma_addr_t dma; unsigned int idx; struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT]; + /*add for new trx flow*/ + struct 
rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/ + u16 next_rx_rp; /* next_rx_read_point */ }; struct rtl_pci { diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 50504942ded151f40a198099b6fd31013774ca93..b69321d45f043a8727f5e02341b1bc9cd91f4617 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -27,110 +23,11 @@ * *****************************************************************************/ -#include #include "wifi.h" #include "base.h" #include "ps.h" - -/* Description: - * This routine deals with the Power Configuration CMD - * parsing for RTL8723/RTL8188E Series IC. - * Assumption: - * We should follow specific format that was released from HW SD. - */ -bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 faversion, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]) -{ - struct wlan_pwr_cfg cfg_cmd = {0}; - bool polling_bit = false; - u32 ary_idx = 0; - u8 value = 0; - u32 offset = 0; - u32 polling_count = 0; - u32 max_polling_cnt = 5000; - - do { - cfg_cmd = pwrcfgcmd[ary_idx]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x)," - "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n", - GET_PWR_CFG_OFFSET(cfg_cmd), - GET_PWR_CFG_CUT_MASK(cfg_cmd), - GET_PWR_CFG_FAB_MASK(cfg_cmd), - GET_PWR_CFG_INTF_MASK(cfg_cmd), - GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd), - GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd)); - - if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) && - (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) && - (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) { - switch (GET_PWR_CFG_CMD(cfg_cmd)) { - case PWR_CMD_READ: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"); - break; - case PWR_CMD_WRITE: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"); - offset = GET_PWR_CFG_OFFSET(cfg_cmd); - - /*Read the value from system register*/ - value = rtl_read_byte(rtlpriv, offset); - value &= (~(GET_PWR_CFG_MASK(cfg_cmd))); - value |= (GET_PWR_CFG_VALUE(cfg_cmd) & - GET_PWR_CFG_MASK(cfg_cmd)); - - /*Write the value back to sytem register*/ - rtl_write_byte(rtlpriv, offset, value); - break; - case PWR_CMD_POLLING: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"); - polling_bit = false; - offset = GET_PWR_CFG_OFFSET(cfg_cmd); - - do { - value = rtl_read_byte(rtlpriv, offset); - - value &= GET_PWR_CFG_MASK(cfg_cmd); - if (value == - (GET_PWR_CFG_VALUE(cfg_cmd) - & GET_PWR_CFG_MASK(cfg_cmd))) - polling_bit = true; - else - udelay(10); - - if (polling_count++ > max_polling_cnt) - return false; - } while (!polling_bit); - break; - case PWR_CMD_DELAY: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"); - if (GET_PWR_CFG_VALUE(cfg_cmd) == - PWRSEQ_DELAY_US) - udelay(GET_PWR_CFG_OFFSET(cfg_cmd)); - else - mdelay(GET_PWR_CFG_OFFSET(cfg_cmd)); - break; - case PWR_CMD_END: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - 
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); - return true; - default: - RT_ASSERT(false, - "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); - break; - } - - } - ary_idx++; - } while (1); - - return true; -} -EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing); +#include +#include "btcoexist/rtl_btc.h" bool rtl_ps_enable_nic(struct ieee80211_hw *hw) { @@ -181,11 +78,49 @@ EXPORT_SYMBOL(rtl_ps_disable_nic); bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, enum rf_pwrstate state_toset, - u32 changesource) + u32 changesource, bool protect_or_not) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + enum rf_pwrstate rtstate; bool actionallowed = false; + u16 rfwait_cnt = 0; + + if (protect_or_not) + goto no_protect; + + /*Only one thread can change + *the RF state at one time, and others + *should wait to be executed. + */ + while (true) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "RF Change in progress! Wait to set..state_toset(%d).\n", + state_toset); + + /* Set RF after the previous action is done. */ + while (ppsc->rfchange_inprogress) { + rfwait_cnt++; + mdelay(1); + /*Wait too long, return false to avoid + *to be stuck here. + */ + if (rfwait_cnt > 100) + return false; + } + } else { + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + break; + } + } + +no_protect: + rtstate = ppsc->rfpwr_state; switch (state_toset) { case ERFON: @@ -227,6 +162,12 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, if (actionallowed) rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); + if (!protect_or_not) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + return actionallowed; } EXPORT_SYMBOL(rtl_ps_set_rf_state); @@ -249,12 +190,13 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) } } - rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, RF_CHANGE_BY_IPS); + rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, + RF_CHANGE_BY_IPS, false); if (ppsc->inactive_pwrstate == ERFOFF && rtlhal->interface == INTF_PCI) { if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && - !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { rtlpriv->intf_ops->enable_aspm(hw); RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } @@ -318,6 +260,11 @@ void rtl_ips_nic_off_wq_callback(void *data) ppsc->inactive_pwrstate = ERFOFF; ppsc->in_powersavemode = true; + /* call before RF off */ + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv, + ppsc->inactive_pwrstate); + /*rtl_pci_reset_trx_ring(hw); */ _rtl_ps_inactive_ps(hw); } @@ -328,10 +275,9 @@ void rtl_ips_nic_off(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - /* - *because when link with ap, mac80211 will ask us - *to disable nic quickly after scan before linking, - *this will cause link failed, so we delay 100ms here + /* because when link with ap, mac80211 will ask us + * to disable nic quickly after scan before linking, + * this will cause link failed, so we delay 100ms here */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ips_nic_off_wq, MSECS(100)); @@ -343,16 +289,12 @@ void rtl_ips_nic_off(struct ieee80211_hw *hw) void rtl_ips_nic_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum 
rf_pwrstate rtstate; - unsigned long flags; - - if (mac->opmode != NL80211_IFTYPE_STATION) - return; - spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + spin_lock(&rtlpriv->locks.ips_lock); if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; @@ -362,12 +304,14 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) ppsc->inactive_pwrstate = ERFON; ppsc->in_powersavemode = false; - _rtl_ps_inactive_ps(hw); + /* call after RF on */ + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv, + ppsc->inactive_pwrstate); } } - - spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags); + spin_unlock(&rtlpriv->locks.ips_lock); } EXPORT_SYMBOL_GPL(rtl_ips_nic_on); @@ -404,7 +348,7 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw) } /* Change current and default preamble mode.*/ -static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) +void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -437,21 +381,24 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) if (ppsc->dot11_psmode == EACTIVE) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "FW LPS leave ps_mode:%x\n", - FW_PS_ACTIVE_MODE); + FW_PS_ACTIVE_MODE); enter_fwlps = false; ppsc->pwr_mode = FW_PS_ACTIVE_MODE; ppsc->smart_ps = 0; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_FW_LPS_ACTION, - (u8 *)(&enter_fwlps)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION, + (u8 *)(&enter_fwlps)); if (ppsc->p2p_ps_info.opp_ps) - rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE); + rtl_p2p_ps_cmd(hw , P2P_PS_ENABLE); + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode); } else { if (rtl_get_fwlps_doze(hw)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "FW LPS enter ps_mode:%x\n", ppsc->fwctrl_psmode); + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode); enter_fwlps = true; ppsc->pwr_mode = ppsc->fwctrl_psmode; ppsc->smart_ps = 2; @@ -473,6 +420,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned long flag; if (!ppsc->fwctrl_lps) return; @@ -493,7 +441,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); /* Idle for a while if we connect to AP a while ago. 
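The rtl_ps_set_rf_state() hunk above serializes RF state changes: rfchange_inprogress is tested and claimed under rf_ps_lock, and a late caller polls with mdelay(1) and gives up after roughly 100 ms (rfwait_cnt > 100). A rough user-space sketch of the same guard pattern, using a pthread mutex and invented names rather than the kernel spinlock:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t rf_ps_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool rfchange_inprogress;

	/* Try to become the single RF state changer; wait at most ~100 ms. */
	static bool rf_change_begin(void)
	{
		int wait_cnt = 0;

		for (;;) {
			pthread_mutex_lock(&rf_ps_lock);
			if (!rfchange_inprogress) {
				rfchange_inprogress = true;
				pthread_mutex_unlock(&rf_ps_lock);
				return true;
			}
			pthread_mutex_unlock(&rf_ps_lock);

			/* Another thread is mid-change: poll, but give up
			 * eventually so a stuck peer cannot wedge us. */
			if (++wait_cnt > 100)
				return false;
			usleep(1000);	/* ~1 ms, like the driver's mdelay(1) */
		}
	}

	static void rf_change_end(void)
	{
		pthread_mutex_lock(&rf_ps_lock);
		rfchange_inprogress = false;
		pthread_mutex_unlock(&rf_ps_lock);
	}

	int main(void)
	{
		if (rf_change_begin()) {
			puts("changing RF state");
			rf_change_end();
		}
		return 0;
	}

Re-checking the flag under the lock on every iteration avoids the race between observing the flag and claiming it.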
*/ if (mac->cnt_after_linked >= 2) { @@ -505,8 +453,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw) } } - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } +EXPORT_SYMBOL(rtl_lps_enter); /*Leave the leisure power save mode.*/ void rtl_lps_leave(struct ieee80211_hw *hw) @@ -514,14 +463,15 @@ void rtl_lps_leave(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flag; - mutex_lock(&rtlpriv->locks.ps_mutex); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { /*FIX ME */ - rtlpriv->cfg->ops->enable_interrupt(hw); + /*rtlpriv->cfg->ops->enable_interrupt(hw); */ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && @@ -536,8 +486,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } +EXPORT_SYMBOL(rtl_lps_leave); /* For sw LPS*/ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) @@ -613,7 +564,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) /* back to low-power land. and delay is * prevent null power save frame tx fail */ queue_delayed_work(rtlpriv->works.rtl_wq, - &rtlpriv->works.ps_work, MSECS(5)); + &rtlpriv->works.ps_work, MSECS(5)); } else { RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed); @@ -626,6 +577,7 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + unsigned long flag; if (!rtlpriv->psc.swctrl_lps) return; @@ -633,14 +585,14 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) return; if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && - RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { rtlpriv->intf_ops->disable_aspm(hw); RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - mutex_lock(&rtlpriv->locks.ps_mutex); - rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } void rtl_swlps_rfon_wq_callback(void *data) @@ -657,6 +609,7 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + unsigned long flag; u8 sleep_intv; if (!rtlpriv->psc.sw_ps_enabled) @@ -673,12 +626,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) if (rtlpriv->link_info.busytraffic) return; - mutex_lock(&rtlpriv->locks.ps_mutex); - rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - mutex_unlock(&rtlpriv->locks.ps_mutex); + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (rtlpriv->psc.rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + return; + } + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS , false); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && - !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + !RT_IN_PS_LEVEL(ppsc, 
RT_PS_LEVEL_ASPM)) { rtlpriv->intf_ops->enable_aspm(hw); RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } @@ -706,7 +666,7 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) * awake before every dtim */ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "dtim_counter:%x will sleep :%d beacon_intv\n", - rtlpriv->psc.dtim_counter, sleep_intv); + rtlpriv->psc.dtim_counter, sleep_intv); /* we tested that 40ms is enough for sw & hw sw delay */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq, @@ -744,7 +704,7 @@ void rtl_swlps_wq_callback(void *data) if (rtlpriv->psc.state && !ps) { rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies - - rtlpriv->psc.last_action); + rtlpriv->psc.last_action); } if (ps) @@ -764,7 +724,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, u8 *pos, *end, *ie; u16 noa_len; static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; - u8 noa_num, index, i, noa_index = 0; + u8 noa_num, index , i, noa_index = 0; bool find_p2p_ie = false , find_p2p_ps_ie = false; pos = (u8 *)mgmt->u.beacon.variable; end = data + len; @@ -814,7 +774,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, index = 5; for (i = 0; i < noa_num; i++) { p2pinfo->noa_count_type[i] = - READEF1BYTE(ie+index); + READEF1BYTE(ie+index); index += 1; p2pinfo->noa_duration[i] = READEF4BYTE((__le32 *)ie+index); @@ -842,7 +802,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); } } - break; + break; } ie += 3 + noa_len; } @@ -860,7 +820,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_mgmt *mgmt = data; struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); - u8 noa_num, index, i, noa_index = 0; + u8 noa_num, index , i , noa_index = 0; u8 *pos, *end, *ie; u16 noa_len; static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; @@ -906,7 +866,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, index = 5; for (i = 0; i < noa_num; i++) { p2pinfo->noa_count_type[i] = - READEF1BYTE(ie+index); + READEF1BYTE(ie+index); index += 1; p2pinfo->noa_duration[i] = READEF4BYTE((__le32 *)ie+index); @@ -934,37 +894,37 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); } } - break; + break; } ie += 3 + noa_len; } } -void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) +void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state); + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n" , p2p_ps_state); switch (p2p_ps_state) { case P2P_PS_DISABLE: p2pinfo->p2p_ps_state = p2p_ps_state; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, &p2p_ps_state); - p2pinfo->noa_index = 0; p2pinfo->ctwindow = 0; p2pinfo->opp_ps = 0; p2pinfo->noa_num = 0; p2pinfo->p2p_ps_mode = P2P_PS_NONE; - if (rtlps->fw_current_inpsmode == true) { + if (rtlps->fw_current_inpsmode) { if (rtlps->smart_ps == 0) { rtlps->smart_ps = 2; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, &rtlps->pwr_mode); } + } break; case P2P_PS_ENABLE: @@ -982,6 +942,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, &p2p_ps_state); + } break; case P2P_PS_SCAN: @@ -998,12 +959,16 @@ void 
rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) break; } RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "ctwindow %x oppps %x\n", p2pinfo->ctwindow, p2pinfo->opp_ps); + "ctwindow %x oppps %x\n", + p2pinfo->ctwindow , p2pinfo->opp_ps); RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "count %x duration %x index %x interval %x start time %x noa num %x\n", - p2pinfo->noa_count_type[0], p2pinfo->noa_duration[0], - p2pinfo->noa_index, p2pinfo->noa_interval[0], - p2pinfo->noa_start_time[0], p2pinfo->noa_num); + p2pinfo->noa_count_type[0], + p2pinfo->noa_duration[0], + p2pinfo->noa_index, + p2pinfo->noa_interval[0], + p2pinfo->noa_start_time[0], + p2pinfo->noa_num); RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n"); } @@ -1032,8 +997,8 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len) return; if (ieee80211_is_action(hdr->frame_control)) - rtl_p2p_action_ie(hw, data, len - FCS_LEN); + rtl_p2p_action_ie(hw , data , len - FCS_LEN); else - rtl_p2p_noa_ie(hw, data, len - FCS_LEN); + rtl_p2p_noa_ie(hw , data , len - FCS_LEN); } EXPORT_SYMBOL_GPL(rtl_p2p_info); diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h index 3bd41f958974308e6b9a3b25a9670541cd3f56eb..29dfc514212dea82b61cfeba5fdcf520b59c7d47 100644 --- a/drivers/net/wireless/rtlwifi/ps.h +++ b/drivers/net/wireless/rtlwifi/ps.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -32,68 +28,9 @@ #define MAX_SW_LPS_SLEEP_INTV 5 -/*--------------------------------------------- - * 3 The value of cmd: 4 bits - *--------------------------------------------- - */ -#define PWR_CMD_READ 0x00 -#define PWR_CMD_WRITE 0x01 -#define PWR_CMD_POLLING 0x02 -#define PWR_CMD_DELAY 0x03 -#define PWR_CMD_END 0x04 - -/* define the base address of each block */ -#define PWR_BASEADDR_MAC 0x00 -#define PWR_BASEADDR_USB 0x01 -#define PWR_BASEADDR_PCIE 0x02 -#define PWR_BASEADDR_SDIO 0x03 - -#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) -#define PWR_CUT_TESTCHIP_MSK BIT(0) -#define PWR_CUT_A_MSK BIT(1) -#define PWR_CUT_B_MSK BIT(2) -#define PWR_CUT_C_MSK BIT(3) -#define PWR_CUT_D_MSK BIT(4) -#define PWR_CUT_E_MSK BIT(5) -#define PWR_CUT_F_MSK BIT(6) -#define PWR_CUT_G_MSK BIT(7) -#define PWR_CUT_ALL_MSK 0xFF -#define PWR_INTF_SDIO_MSK BIT(0) -#define PWR_INTF_USB_MSK BIT(1) -#define PWR_INTF_PCI_MSK BIT(2) -#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) - -enum pwrseq_delay_unit { - PWRSEQ_DELAY_US, - PWRSEQ_DELAY_MS, -}; - -struct wlan_pwr_cfg { - u16 offset; - u8 cut_msk; - u8 fab_msk:4; - u8 interface_msk:4; - u8 base:4; - u8 cmd:4; - u8 msk; - u8 value; -}; - -#define GET_PWR_CFG_OFFSET(__PWR_CMD) (__PWR_CMD.offset) -#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk) -#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk) -#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) (__PWR_CMD.interface_msk) -#define GET_PWR_CFG_BASE(__PWR_CMD) (__PWR_CMD.base) -#define GET_PWR_CFG_CMD(__PWR_CMD) (__PWR_CMD.cmd) -#define GET_PWR_CFG_MASK(__PWR_CMD) (__PWR_CMD.msk) -#define GET_PWR_CFG_VALUE(__PWR_CMD) (__PWR_CMD.value) - -bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 fab_version, u8 interface_type, - struct 
wlan_pwr_cfg pwrcfgcmd[]); - bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, - enum rf_pwrstate state_toset, u32 changesource); + enum rf_pwrstate state_toset, u32 changesource, + bool protect_or_not); bool rtl_ps_enable_nic(struct ieee80211_hw *hw); bool rtl_ps_disable_nic(struct ieee80211_hw *hw); void rtl_ips_nic_off(struct ieee80211_hw *hw); @@ -102,12 +39,14 @@ void rtl_ips_nic_off_wq_callback(void *data); void rtl_lps_enter(struct ieee80211_hw *hw); void rtl_lps_leave(struct ieee80211_hw *hw); +void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode); + void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len); void rtl_swlps_wq_callback(void *data); void rtl_swlps_rfon_wq_callback(void *data); void rtl_swlps_rf_awake(struct ieee80211_hw *hw); void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); -void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); +void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state); void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len); void rtl_lps_change_work_callback(struct work_struct *work); diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/pwrseqcmd.h similarity index 92% rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h rename to drivers/net/wireless/rtlwifi/pwrseqcmd.h index 6e0f3ea37ec0ab9b0e0de5f5ec511053f736a1f4..17ce0cb2c35c5e81607caffd53f28529b7d0568c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h +++ b/drivers/net/wireless/rtlwifi/pwrseqcmd.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,7 +26,7 @@ #ifndef __RTL8723E_PWRSEQCMD_H__ #define __RTL8723E_PWRSEQCMD_H__ -#include "../wifi.h" +#include "wifi.h" /*--------------------------------------------- * 3 The value of cmd: 4 bits *--------------------------------------------- diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c index ee28a1a3d0100deeb0b3a50296109e814528a85e..7863bd278b227f6c7dd5057405d64a042f2fe69a 100644 --- a/drivers/net/wireless/rtlwifi/rc.c +++ b/drivers/net/wireless/rtlwifi/rc.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -35,13 +31,13 @@ *Finds the highest rate index we can use *if skb is special data like DHCP/EAPOL, we set should *it to lowest rate CCK_1M, otherwise we set rate to - *CCK11M or OFDM_54M based on wireless mode. + *highest rate based on wireless mode used for iwconfig + *show Tx rate. 
*/ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, struct ieee80211_sta *sta, struct sk_buff *skb, bool not_data) { - struct rtl_mac *rtlmac = rtl_mac(rtlpriv); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_sta_info *sta_entry = NULL; @@ -54,21 +50,13 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, *2.in rtl_get_tcb_desc when we check rate is * 1M we will not use FW rate but user rate. */ - if (rtlmac->opmode == NL80211_IFTYPE_AP || - rtlmac->opmode == NL80211_IFTYPE_ADHOC || - rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) { - if (sta) { - sta_entry = (struct rtl_sta_info *) sta->drv_priv; - wireless_mode = sta_entry->wireless_mode; - } else { - return 0; - } - } else { - wireless_mode = rtlmac->mode; + + if (sta) { + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + wireless_mode = sta_entry->wireless_mode; } - if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || - not_data) { + if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) { return 0; } else { if (rtlhal->current_bandtype == BAND_ON_2_4G) { @@ -76,21 +64,27 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, return B_MODE_MAX_RIX; } else if (wireless_mode == WIRELESS_MODE_G) { return G_MODE_MAX_RIX; - } else { + } else if (wireless_mode == WIRELESS_MODE_N_24G) { if (get_rf_type(rtlphy) != RF_2T2R) return N_MODE_MCS7_RIX; else return N_MODE_MCS15_RIX; + } else if (wireless_mode == WIRELESS_MODE_AC_24G) { + return AC_MODE_MCS9_RIX; } + return 0; } else { if (wireless_mode == WIRELESS_MODE_A) { return A_MODE_MAX_RIX; - } else { + } else if (wireless_mode == WIRELESS_MODE_N_5G) { if (get_rf_type(rtlphy) != RF_2T2R) return N_MODE_MCS7_RIX; else return N_MODE_MCS15_RIX; + } else if (wireless_mode == WIRELESS_MODE_AC_5G) { + return AC_MODE_MCS9_RIX; } + return 0; } } } @@ -103,35 +97,52 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, bool not_data) { struct rtl_mac *mac = rtl_mac(rtlpriv); - u8 sgi_20 = 0, sgi_40 = 0; + struct rtl_sta_info *sta_entry = NULL; + u8 wireless_mode = 0; + u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0; if (sta) { sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + wireless_mode = sta_entry->wireless_mode; } rate->count = tries; rate->idx = rix >= 0x00 ? 
rix : 0x00; + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE && + wireless_mode == WIRELESS_MODE_AC_5G) + rate->idx += 0x10;/*2NSS for 8812AE*/ if (!not_data) { if (txrc->short_preamble) rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) { - if (sta && (sta->bandwidth >= IEEE80211_STA_RX_BW_40)) + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta && (sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40)) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (sta && (sta->vht_cap.vht_supported)) + rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; } else { if (mac->bw_40) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (mac->bw_80) + rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; } - if (sgi_20 || sgi_40) + + if (sgi_20 || sgi_40 || sgi_80) rate->flags |= IEEE80211_TX_RC_SHORT_GI; - if (sta && sta->ht_cap.ht_supported) + if (sta && sta->ht_cap.ht_supported && + ((wireless_mode == WIRELESS_MODE_N_5G) || + (wireless_mode == WIRELESS_MODE_N_24G))) rate->flags |= IEEE80211_TX_RC_MCS; } } static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta, - void *priv_sta, struct ieee80211_tx_rate_control *txrc) + void *priv_sta, + struct ieee80211_tx_rate_control *txrc) { struct rtl_priv *rtlpriv = ppriv; struct sk_buff *skb = txrc->skb; @@ -158,7 +169,7 @@ static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta, } static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, - struct rtl_sta_info *sta_entry, u16 tid) + struct rtl_sta_info *sta_entry, u16 tid) { struct rtl_mac *mac = rtl_mac(rtlpriv); @@ -166,7 +177,7 @@ static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, return false; if (mac->opmode == NL80211_IFTYPE_STATION && - mac->cnt_after_linked < 3) + mac->cnt_after_linked < 3) return false; if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP) @@ -193,23 +204,23 @@ static void rtl_tx_status(void *ppriv, if (rtl_is_special_data(mac->hw, skb, true)) return; - if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) - || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) return; if (sta) { /* Check if aggregation has to be enabled for this tid */ sta_entry = (struct rtl_sta_info *) sta->drv_priv; if ((sta->ht_cap.ht_supported) && - !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { + !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { if (ieee80211_is_data_qos(fc)) { u8 tid = rtl_get_tid(skb); if (_rtl_tx_aggr_check(rtlpriv, sta_entry, - tid)) { + tid)) { sta_entry->tids[tid].agg.agg_state = - RTL_AGG_PROGRESS; - ieee80211_start_tx_ba_session(sta, - tid, 5000); + RTL_AGG_PROGRESS; + ieee80211_start_tx_ba_session(sta, tid, + 5000); } } } @@ -223,8 +234,15 @@ static void rtl_rate_init(void *ppriv, { } -static void *rtl_rate_alloc(struct ieee80211_hw *hw, - struct dentry *debugfsdir) +static void rtl_rate_update(void *ppriv, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed) +{ +} + +static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { struct rtl_priv *rtlpriv = rtl_priv(hw); return rtlpriv; @@ -260,13 +278,14 @@ static void rtl_rate_free_sta(void *rtlpriv, kfree(rate_priv); } -static const struct rate_control_ops rtl_rate_ops = { +static struct rate_control_ops rtl_rate_ops = { .name = "rtl_rc", .alloc = rtl_rate_alloc, .free = rtl_rate_free, .alloc_sta = rtl_rate_alloc_sta, .free_sta = rtl_rate_free_sta, 
.rate_init = rtl_rate_init, + .rate_update = rtl_rate_update, .tx_status = rtl_tx_status, .get_rate = rtl_get_rate, }; diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/rtlwifi/rc.h index 4d61761606101faa35b0a9da503dc62a409f1a75..f29643d60d6baef1bde95ce560254f4a43efce56 100644 --- a/drivers/net/wireless/rtlwifi/rc.h +++ b/drivers/net/wireless/rtlwifi/rc.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -38,10 +34,15 @@ #define N_MODE_MCS7_RIX 7 #define N_MODE_MCS15_RIX 15 +#define AC_MODE_MCS7_RIX 7 +#define AC_MODE_MCS8_RIX 8 +#define AC_MODE_MCS9_RIX 9 + struct rtl_rate_priv { u8 ht_cap; }; int rtl_rate_control_register(void); void rtl_rate_control_unregister(void); + #endif diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c index a4eb9b271438657e863dc07af8d9ca20ac431301..1893d01b9e789c1bca2f25f1e0fa5d067da99cfc 100644 --- a/drivers/net/wireless/rtlwifi/regd.c +++ b/drivers/net/wireless/rtlwifi/regd.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -59,26 +55,23 @@ static struct country_code_to_enum_rd allCountries[] = { */ #define RTL819x_2GHZ_CH12_13 \ REG_RULE(2467-10, 2472+10, 40, 0, 20,\ - NL80211_RRF_NO_IR) + NL80211_RRF_PASSIVE_SCAN) #define RTL819x_2GHZ_CH14 \ REG_RULE(2484-10, 2484+10, 40, 0, 20, \ - NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM) + NL80211_RRF_PASSIVE_SCAN | \ + NL80211_RRF_NO_OFDM) + /* 5G chan 36 - chan 64*/ #define RTL819x_5GHZ_5150_5350 \ - REG_RULE(5150-10, 5350+10, 40, 0, 30, \ - NL80211_RRF_NO_IR) - + REG_RULE(5150-10, 5350+10, 80, 0, 30, 0) /* 5G chan 100 - chan 165*/ #define RTL819x_5GHZ_5470_5850 \ - REG_RULE(5470-10, 5850+10, 40, 0, 30, \ - NL80211_RRF_NO_IR) - + REG_RULE(5470-10, 5850+10, 80, 0, 30, 0) /* 5G chan 149 - chan 165*/ #define RTL819x_5GHZ_5725_5850 \ - REG_RULE(5725-10, 5850+10, 40, 0, 30, \ - NL80211_RRF_NO_IR) + REG_RULE(5725-10, 5850+10, 80, 0, 30, 0) #define RTL819x_5GHZ_ALL \ (RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850) @@ -143,7 +136,7 @@ static const struct ieee80211_regdomain rtl_regdom_14 = { static bool _rtl_is_radar_freq(u16 center_freq) { - return (center_freq >= 5260 && center_freq <= 5700); + return center_freq >= 5260 && center_freq <= 5700; } static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, @@ -169,10 +162,9 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { reg_rule = freq_reg_info(wiphy, - MHZ_TO_KHZ(ch->center_freq)); + ch->center_freq); if (IS_ERR(reg_rule)) continue; - /* *If 11d had a rule for this channel ensure *we enable adhoc/beaconing if it allows us to @@ -182,11 +174,16 @@ static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, *regulatory_hint(). 
*/ - if (!(reg_rule->flags & NL80211_RRF_NO_IR)) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + if (!(reg_rule->flags & NL80211_RRF_NO_IBSS)) + ch->flags &= ~IEEE80211_CHAN_NO_IBSS; + if (!(reg_rule->flags & + NL80211_RRF_PASSIVE_SCAN)) + ch->flags &= + ~IEEE80211_CHAN_PASSIVE_SCAN; } else { if (ch->beacon_found) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_PASSIVE_SCAN); } } } @@ -211,35 +208,35 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, */ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { ch = &sband->channels[11]; /* CH 12 */ - if (ch->flags & IEEE80211_CHAN_NO_IR) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; ch = &sband->channels[12]; /* CH 13 */ - if (ch->flags & IEEE80211_CHAN_NO_IR) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; return; } /* - *If a country IE has been received check its rule for this + *If a country IE has been received check its rule for this *channel first before enabling active scan. The passive scan *would have been enforced by the initial processing of our *custom regulatory domain. */ ch = &sband->channels[11]; /* CH 12 */ - reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq)); + reg_rule = freq_reg_info(wiphy, ch->center_freq); if (!IS_ERR(reg_rule)) { - if (!(reg_rule->flags & NL80211_RRF_NO_IR)) - if (ch->flags & IEEE80211_CHAN_NO_IR) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } ch = &sband->channels[12]; /* CH 13 */ - reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq)); + reg_rule = freq_reg_info(wiphy, ch->center_freq); if (!IS_ERR(reg_rule)) { - if (!(reg_rule->flags & NL80211_RRF_NO_IR)) - if (ch->flags & IEEE80211_CHAN_NO_IR) - ch->flags &= ~IEEE80211_CHAN_NO_IR; + if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } } @@ -276,7 +273,8 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy) */ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch->flags |= IEEE80211_CHAN_RADAR | - IEEE80211_CHAN_NO_IR; + IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_PASSIVE_SCAN; } } @@ -289,9 +287,25 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy, return; } -static void _rtl_reg_notifier_apply(struct wiphy *wiphy, - struct regulatory_request *request, - struct rtl_regulatory *reg) +static void _rtl_dump_channel_map(struct wiphy *wiphy) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + unsigned int i; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!wiphy->bands[band]) + continue; + sband = wiphy->bands[band]; + for (i = 0; i < sband->n_channels; i++) + ch = &sband->channels[i]; + } +} + +static int _rtl_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct rtl_regulatory *reg) { /* We always apply this */ _rtl_reg_apply_radar_flags(wiphy); @@ -305,10 +319,14 @@ static void _rtl_reg_notifier_apply(struct wiphy *wiphy, _rtl_reg_apply_world_flags(wiphy, request->initiator, reg); break; } + + _rtl_dump_channel_map(wiphy); + + return 0; } static const struct ieee80211_regdomain *_rtl_regdomain_select( - struct rtl_regulatory *reg) + struct rtl_regulatory *reg) { switch 
(reg->country_code) { case COUNTRY_CODE_FCC: @@ -337,9 +355,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, struct wiphy *wiphy, - void (*reg_notifier) (struct wiphy *wiphy, - struct regulatory_request * - request)) + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request * + request)) { const struct ieee80211_regdomain *regd; @@ -348,7 +366,6 @@ static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG; wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS; - regd = _rtl_regdomain_select(reg); wiphy_apply_custom_regulatory(wiphy, regd); _rtl_reg_apply_radar_flags(wiphy); @@ -368,7 +385,7 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) } int rtl_regd_init(struct ieee80211_hw *hw, - void (*reg_notifier) (struct wiphy *wiphy, + void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -382,7 +399,8 @@ int rtl_regd_init(struct ieee80211_hw *hw, rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan; RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, - "rtl: EEPROM regdomain: 0x%0x\n", rtlpriv->regd.country_code); + "rtl: EEPROM regdomain: 0x%0x\n", + rtlpriv->regd.country_code); if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, @@ -403,7 +421,7 @@ int rtl_regd_init(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, "rtl: Country alpha2 being used: %c%c\n", - rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]); + rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]); _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier); diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h index 4e1f4f00e6e9633ed30c59ca2220039e733fdb39..3bbbaaa68530ef8678fbc0b65e5b91f81d50e301 100644 --- a/drivers/net/wireless/rtlwifi/regd.h +++ b/drivers/net/wireless/rtlwifi/regd.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,6 +26,10 @@ #ifndef __RTL_REGD_H__ #define __RTL_REGD_H__ +/* for kernel 3.14 , both value are changed to IEEE80211_CHAN_NO_IR*/ +#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR +#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR + struct country_code_to_enum_rd { u16 countrycode; const char *iso_name; @@ -56,6 +56,7 @@ enum country_code_type_t { int rtl_regd_init(struct ieee80211_hw *hw, void (*reg_notifier) (struct wiphy *wiphy, - struct regulatory_request *request)); + struct regulatory_request *request)); void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h index c764fff9ebe6eae893fba9462ad04f7106ab98f7..d9ea9d0c79a526694210f7f9f46c7e9747a31285 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -111,7 +107,6 @@ #define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) - /* [15:12] IC version(CUT): A-cut=0, B-cut=1, C-cut=2, D-cut=3 * [7] Manufacturer: TSMC=0, UMC=1 * [6:4] RF type: 1T1R=0, 1T2R=1, 2T2R=2 @@ -130,7 +125,6 @@ #define D_CUT_VERSION ((BIT(12)|BIT(13))) #define E_CUT_VERSION BIT(14) - /* MASK */ #define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) #define CHIP_TYPE_MASK BIT(3) @@ -147,7 +141,6 @@ #define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) #define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) - #define IS_81XXC(version) \ ((GET_CVID_IC_TYPE(version) == 0) ? true : false) #define IS_8723_SERIES(version) \ @@ -174,7 +167,7 @@ #define IS_81xxC_VENDOR_UMC_A_CUT(version) \ (IS_81XXC(version) ? ((IS_CHIP_VENDOR_UMC(version)) ? \ ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) : false) -#define IS_81xxC_VENDOR_UMC_B_CUT(version) \ +#define IS_81XXC_VENDOR_UMC_B_CUT(version) \ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true \ : false) : false) : false) @@ -225,44 +218,37 @@ enum power_polocy_config { }; enum interface_select_pci { - INTF_SEL1_MINICARD, - INTF_SEL0_PCIE, - INTF_SEL2_RSV, - INTF_SEL3_RSV, + INTF_SEL1_MINICARD = 0, + INTF_SEL0_PCIE = 1, + INTF_SEL2_RSV = 2, + INTF_SEL3_RSV = 3, }; enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_Read_MACREG, - HAL_FW_C2H_CMD_Read_BBREG, - HAL_FW_C2H_CMD_Read_RFREG, - HAL_FW_C2H_CMD_Read_EEPROM, - HAL_FW_C2H_CMD_Read_EFUSE, - HAL_FW_C2H_CMD_Read_CAM, - HAL_FW_C2H_CMD_Get_BasicRate, - HAL_FW_C2H_CMD_Get_DataRate, - HAL_FW_C2H_CMD_Survey, - HAL_FW_C2H_CMD_SurveyDone, - HAL_FW_C2H_CMD_JoinBss, - HAL_FW_C2H_CMD_AddSTA, - HAL_FW_C2H_CMD_DelSTA, - HAL_FW_C2H_CMD_AtimDone, - HAL_FW_C2H_CMD_TX_Report, - HAL_FW_C2H_CMD_CCX_Report, - HAL_FW_C2H_CMD_DTM_Report, - HAL_FW_C2H_CMD_TX_Rate_Statistics, - HAL_FW_C2H_CMD_C2HLBK, - HAL_FW_C2H_CMD_C2HDBG, - HAL_FW_C2H_CMD_C2HFEEDBACK, + HAL_FW_C2H_CMD_READ_MACREG = 0, + HAL_FW_C2H_CMD_READ_BBREG = 1, + HAL_FW_C2H_CMD_READ_RFREG = 2, + HAL_FW_C2H_CMD_READ_EEPROM = 3, + HAL_FW_C2H_CMD_READ_EFUSE = 4, + HAL_FW_C2H_CMD_READ_CAM = 5, + HAL_FW_C2H_CMD_GET_BASICRATE = 6, + HAL_FW_C2H_CMD_GET_DATARATE = 7, + HAL_FW_C2H_CMD_SURVEY = 8, + HAL_FW_C2H_CMD_SURVEYDONE = 9, + HAL_FW_C2H_CMD_JOINBSS = 10, + HAL_FW_C2H_CMD_ADDSTA = 11, + HAL_FW_C2H_CMD_DELSTA = 12, + HAL_FW_C2H_CMD_ATIMDONE = 13, + HAL_FW_C2H_CMD_TX_REPORT = 14, + HAL_FW_C2H_CMD_CCX_REPORT = 15, + HAL_FW_C2H_CMD_DTM_REPORT = 16, + HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, + HAL_FW_C2H_CMD_C2HLBK = 18, + HAL_FW_C2H_CMD_C2HDBG = 19, + HAL_FW_C2H_CMD_C2HFEEDBACK = 20, HAL_FW_C2H_CMD_MAX }; -enum wake_on_wlan_mode { - ewowlandisable, - ewakeonmagicpacketonly, - ewakeonpatternmatchonly, - ewakeonbothtypepacket -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c index f8daa61cf1c32df10b6ff69428d8aa4e434f090d..2aa34d9055f0e6eebbb9dc91f0c7a356a3a207a7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -188,21 +184,24 @@ static void rtl88e_set_iqk_matrix(struct ieee80211_hw *hw, switch (rfpath) { case RF90_PATH_A: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD, - value32); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, + value32); value32 = ((iqk_result_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), value32); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), + value32); break; case RF90_PATH_B: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; - rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL, - MASKDWORD, value32); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, + value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((iqk_result_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), value32); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), + value32); break; default: break; @@ -210,16 +209,20 @@ static void rtl88e_set_iqk_matrix(struct ieee80211_hw *hw, } else { switch (rfpath) { case RF90_PATH_A: - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD, - ofdmswing_table[ofdm_index]); - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), 0x00); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD, ofdmswing_table[ofdm_index]); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, + MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(24), 0x00); break; case RF90_PATH_B: - rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL, MASKDWORD, - ofdmswing_table[ofdm_index]); - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), 0x00); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, ofdmswing_table[ofdm_index]); + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, + MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(28), 0x00); break; default: break; @@ -244,7 +247,7 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw, pwr_val = ofdm_base - ofdm_val; } else { *pdirection = 2; - pwr_val = ofdm_val - ofdm_base; + pwr_val = ofdm_base - ofdm_val; } } else if (type == 1) { if (cck_val <= cck_base) { @@ -263,46 +266,75 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw, (pwr_val << 24); } - -static void rtl88e_chk_tx_track(struct ieee80211_hw *hw, - enum pwr_track_control_method method, - u8 rfpath, u8 index) +static void dm_tx_pwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rfpath, u8 channel_mapped_index) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - int jj = rtldm->swing_idx_cck; - int i; if (method == TXAGC) { - if (rtldm->swing_flag_ofdm == true || - rtldm->swing_flag_cck == true) { - u8 chan = rtlphy->current_channel; - rtl88e_phy_set_txpower_level(hw, chan); + if (rtldm->swing_flag_ofdm || + rtldm->swing_flag_cck) { + 
rtl88e_phy_set_txpower_level(hw, + rtlphy->current_channel); rtldm->swing_flag_ofdm = false; rtldm->swing_flag_cck = false; } } else if (method == BBSWING) { if (!rtldm->cck_inch14) { - for (i = 0; i < 8; i++) - rtl_write_byte(rtlpriv, 0xa22 + i, - cck_tbl_ch1_13[jj][i]); + rtl_write_byte(rtlpriv, 0xa22, + cck_tbl_ch1_13[rtldm->swing_idx_cck][0]); + rtl_write_byte(rtlpriv, 0xa23, + cck_tbl_ch1_13[rtldm->swing_idx_cck][1]); + rtl_write_byte(rtlpriv, 0xa24, + cck_tbl_ch1_13[rtldm->swing_idx_cck][2]); + rtl_write_byte(rtlpriv, 0xa25, + cck_tbl_ch1_13[rtldm->swing_idx_cck][3]); + rtl_write_byte(rtlpriv, 0xa26, + cck_tbl_ch1_13[rtldm->swing_idx_cck][4]); + rtl_write_byte(rtlpriv, 0xa27, + cck_tbl_ch1_13[rtldm->swing_idx_cck][5]); + rtl_write_byte(rtlpriv, 0xa28, + cck_tbl_ch1_13[rtldm->swing_idx_cck][6]); + rtl_write_byte(rtlpriv, 0xa29, + cck_tbl_ch1_13[rtldm->swing_idx_cck][7]); } else { - for (i = 0; i < 8; i++) - rtl_write_byte(rtlpriv, 0xa22 + i, - cck_tbl_ch14[jj][i]); + rtl_write_byte(rtlpriv, 0xa22, + cck_tbl_ch14[rtldm->swing_idx_cck][0]); + rtl_write_byte(rtlpriv, 0xa23, + cck_tbl_ch14[rtldm->swing_idx_cck][1]); + rtl_write_byte(rtlpriv, 0xa24, + cck_tbl_ch14[rtldm->swing_idx_cck][2]); + rtl_write_byte(rtlpriv, 0xa25, + cck_tbl_ch14[rtldm->swing_idx_cck][3]); + rtl_write_byte(rtlpriv, 0xa26, + cck_tbl_ch14[rtldm->swing_idx_cck][4]); + rtl_write_byte(rtlpriv, 0xa27, + cck_tbl_ch14[rtldm->swing_idx_cck][5]); + rtl_write_byte(rtlpriv, 0xa28, + cck_tbl_ch14[rtldm->swing_idx_cck][6]); + rtl_write_byte(rtlpriv, 0xa29, + cck_tbl_ch14[rtldm->swing_idx_cck][7]); } if (rfpath == RF90_PATH_A) { - long x = rtlphy->iqk_matrix[index].value[0][0]; - long y = rtlphy->iqk_matrix[index].value[0][1]; - u8 indx = rtldm->swing_idx_ofdm[rfpath]; - rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y); + rtl88e_set_iqk_matrix(hw, rtldm->swing_idx_ofdm[rfpath], + rfpath, rtlphy->iqk_matrix + [channel_mapped_index]. + value[0][0], + rtlphy->iqk_matrix + [channel_mapped_index]. + value[0][1]); } else if (rfpath == RF90_PATH_B) { - u8 indx = rtldm->swing_idx_ofdm[rfpath]; - long x = rtlphy->iqk_matrix[indx].value[0][4]; - long y = rtlphy->iqk_matrix[indx].value[0][5]; - rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y); + rtl88e_set_iqk_matrix(hw, rtldm->swing_idx_ofdm[rfpath], + rfpath, rtlphy->iqk_matrix + [channel_mapped_index]. + value[0][4], + rtlphy->iqk_matrix + [channel_mapped_index]. + value[0][5]); } } else { return; @@ -317,7 +349,7 @@ static void rtl88e_dm_diginit(struct ieee80211_hw *hw) dm_dig->dig_enable_flag = true; dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); dm_dig->pre_igvalue = 0; - dm_dig->cursta_cstate = DIG_STA_DISCONNECT; + dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; dm_dig->presta_cstate = DIG_STA_DISCONNECT; dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT; dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW; @@ -348,22 +380,23 @@ static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) long rssi_val_min = 0; if ((dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) && - (dm_dig->cursta_cstate == DIG_STA_CONNECT)) { + (dm_dig->cur_sta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) rssi_val_min = (rtlpriv->dm.entry_min_undec_sm_pwdb > - rtlpriv->dm.undec_sm_pwdb) ? + rtlpriv->dm.undec_sm_pwdb) ? 
rtlpriv->dm.undec_sm_pwdb : rtlpriv->dm.entry_min_undec_sm_pwdb; else rssi_val_min = rtlpriv->dm.undec_sm_pwdb; - } else if (dm_dig->cursta_cstate == DIG_STA_CONNECT || - dm_dig->cursta_cstate == DIG_STA_BEFORE_CONNECT) { + } else if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT || + dm_dig->cur_sta_cstate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } + return (u8)rssi_val_min; } @@ -371,57 +404,58 @@ static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct false_alarm_statistics *alm_cnt = &(rtlpriv->falsealm_cnt); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); - alm_cnt->cnt_fast_fsync_fail = (ret_value&0xffff); - alm_cnt->cnt_sb_search_fail = ((ret_value&0xffff0000)>>16); + falsealm_cnt->cnt_fast_fsync_fail = (ret_value&0xffff); + falsealm_cnt->cnt_sb_search_fail = ((ret_value&0xffff0000)>>16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); - alm_cnt->cnt_ofdm_cca = (ret_value&0xffff); - alm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); + falsealm_cnt->cnt_ofdm_cca = (ret_value&0xffff); + falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); - alm_cnt->cnt_rate_illegal = (ret_value & 0xffff); - alm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); + falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); + falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); - alm_cnt->cnt_mcs_fail = (ret_value & 0xffff); - alm_cnt->cnt_ofdm_fail = alm_cnt->cnt_parity_fail + - alm_cnt->cnt_rate_illegal + - alm_cnt->cnt_crc8_fail + - alm_cnt->cnt_mcs_fail + - alm_cnt->cnt_fast_fsync_fail + - alm_cnt->cnt_sb_search_fail; + falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail; ret_value = rtl_get_bbreg(hw, REG_SC_CNT, MASKDWORD); - alm_cnt->cnt_bw_lsc = (ret_value & 0xffff); - alm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16); + falsealm_cnt->cnt_bw_lsc = (ret_value & 0xffff); + falsealm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(12), 1); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); - alm_cnt->cnt_cck_fail = ret_value; + falsealm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); - alm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; + falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; ret_value = rtl_get_bbreg(hw, RCCK0_CCA_CNT, MASKDWORD); - alm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) | - ((ret_value&0xFF00)>>8); - - alm_cnt->cnt_all = alm_cnt->cnt_fast_fsync_fail + - alm_cnt->cnt_sb_search_fail + - alm_cnt->cnt_parity_fail + - alm_cnt->cnt_rate_illegal + - alm_cnt->cnt_crc8_fail + - alm_cnt->cnt_mcs_fail + - alm_cnt->cnt_cck_fail; - alm_cnt->cnt_cca_all = alm_cnt->cnt_ofdm_cca + alm_cnt->cnt_cck_cca; + falsealm_cnt->cnt_cck_cca = 
((ret_value & 0xff) << 8) | + ((ret_value&0xFF00)>>8); + + falsealm_cnt->cnt_all = (falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail + + falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_cck_fail); + falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca + + falsealm_cnt->cnt_cck_cca; rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 1); rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 0); @@ -435,16 +469,15 @@ static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "cnt_parity_fail = %d, cnt_rate_illegal = %d, " - "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", - alm_cnt->cnt_parity_fail, - alm_cnt->cnt_rate_illegal, - alm_cnt->cnt_crc8_fail, alm_cnt->cnt_mcs_fail); + "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + falsealm_cnt->cnt_parity_fail, + falsealm_cnt->cnt_rate_illegal, + falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", - alm_cnt->cnt_ofdm_fail, - alm_cnt->cnt_cck_fail, alm_cnt->cnt_all); + falsealm_cnt->cnt_ofdm_fail, + falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); } static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) @@ -453,7 +486,7 @@ static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) struct dig_t *dm_dig = &rtlpriv->dm_digtable; u8 cur_cck_cca_thresh; - if (dm_dig->cursta_cstate == DIG_STA_CONNECT) { + if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT) { dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw); if (dm_dig->rssi_val_min > 25) { cur_cck_cca_thresh = 0xcd; @@ -486,10 +519,10 @@ static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) static void rtl88e_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *dm_dig = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 dig_min, dig_maxofmin; + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + u8 dig_dynamic_min, dig_maxofmin; bool bfirstconnect; u8 dm_dig_max, dm_dig_min; u8 current_igi = dm_dig->cur_igvalue; @@ -502,19 +535,19 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) return; if (mac->link_state >= MAC80211_LINKED) - dm_dig->cursta_cstate = DIG_STA_CONNECT; + dm_dig->cur_sta_cstate = DIG_STA_CONNECT; else - dm_dig->cursta_cstate = DIG_STA_DISCONNECT; + dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) - dm_dig->cursta_cstate = DIG_STA_DISCONNECT; + dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT; dm_dig_max = DM_DIG_MAX; dm_dig_min = DM_DIG_MIN; dig_maxofmin = DM_DIG_MAX_AP; - dig_min = dm_dig->dig_min_0; + dig_dynamic_min = dm_dig->dig_min_0; bfirstconnect = ((mac->link_state >= MAC80211_LINKED) ? 
true : false) && - (dm_dig->media_connect_0 == false); + !dm_dig->media_connect_0; dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw); @@ -528,18 +561,18 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) dm_dig->rx_gain_max = dm_dig->rssi_val_min + 20; if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { - dig_min = dm_dig->antdiv_rssi_max; + dig_dynamic_min = dm_dig->antdiv_rssi_max; } else { if (dm_dig->rssi_val_min < dm_dig_min) - dig_min = dm_dig_min; + dig_dynamic_min = dm_dig_min; else if (dm_dig->rssi_val_min < dig_maxofmin) - dig_min = dig_maxofmin; + dig_dynamic_min = dig_maxofmin; else - dig_min = dm_dig->rssi_val_min; + dig_dynamic_min = dm_dig->rssi_val_min; } } else { dm_dig->rx_gain_max = dm_dig_max; - dig_min = dm_dig_min; + dig_dynamic_min = dm_dig_min; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); } @@ -551,10 +584,13 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) } if (dm_dig->large_fa_hit >= 3) { - if ((dm_dig->forbidden_igi + 1) > dm_dig->rx_gain_max) - dm_dig->rx_gain_min = dm_dig->rx_gain_max; + if ((dm_dig->forbidden_igi + 1) > + dm_dig->rx_gain_max) + dm_dig->rx_gain_min = + dm_dig->rx_gain_max; else - dm_dig->rx_gain_min = dm_dig->forbidden_igi + 1; + dm_dig->rx_gain_min = + dm_dig->forbidden_igi + 1; dm_dig->recover_cnt = 3600; } } else { @@ -562,13 +598,14 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) dm_dig->recover_cnt--; } else { if (dm_dig->large_fa_hit == 0) { - if ((dm_dig->forbidden_igi - 1) < dig_min) { - dm_dig->forbidden_igi = dig_min; - dm_dig->rx_gain_min = dig_min; + if ((dm_dig->forbidden_igi - 1) < + dig_dynamic_min) { + dm_dig->forbidden_igi = dig_dynamic_min; + dm_dig->rx_gain_min = dig_dynamic_min; } else { dm_dig->forbidden_igi--; dm_dig->rx_gain_min = - dm_dig->forbidden_igi + 1; + dm_dig->forbidden_igi + 1; } } else if (dm_dig->large_fa_hit == 3) { dm_dig->large_fa_hit = 0; @@ -576,7 +613,7 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) } } - if (dm_dig->cursta_cstate == DIG_STA_CONNECT) { + if (dm_dig->cur_sta_cstate == DIG_STA_CONNECT) { if (bfirstconnect) { current_igi = dm_dig->rssi_val_min; } else { @@ -606,9 +643,9 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw) dm_dig->cur_igvalue = current_igi; rtl88e_dm_write_dig(hw); - dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? - true : false); - dm_dig->dig_min_0 = dig_min; + dm_dig->media_connect_0 = + ((mac->link_state >= MAC80211_LINKED) ? 
true : false); + dm_dig->dig_min_0 = dig_dynamic_min; rtl88e_dm_cck_packet_detection_thresh(hw); } @@ -626,7 +663,7 @@ static void rtl88e_dm_init_dynamic_txpower(struct ieee80211_hw *hw) static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long undec_sm_pwdb; @@ -641,7 +678,7 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - "Not connected\n"); + "Not connected to any\n"); rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; @@ -664,10 +701,12 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) undec_sm_pwdb); } } else { - undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb; + undec_sm_pwdb = + rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, - "AP Ext Port PWDB = 0x%lx\n", undec_sm_pwdb); + "AP Ext Port PWDB = 0x%lx\n", + undec_sm_pwdb); } if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { @@ -676,17 +715,20 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n"); } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && - (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) { + (undec_sm_pwdb >= + TX_POWER_NEAR_FIELD_THRESH_LVL1)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n"); - } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { + } else if (undec_sm_pwdb < + (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_NORMAL\n"); } - if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) { + if ((rtlpriv->dm.dynamic_txhighpower_lvl != + rtlpriv->dm.last_dtp_lvl)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "PHY_SetTxPowerLevel8192S() Channel = %d\n", rtlphy->current_channel); @@ -702,10 +744,9 @@ void rtl88e_dm_write_dig(struct ieee80211_hw *hw) struct dig_t *dm_dig = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - "cur_igvalue = 0x%x, " - "pre_igvalue = 0x%x, back_val = %d\n", - dm_dig->cur_igvalue, dm_dig->pre_igvalue, - dm_dig->back_val); + "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", + dm_dig->cur_igvalue, dm_dig->pre_igvalue, + dm_dig->back_val); if (dm_dig->cur_igvalue > 0x3f) dm_dig->cur_igvalue = 0x3f; @@ -722,17 +763,19 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_sta_info *drv_priv; - static u64 last_txok; - static u64 last_rx; + static u64 last_record_txok_cnt; + static u64 last_record_rxok_cnt; long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; if (rtlhal->oem_id == RT_CID_819X_HP) { u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; - cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok; - cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rx; - last_txok = cur_txok_cnt; - last_rx = cur_rxok_cnt; + cur_txok_cnt = rtlpriv->stats.txbytesunicast - + last_record_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - + last_record_rxok_cnt; + last_record_txok_cnt = cur_txok_cnt; + last_record_rxok_cnt = cur_rxok_cnt; if (cur_rxok_cnt > (cur_txok_cnt * 6)) 
rtl_write_dword(rtlpriv, REG_ARFR0, 0x8f015); @@ -743,9 +786,11 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw) /* AP & ADHOC & MESH */ spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { - if (drv_priv->rssi_stat.undec_sm_pwdb < tmp_entry_min_pwdb) + if (drv_priv->rssi_stat.undec_sm_pwdb < + tmp_entry_min_pwdb) tmp_entry_min_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; - if (drv_priv->rssi_stat.undec_sm_pwdb > tmp_entry_max_pwdb) + if (drv_priv->rssi_stat.undec_sm_pwdb > + tmp_entry_max_pwdb) tmp_entry_max_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); @@ -762,13 +807,19 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw) if (tmp_entry_min_pwdb != 0xff) { rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb; RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMinPWDB = 0x%lx(%ld)\n", - tmp_entry_min_pwdb, tmp_entry_min_pwdb); + tmp_entry_min_pwdb, tmp_entry_min_pwdb); } else { rtlpriv->dm.entry_min_undec_sm_pwdb = 0; } /* Indicate Rx signal strength to FW. */ - if (!rtlpriv->dm.useramask) + if (rtlpriv->dm.useramask) { + u8 h2c_parameter[3] = { 0 }; + + h2c_parameter[2] = (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF); + h2c_parameter[0] = 0x20; + } else { rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); + } } void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw) @@ -783,7 +834,6 @@ void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw) static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; @@ -793,40 +843,33 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; - bool change_edca = false; + bool bt_change_edca = false; - if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || - (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { + if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) || + (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; - last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; - last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; + last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul; + last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl; } - if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; - change_edca = true; + if (rtlpriv->btcoexist.bt_edca_ul != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_ul; + bt_change_edca = true; } - if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl; - change_edca = true; + if (rtlpriv->btcoexist.bt_edca_dl != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + bt_change_edca = true; } if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; return; } + if ((bt_change_edca) || + ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting))) { - if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { - if (!(edca_be_ul & 0xffff0000)) - edca_be_ul |= 0x005e0000; - - if (!(edca_be_dl & 0xffff0000)) - edca_be_dl |= 0x005e0000; - } - - if ((change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && - (!rtlpriv->dm.disable_framebursting))) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; @@ 
-851,7 +894,9 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, &tmp); rtlpriv->dm.current_turbo_edca = false; } @@ -862,29 +907,29 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } -static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw - *hw) +static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 thermalvalue = 0, delta, delta_lck, delta_iqk, off; - u8 th_avg_cnt = 0; + u8 thermalvalue = 0, delta, delta_lck, delta_iqk, offset; + u8 thermalvalue_avg_count = 0; u32 thermalvalue_avg = 0; long ele_d, temp_cck; - char ofdm_index[2], cck_index = 0, ofdm_old[2] = {0, 0}, cck_old = 0; + char ofdm_index[2], cck_index = 0, + ofdm_index_old[2] = {0, 0}, cck_index_old = 0; int i = 0; - bool is2t = false; + /*bool is2t = false;*/ - u8 ofdm_min_index = 6, rf = (is2t) ? 2 : 1; - u8 index_for_channel; - enum _dec_inc {dec, power_inc}; + u8 ofdm_min_index = 6, rf = 1; + /*u8 index_for_channel;*/ + enum _power_dec_inc {power_dec, power_inc}; - /* 0.1 the following TWO tables decide the final index of - * OFDM/CCK swing table + /*0.1 the following TWO tables decide the + *final index of OFDM/CCK swing table */ - char del_tbl_idx[2][15] = { + char delta_swing_table_idx[2][15] = { {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10} }; @@ -896,9 +941,10 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw /*Initilization (7 steps in total) */ rtlpriv->dm.txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "rtl88e_dm_txpower_tracking_callback_thermalmeter\n"); + "dm_txpower_track_cb_therm\n"); - thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00); + thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, + 0xfc00); if (!thermalvalue) return; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, @@ -907,55 +953,44 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw rtlefuse->eeprom_thermalmeter); /*1. 
Query OFDM Default Setting: Path A*/ - ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD) & MASKOFDM_D; + ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & + MASKOFDM_D; for (i = 0; i < OFDM_TABLE_LENGTH; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_old[0] = (u8) i; - rtldm->swing_idx_ofdm_base[0] = (u8)i; + ofdm_index_old[0] = (u8)i; + rtldm->swing_idx_ofdm_base[RF90_PATH_A] = (u8)i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n", - ROFDM0_XATXIQIMBAL, - ele_d, ofdm_old[0]); + ROFDM0_XATXIQIMBALANCE, + ele_d, ofdm_index_old[0]); break; } } - if (is2t) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBAL, - MASKDWORD) & MASKOFDM_D; - for (i = 0; i < OFDM_TABLE_LENGTH; i++) { - if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_old[1] = (u8)i; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, - DBG_LOUD, - "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n", - ROFDM0_XBTXIQIMBAL, ele_d, - ofdm_old[1]); - break; - } - } - } /*2.Query CCK default setting From 0xa24*/ temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK; for (i = 0; i < CCK_TABLE_LENGTH; i++) { if (rtlpriv->dm.cck_inch14) { if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) { - cck_old = (u8)i; + cck_index_old = (u8)i; rtldm->swing_idx_cck_base = (u8)i; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n", - RCCK0_TXFILTER2, temp_cck, cck_old, + RCCK0_TXFILTER2, temp_cck, + cck_index_old, rtlpriv->dm.cck_inch14); break; } } else { if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) { - cck_old = (u8)i; + cck_index_old = (u8)i; rtldm->swing_idx_cck_base = (u8)i; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n", - RCCK0_TXFILTER2, temp_cck, cck_old, + RCCK0_TXFILTER2, temp_cck, + cck_index_old, rtlpriv->dm.cck_inch14); break; } @@ -968,8 +1003,8 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw rtlpriv->dm.thermalvalue_lck = thermalvalue; rtlpriv->dm.thermalvalue_iqk = thermalvalue; for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] = ofdm_old[i]; - rtlpriv->dm.cck_index = cck_old; + rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; + rtlpriv->dm.cck_index = cck_index_old; } /*4 Calculate average thermal meter*/ @@ -981,12 +1016,12 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw for (i = 0; i < AVG_THERMAL_NUM_88E; i++) { if (rtldm->thermalvalue_avg[i]) { thermalvalue_avg += rtldm->thermalvalue_avg[i]; - th_avg_cnt++; + thermalvalue_avg_count++; } } - if (th_avg_cnt) - thermalvalue = (u8)(thermalvalue_avg / th_avg_cnt); + if (thermalvalue_avg_count) + thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); /* 5 Calculate delta, delta_LCK, delta_IQK.*/ if (rtlhal->reloadtxpowerindex) { @@ -997,24 +1032,22 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw rtlpriv->dm.done_txpower = false; } else if (rtlpriv->dm.done_txpower) { delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? - (thermalvalue - rtlpriv->dm.thermalvalue) : - (rtlpriv->dm.thermalvalue - thermalvalue); + (thermalvalue - rtlpriv->dm.thermalvalue) : + (rtlpriv->dm.thermalvalue - thermalvalue); } else { delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? 
- (thermalvalue - rtlefuse->eeprom_thermalmeter) : - (rtlefuse->eeprom_thermalmeter - thermalvalue); + (thermalvalue - rtlefuse->eeprom_thermalmeter) : + (rtlefuse->eeprom_thermalmeter - thermalvalue); } delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? - (thermalvalue - rtlpriv->dm.thermalvalue_lck) : - (rtlpriv->dm.thermalvalue_lck - thermalvalue); + (thermalvalue - rtlpriv->dm.thermalvalue_lck) : + (rtlpriv->dm.thermalvalue_lck - thermalvalue); delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? - (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : - (rtlpriv->dm.thermalvalue_iqk - thermalvalue); + (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : + (rtlpriv->dm.thermalvalue_iqk - thermalvalue); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Readback Thermal Meter = 0x%x pre thermal meter 0x%x " - "eeprom_thermalmeter 0x%x delta 0x%x " - "delta_lck 0x%x delta_iqk 0x%x\n", + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); @@ -1024,28 +1057,35 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw rtl88e_phy_lc_calibrate(hw); } - /* 7 If necessary, move the index of swing table to adjust Tx power. */ + /* 7 If necessary, move the index of + * swing table to adjust Tx power. + */ if (delta > 0 && rtlpriv->dm.txpower_track_control) { delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? - (thermalvalue - rtlefuse->eeprom_thermalmeter) : - (rtlefuse->eeprom_thermalmeter - thermalvalue); + (thermalvalue - rtlefuse->eeprom_thermalmeter) : + (rtlefuse->eeprom_thermalmeter - thermalvalue); /* 7.1 Get the final CCK_index and OFDM_index for each * swing table. 
*/ if (thermalvalue > rtlefuse->eeprom_thermalmeter) { - CAL_SWING_OFF(off, power_inc, IDX_MAP, delta); + CAL_SWING_OFF(offset, power_inc, INDEX_MAPPING_NUM, + delta); for (i = 0; i < rf; i++) - ofdm_index[i] = rtldm->ofdm_index[i] + - del_tbl_idx[power_inc][off]; + ofdm_index[i] = + rtldm->ofdm_index[i] + + delta_swing_table_idx[power_inc][offset]; cck_index = rtldm->cck_index + - del_tbl_idx[power_inc][off]; + delta_swing_table_idx[power_inc][offset]; } else { - CAL_SWING_OFF(off, dec, IDX_MAP, delta); + CAL_SWING_OFF(offset, power_dec, INDEX_MAPPING_NUM, + delta); for (i = 0; i < rf; i++) - ofdm_index[i] = rtldm->ofdm_index[i] + - del_tbl_idx[dec][off]; - cck_index = rtldm->cck_index + del_tbl_idx[dec][off]; + ofdm_index[i] = + rtldm->ofdm_index[i] + + delta_swing_table_idx[power_dec][offset]; + cck_index = rtldm->cck_index + + delta_swing_table_idx[power_dec][offset]; } /* 7.2 Handle boundary conditions of index.*/ @@ -1056,8 +1096,8 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw ofdm_index[i] = ofdm_min_index; } - if (cck_index > CCK_TABLE_SIZE - 1) - cck_index = CCK_TABLE_SIZE - 1; + if (cck_index > CCK_TABLE_SIZE-1) + cck_index = CCK_TABLE_SIZE-1; else if (cck_index < 0) cck_index = 0; @@ -1065,10 +1105,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw if (rtlpriv->dm.txpower_track_control) { rtldm->done_txpower = true; rtldm->swing_idx_ofdm[RF90_PATH_A] = - (u8)ofdm_index[RF90_PATH_A]; - if (is2t) - rtldm->swing_idx_ofdm[RF90_PATH_B] = - (u8)ofdm_index[RF90_PATH_B]; + (u8)ofdm_index[RF90_PATH_A]; rtldm->swing_idx_cck = cck_index; if (rtldm->swing_idx_ofdm_cur != rtldm->swing_idx_ofdm[0]) { @@ -1082,12 +1119,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw rtldm->swing_flag_cck = true; } - rtl88e_chk_tx_track(hw, TXAGC, 0, 0); - - if (is2t) - rtl88e_chk_tx_track(hw, BBSWING, - RF90_PATH_B, - index_for_channel); + dm_tx_pwr_track_set_pwr(hw, TXAGC, 0, 0); } } @@ -1115,7 +1147,7 @@ static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw) rtlpriv->dm.swing_idx_ofdm_cur = 12; rtlpriv->dm.swing_flag_ofdm = false; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - " rtlpriv->dm.txpower_tracking = %d\n", + "rtlpriv->dm.txpower_tracking = %d\n", rtlpriv->dm.txpower_tracking); } @@ -1137,7 +1169,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw) } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking !!\n"); - rtl88e_dm_txpower_tracking_callback_thermalmeter(hw); + dm_txpower_track_cb_therm(hw); tm_trigger = 0; } } @@ -1145,7 +1177,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw) void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *p_ra = &(rtlpriv->ra); + struct rate_adaptive *p_ra = &rtlpriv->ra; p_ra->ratr_state = DM_RATR_STA_INIT; p_ra->pre_ratr_state = DM_RATR_STA_INIT; @@ -1161,9 +1193,9 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rate_adaptive *p_ra = &(rtlpriv->ra); + struct rate_adaptive *p_ra = &rtlpriv->ra; + u32 low_rssithresh_for_ra, high_rssithresh_for_ra; struct ieee80211_sta *sta = NULL; - u32 low_rssi, hi_rssi; if (is_hal_stop(rtlhal)) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, @@ -1181,26 +1213,28 @@ static void 
rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) mac->opmode == NL80211_IFTYPE_STATION) { switch (p_ra->pre_ratr_state) { case DM_RATR_STA_HIGH: - hi_rssi = 50; - low_rssi = 20; + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; break; case DM_RATR_STA_MIDDLE: - hi_rssi = 55; - low_rssi = 20; + high_rssithresh_for_ra = 55; + low_rssithresh_for_ra = 20; break; case DM_RATR_STA_LOW: - hi_rssi = 50; - low_rssi = 25; + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 25; break; default: - hi_rssi = 50; - low_rssi = 20; + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; break; } - if (rtlpriv->dm.undec_sm_pwdb > (long)hi_rssi) + if (rtlpriv->dm.undec_sm_pwdb > + (long)high_rssithresh_for_ra) p_ra->ratr_state = DM_RATR_STA_HIGH; - else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi) + else if (rtlpriv->dm.undec_sm_pwdb > + (long)low_rssithresh_for_ra) p_ra->ratr_state = DM_RATR_STA_MIDDLE; else p_ra->ratr_state = DM_RATR_STA_LOW; @@ -1208,7 +1242,7 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) if (p_ra->pre_ratr_state != p_ra->ratr_state) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n", - rtlpriv->dm.undec_sm_pwdb); + rtlpriv->dm.undec_sm_pwdb); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI_LEVEL = %d\n", p_ra->ratr_state); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, @@ -1219,7 +1253,7 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) sta = rtl_find_sta(hw, mac->bssid); if (sta) rtlpriv->cfg->ops->update_rate_tbl(hw, sta, - p_ra->ratr_state); + p_ra->ratr_state); rcu_read_unlock(); p_ra->pre_ratr_state = p_ra->ratr_state; @@ -1239,56 +1273,62 @@ static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) dm_pstable->rssi_val_min = 0; } -static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw, u8 ant) +static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw, + u8 ant) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); - u32 def_ant, opt_ant; + struct fast_ant_training *pfat_table = &rtldm->fat_table; + u32 default_ant, optional_ant; - if (fat_tbl->rx_idle_ant != ant) { + if (pfat_table->rx_idle_ant != ant) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "need to update rx idle ant\n"); if (ant == MAIN_ANT) { - def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? - MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; - opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? - AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; + default_ant = + (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? + MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; + optional_ant = + (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? + AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; } else { - def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? - AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; - opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? - MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; + default_ant = + (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? + AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; + optional_ant = + (pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ? 
+ MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; } if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | - BIT(4) | BIT(3), def_ant); - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | - BIT(7) | BIT(6), opt_ant); - rtl_set_bbreg(hw, DM_REG_ANTSEL_CTRL_11N, BIT(14) | - BIT(13) | BIT(12), def_ant); - rtl_set_bbreg(hw, DM_REG_RESP_TX_11N, BIT(6) | BIT(7), - def_ant); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(5) | BIT(4) | BIT(3), default_ant); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(8) | BIT(7) | BIT(6), optional_ant); + rtl_set_bbreg(hw, DM_REG_ANTSEL_CTRL_11N, + BIT(14) | BIT(13) | BIT(12), + default_ant); + rtl_set_bbreg(hw, DM_REG_RESP_TX_11N, + BIT(6) | BIT(7), default_ant); } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) { - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | - BIT(4) | BIT(3), def_ant); - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | - BIT(7) | BIT(6), opt_ant); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(5) | BIT(4) | BIT(3), default_ant); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(8) | BIT(7) | BIT(6), optional_ant); } } - fat_tbl->rx_idle_ant = ant; + pfat_table->rx_idle_ant = ant; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n", - ((ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"))); + (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT")); } static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw, - u8 ant, u32 mac_id) + u8 ant, u32 mac_id) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; u8 target_ant; if (ant == MAIN_ANT) @@ -1296,23 +1336,25 @@ static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw, else target_ant = AUX_ANT_CG_TRX; - fat_tbl->antsel_a[mac_id] = target_ant & BIT(0); - fat_tbl->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1; - fat_tbl->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2; + pfat_table->antsel_a[mac_id] = target_ant & BIT(0); + pfat_table->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1; + pfat_table->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n", - ((ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"))); + (ant == MAIN_ANT) ? 
("MAIN_ANT") : ("AUX_ANT")); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n", - fat_tbl->antsel_c[mac_id], - fat_tbl->antsel_b[mac_id], fat_tbl->antsel_a[mac_id]); + pfat_table->antsel_c[mac_id], + pfat_table->antsel_b[mac_id], + pfat_table->antsel_a[mac_id]); } static void rtl88e_dm_rx_hw_antena_div_init(struct ieee80211_hw *hw) { u32 value32; + /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); - rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | - (BIT(23) | BIT(25))); + rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, + MASKDWORD, value32 | (BIT(23) | BIT(25))); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0); @@ -1333,8 +1375,8 @@ static void rtl88e_dm_trx_hw_antenna_div_init(struct ieee80211_hw *hw) /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); - rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | - (BIT(23) | BIT(25))); + rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, + value32 | (BIT(23) | BIT(25))); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0); @@ -1354,28 +1396,30 @@ static void rtl88e_dm_trx_hw_antenna_div_init(struct ieee80211_hw *hw) static void rtl88e_dm_fast_training_init(struct ieee80211_hw *hw) { struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); - u32 ant_combo = 2; + struct fast_ant_training *pfat_table = &rtldm->fat_table; + u32 ant_combination = 2; u32 value32, i; for (i = 0; i < 6; i++) { - fat_tbl->bssid[i] = 0; - fat_tbl->ant_sum[i] = 0; - fat_tbl->ant_cnt[i] = 0; - fat_tbl->ant_ave[i] = 0; + pfat_table->bssid[i] = 0; + pfat_table->ant_sum[i] = 0; + pfat_table->ant_cnt[i] = 0; + pfat_table->ant_ave[i] = 0; } - fat_tbl->train_idx = 0; - fat_tbl->fat_state = FAT_NORMAL_STATE; + pfat_table->train_idx = 0; + pfat_table->fat_state = FAT_NORMAL_STATE; /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); - rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | (BIT(23) | - BIT(25))); - value32 = rtl_get_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD); - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD, value32 | (BIT(16) | - BIT(17))); - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKLWORD, 0); - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, MASKDWORD, 0); + rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, + MASKDWORD, value32 | (BIT(23) | BIT(25))); + value32 = rtl_get_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, MASKDWORD); + rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, + MASKDWORD, value32 | (BIT(16) | BIT(17))); + rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, + MASKLWORD, 0); + rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA1_11N, + MASKDWORD, 0); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); @@ -1386,26 +1430,17 @@ static void rtl88e_dm_fast_training_init(struct ieee80211_hw *hw) /*OFDM Setting*/ rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0); /*antenna mapping table*/ - if (ant_combo == 2) { - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2); - } else if (ant_combo == 7) { - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE2, 2); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE3, 3); - rtl_set_bbreg(hw, 
DM_REG_ANT_MAPPING2_11N, MASKBYTE0, 4); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE1, 5); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE2, 6); - rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE3, 7); - } + rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1); + rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2); /*TX Setting*/ rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), 0); - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), 1); - rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(2) | BIT(1) | BIT(0), - (ant_combo - 1)); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(5) | BIT(4) | BIT(3), 0); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(8) | BIT(7) | BIT(6), 1); + rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, + BIT(2) | BIT(1) | BIT(0), (ant_combination - 1)); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1); } @@ -1420,6 +1455,7 @@ static void rtl88e_dm_antenna_div_init(struct ieee80211_hw *hw) rtl88e_dm_trx_hw_antenna_div_init(hw); else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) rtl88e_dm_fast_training_init(hw); + } void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, @@ -1427,38 +1463,39 @@ void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || - (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)) { - SET_TX_DESC_ANTSEL_A(pdesc, fat_tbl->antsel_a[mac_id]); - SET_TX_DESC_ANTSEL_B(pdesc, fat_tbl->antsel_b[mac_id]); - SET_TX_DESC_ANTSEL_C(pdesc, fat_tbl->antsel_c[mac_id]); + (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)) { + SET_TX_DESC_ANTSEL_A(pdesc, pfat_table->antsel_a[mac_id]); + SET_TX_DESC_ANTSEL_B(pdesc, pfat_table->antsel_b[mac_id]); + SET_TX_DESC_ANTSEL_C(pdesc, pfat_table->antsel_c[mac_id]); } } void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, - u8 antsel_tr_mux, u32 mac_id, u32 rx_pwdb_all) + u8 antsel_tr_mux, u32 mac_id, + u32 rx_pwdb_all) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { if (antsel_tr_mux == MAIN_ANT_CG_TRX) { - fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all; - fat_tbl->main_ant_cnt[mac_id]++; + pfat_table->main_ant_sum[mac_id] += rx_pwdb_all; + pfat_table->main_ant_cnt[mac_id]++; } else { - fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all; - fat_tbl->aux_ant_cnt[mac_id]++; + pfat_table->aux_ant_sum[mac_id] += rx_pwdb_all; + pfat_table->aux_ant_cnt[mac_id]++; } } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) { if (antsel_tr_mux == MAIN_ANT_CGCS_RX) { - fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all; - fat_tbl->main_ant_cnt[mac_id]++; + pfat_table->main_ant_sum[mac_id] += rx_pwdb_all; + pfat_table->main_ant_cnt[mac_id]++; } else { - fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all; - fat_tbl->aux_ant_cnt[mac_id]++; + pfat_table->aux_ant_sum[mac_id] += rx_pwdb_all; + pfat_table->aux_ant_cnt[mac_id]++; } } } @@ -1466,43 +1503,43 @@ void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t 
*dm_dig = &rtlpriv->dm_digtable; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_sta_info *drv_priv; - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); - u32 i, min_rssi = 0xff, ant_div_max_rssi = 0, max_rssi = 0; - u32 local_min_rssi, local_max_rssi; + struct fast_ant_training *pfat_table = &rtldm->fat_table; + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + u32 i, min_rssi = 0xff, ant_div_max_rssi = 0; + u32 max_rssi = 0, local_min_rssi, local_max_rssi; u32 main_rssi, aux_rssi; u8 rx_idle_ant = 0, target_ant = 7; + /*for sta its self*/ i = 0; - main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ? - (fat_tbl->main_ant_sum[i] / - fat_tbl->main_ant_cnt[i]) : 0; - aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ? - (fat_tbl->aux_ant_sum[i] / fat_tbl->aux_ant_cnt[i]) : 0; + main_rssi = (pfat_table->main_ant_cnt[i] != 0) ? + (pfat_table->main_ant_sum[i] / pfat_table->main_ant_cnt[i]) : 0; + aux_rssi = (pfat_table->aux_ant_cnt[i] != 0) ? + (pfat_table->aux_ant_sum[i] / pfat_table->aux_ant_cnt[i]) : 0; target_ant = (main_rssi == aux_rssi) ? - fat_tbl->rx_idle_ant : ((main_rssi >= aux_rssi) ? - MAIN_ANT : AUX_ANT); + pfat_table->rx_idle_ant : ((main_rssi >= aux_rssi) ? + MAIN_ANT : AUX_ANT); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "main_ant_sum %d main_ant_cnt %d\n", - fat_tbl->main_ant_sum[i], fat_tbl->main_ant_cnt[i]); + "main_ant_sum %d main_ant_cnt %d\n", + pfat_table->main_ant_sum[i], + pfat_table->main_ant_cnt[i]); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "aux_ant_sum %d aux_ant_cnt %d\n", - fat_tbl->aux_ant_sum[i], - fat_tbl->aux_ant_cnt[i]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "main_rssi %d aux_rssi%d\n", main_rssi, aux_rssi); + pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n", + main_rssi, aux_rssi); local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi; if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40)) ant_div_max_rssi = local_max_rssi; if (local_max_rssi > max_rssi) max_rssi = local_max_rssi; - if ((fat_tbl->rx_idle_ant == MAIN_ANT) && (main_rssi == 0)) + if ((pfat_table->rx_idle_ant == MAIN_ANT) && (main_rssi == 0)) main_rssi = aux_rssi; - else if ((fat_tbl->rx_idle_ant == AUX_ANT) && (aux_rssi == 0)) + else if ((pfat_table->rx_idle_ant == AUX_ANT) && (aux_rssi == 0)) aux_rssi = main_rssi; local_min_rssi = (main_rssi > aux_rssi) ? aux_rssi : main_rssi; @@ -1518,32 +1555,33 @@ static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw) spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { i++; - main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ? - (fat_tbl->main_ant_sum[i] / - fat_tbl->main_ant_cnt[i]) : 0; - aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ? - (fat_tbl->aux_ant_sum[i] / - fat_tbl->aux_ant_cnt[i]) : 0; + main_rssi = (pfat_table->main_ant_cnt[i] != 0) ? + (pfat_table->main_ant_sum[i] / + pfat_table->main_ant_cnt[i]) : 0; + aux_rssi = (pfat_table->aux_ant_cnt[i] != 0) ? + (pfat_table->aux_ant_sum[i] / + pfat_table->aux_ant_cnt[i]) : 0; target_ant = (main_rssi == aux_rssi) ? - fat_tbl->rx_idle_ant : ((main_rssi >= - aux_rssi) ? MAIN_ANT : AUX_ANT); - + pfat_table->rx_idle_ant : ((main_rssi >= + aux_rssi) ? MAIN_ANT : AUX_ANT); - local_max_rssi = max_t(u32, main_rssi, aux_rssi); + local_max_rssi = (main_rssi > aux_rssi) ? 
+ main_rssi : aux_rssi; if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40)) ant_div_max_rssi = local_max_rssi; if (local_max_rssi > max_rssi) max_rssi = local_max_rssi; - if ((fat_tbl->rx_idle_ant == MAIN_ANT) && !main_rssi) + if ((pfat_table->rx_idle_ant == MAIN_ANT) && + (main_rssi == 0)) main_rssi = aux_rssi; - else if ((fat_tbl->rx_idle_ant == AUX_ANT) && + else if ((pfat_table->rx_idle_ant == AUX_ANT) && (aux_rssi == 0)) aux_rssi = main_rssi; local_min_rssi = (main_rssi > aux_rssi) ? - aux_rssi : main_rssi; + aux_rssi : main_rssi; if (local_min_rssi < min_rssi) { min_rssi = local_min_rssi; rx_idle_ant = target_ant; @@ -1555,10 +1593,10 @@ static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw) } for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) { - fat_tbl->main_ant_sum[i] = 0; - fat_tbl->aux_ant_sum[i] = 0; - fat_tbl->main_ant_cnt[i] = 0; - fat_tbl->aux_ant_cnt[i] = 0; + pfat_table->main_ant_sum[i] = 0; + pfat_table->aux_ant_sum[i] = 0; + pfat_table->main_ant_cnt[i] = 0; + pfat_table->aux_ant_cnt[i] = 0; } rtl88e_dm_update_rx_idle_ant(hw, rx_idle_ant); @@ -1573,27 +1611,27 @@ static void rtl88e_set_next_mac_address_target(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_sta_info *drv_priv; - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; u32 value32, i, j = 0; if (mac->link_state >= MAC80211_LINKED) { for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) { - if ((fat_tbl->train_idx + 1) == ASSOCIATE_ENTRY_NUM) - fat_tbl->train_idx = 0; + if ((pfat_table->train_idx + 1) == ASSOCIATE_ENTRY_NUM) + pfat_table->train_idx = 0; else - fat_tbl->train_idx++; + pfat_table->train_idx++; - if (fat_tbl->train_idx == 0) { + if (pfat_table->train_idx == 0) { value32 = (mac->mac_addr[5] << 8) | - mac->mac_addr[4]; - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, + mac->mac_addr[4]; + rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA2_11N, MASKLWORD, value32); value32 = (mac->mac_addr[3] << 24) | (mac->mac_addr[2] << 16) | (mac->mac_addr[1] << 8) | - mac->mac_addr[0]; - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, + mac->mac_addr[0]; + rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_PARA1_11N, MASKDWORD, value32); break; } @@ -1602,28 +1640,29 @@ static void rtl88e_set_next_mac_address_target(struct ieee80211_hw *hw) NL80211_IFTYPE_STATION) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, - &rtlpriv->entry_list, - list) { + &rtlpriv->entry_list, list) { j++; - if (j != fat_tbl->train_idx) + if (j != pfat_table->train_idx) continue; value32 = (drv_priv->mac_addr[5] << 8) | - drv_priv->mac_addr[4]; - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, + drv_priv->mac_addr[4]; + rtl_set_bbreg(hw, + DM_REG_ANT_TRAIN_PARA2_11N, MASKLWORD, value32); - value32 = (drv_priv->mac_addr[3]<<24) | - (drv_priv->mac_addr[2]<<16) | - (drv_priv->mac_addr[1]<<8) | - drv_priv->mac_addr[0]; - rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, + value32 = (drv_priv->mac_addr[3] << 24) | + (drv_priv->mac_addr[2] << 16) | + (drv_priv->mac_addr[1] << 8) | + drv_priv->mac_addr[0]; + rtl_set_bbreg(hw, + DM_REG_ANT_TRAIN_PARA1_11N, MASKDWORD, value32); break; } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); /*find entry, break*/ - if (j == fat_tbl->train_idx) + if (j == pfat_table->train_idx) break; } } @@ -1634,23 +1673,24 @@ static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = 
&(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; u32 i, max_rssi = 0; u8 target_ant = 2; bool bpkt_filter_match = false; - if (fat_tbl->fat_state == FAT_TRAINING_STATE) { + if (pfat_table->fat_state == FAT_TRAINING_STATE) { for (i = 0; i < 7; i++) { - if (fat_tbl->ant_cnt[i] == 0) { - fat_tbl->ant_ave[i] = 0; + if (pfat_table->ant_cnt[i] == 0) { + pfat_table->ant_ave[i] = 0; } else { - fat_tbl->ant_ave[i] = fat_tbl->ant_sum[i] / - fat_tbl->ant_cnt[i]; + pfat_table->ant_ave[i] = + pfat_table->ant_sum[i] / + pfat_table->ant_cnt[i]; bpkt_filter_match = true; } - if (fat_tbl->ant_ave[i] > max_rssi) { - max_rssi = fat_tbl->ant_ave[i]; + if (pfat_table->ant_ave[i] > max_rssi) { + max_rssi = pfat_table->ant_ave[i]; target_ant = (u8) i; } } @@ -1664,32 +1704,33 @@ static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw) BIT(16), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), target_ant); - rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); + rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, + BIT(21), 1); - fat_tbl->antsel_a[fat_tbl->train_idx] = - target_ant & BIT(0); - fat_tbl->antsel_b[fat_tbl->train_idx] = - (target_ant & BIT(1)) >> 1; - fat_tbl->antsel_c[fat_tbl->train_idx] = - (target_ant & BIT(2)) >> 2; + pfat_table->antsel_a[pfat_table->train_idx] = + target_ant & BIT(0); + pfat_table->antsel_b[pfat_table->train_idx] = + (target_ant & BIT(1)) >> 1; + pfat_table->antsel_c[pfat_table->train_idx] = + (target_ant & BIT(2)) >> 2; if (target_ant == 0) rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0); } for (i = 0; i < 7; i++) { - fat_tbl->ant_sum[i] = 0; - fat_tbl->ant_cnt[i] = 0; + pfat_table->ant_sum[i] = 0; + pfat_table->ant_cnt[i] = 0; } - fat_tbl->fat_state = FAT_NORMAL_STATE; + pfat_table->fat_state = FAT_NORMAL_STATE; return; } - if (fat_tbl->fat_state == FAT_NORMAL_STATE) { + if (pfat_table->fat_state == FAT_NORMAL_STATE) { rtl88e_set_next_mac_address_target(hw); - fat_tbl->fat_state = FAT_TRAINING_STATE; + pfat_table->fat_state = FAT_TRAINING_STATE; rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 1); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1); @@ -1711,11 +1752,11 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - struct fast_ant_training *fat_tbl = &(rtldm->fat_table); + struct fast_ant_training *pfat_table = &rtldm->fat_table; if (mac->link_state < MAC80211_LINKED) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n"); - if (fat_tbl->becomelinked == true) { + if (pfat_table->becomelinked) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "need to turn off HW AntDiv\n"); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0); @@ -1724,12 +1765,13 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw) if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 0); - fat_tbl->becomelinked = - (mac->link_state == MAC80211_LINKED) ? true : false; + pfat_table->becomelinked = + (mac->link_state == MAC80211_LINKED) ? 
+ true : false; } return; } else { - if (fat_tbl->becomelinked == false) { + if (!pfat_table->becomelinked) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Need to turn on HW AntDiv\n"); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1); @@ -1738,8 +1780,9 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw) if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); - fat_tbl->becomelinked = - (mac->link_state >= MAC80211_LINKED) ? true : false; + pfat_table->becomelinked = + (mac->link_state >= MAC80211_LINKED) ? + true : false; } } diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h index 0e07f72ea158419db6851676de04451e0aa0b23a..64f1f3ea98071b1fe2892c0c28ae86fd3eb904e8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h @@ -156,7 +156,6 @@ #define DM_REG_SLEEP_11N 0xEE0 #define DM_REG_PMPD_ANAEN_11N 0xEEC - /*MAC REG LIST*/ #define DM_REG_BB_RST_11N 0x02 #define DM_REG_ANTSEL_PIN_11N 0x4C @@ -168,8 +167,9 @@ #define DM_REG_EDCA_BK_11N 0x50C #define DM_REG_TXPAUSE_11N 0x522 #define DM_REG_RESP_TX_11N 0x6D8 -#define DM_REG_ANT_TRAIN_1 0x7b0 -#define DM_REG_ANT_TRAIN_2 0x7b4 +#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0 +#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4 + /*DIG Related*/ #define DM_BIT_IGI_11N 0x0000007F @@ -208,7 +208,7 @@ #define DM_DIG_BACKOFF_MIN -4 #define DM_DIG_BACKOFF_DEFAULT 10 -#define RXPATHSELECTION_SS_TH_LOW 30 +#define RXPATHSELECTION_SS_TH_W 30 #define RXPATHSELECTION_DIFF_TH 18 #define DM_RATR_STA_INIT 0 @@ -232,20 +232,22 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 -#define TXPWRTRACK_MAX_IDX 6 +#define TXPWRTRACK_MAX_IDX 6 struct swat_t { u8 failure_cnt; u8 try_flag; u8 stop_trying; + long pre_rssi; long trying_threshold; u8 cur_antenna; u8 pre_antenna; + }; enum FAT_STATE { - FAT_NORMAL_STATE = 0, + FAT_NORMAL_STATE = 0, FAT_TRAINING_STATE = 1, }; @@ -310,8 +312,9 @@ enum pwr_track_control_method { void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc, u32 mac_id); -void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux, - u32 mac_id, u32 rx_pwdb_all); +void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, + u8 antsel_tr_mux, u32 mac_id, + u32 rx_pwdb_all); void rtl88e_dm_fast_antenna_training_callback(unsigned long data); void rtl88e_dm_init(struct ieee80211_hw *hw); void rtl88e_dm_watchdog(struct ieee80211_hw *hw); @@ -320,7 +323,5 @@ void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw); void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw); void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw, - u8 type, u8 *pdirection, - u32 *poutwrite_val); - + u8 type, u8 *pdirection, u32 *poutwrite_val); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c index 4f9376ad473966c9b19c986080e73e047a40adde..c8058aa73ecfc1aa8f282cc80aa27b789f657906 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
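/*
 * Editorial sketch (not part of the patch): the hardware antenna-diversity
 * path above accumulates per-station RSSI sums and counts for the main and
 * aux antennas, then picks the antenna with the higher average, keeping the
 * currently selected idle antenna on a tie. The helper below is hypothetical
 * and only restates that decision rule; it assumes the driver's u8/u32 types
 * and the MAIN_ANT/AUX_ANT defines.
 */
static u8 pick_target_ant_sketch(u32 main_sum, u32 main_cnt,
				 u32 aux_sum, u32 aux_cnt, u8 cur_idle_ant)
{
	u32 main_avg = main_cnt ? main_sum / main_cnt : 0;
	u32 aux_avg = aux_cnt ? aux_sum / aux_cnt : 0;

	if (main_avg == aux_avg)
		return cur_idle_ant;	/* keep the current antenna on a tie */
	return (main_avg >= aux_avg) ? MAIN_ANT : AUX_ANT;
}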
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,12 +26,11 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../core.h" #include "reg.h" #include "def.h" #include "fw.h" -#include - static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -62,26 +57,26 @@ static void _rtl88e_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blk_sz = sizeof(u32); - u8 *buf_ptr = (u8 *)buffer; + u32 blocksize = sizeof(u32); + u8 *bufferptr = (u8 *)buffer; u32 *pu4BytePtr = (u32 *)buffer; - u32 i, offset, blk_cnt, remain; + u32 i, offset, blockcount, remainsize; - blk_cnt = size / blk_sz; - remain = size % blk_sz; + blockcount = size / blocksize; + remainsize = size % blocksize; - for (i = 0; i < blk_cnt; i++) { - offset = i * blk_sz; + for (i = 0; i < blockcount; i++) { + offset = i * blocksize; rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), *(pu4BytePtr + i)); } - if (remain) { - offset = blk_cnt * blk_sz; - buf_ptr += offset; - for (i = 0; i < remain; i++) { + if (remainsize) { + offset = blockcount * blocksize; + bufferptr += offset; + for (i = 0; i < remainsize; i++) { rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(buf_ptr + i)); + offset + i), *(bufferptr + i)); } } } @@ -119,32 +114,33 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw, enum version_8188e version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 *buf_ptr = buffer; - u32 page_no, remain; + u8 *bufferptr = (u8 *)buffer; + u32 pagenums, remainsize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); - _rtl88e_fill_dummy(buf_ptr, &size); + _rtl88e_fill_dummy(bufferptr, &size); - page_no = size / FW_8192C_PAGE_SIZE; - remain = size % FW_8192C_PAGE_SIZE; + pagenums = size / FW_8192C_PAGE_SIZE; + remainsize = size % FW_8192C_PAGE_SIZE; - if (page_no > 8) { + if (pagenums > 8) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Page numbers should not greater then 8\n"); } - for (page = 0; page < page_no; page++) { + for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl88e_fw_page_write(hw, page, (buf_ptr + offset), + _rtl88e_fw_page_write(hw, page, (bufferptr + offset), FW_8192C_PAGE_SIZE); } - if (remain) { - offset = page_no * FW_8192C_PAGE_SIZE; - page = page_no; - _rtl88e_fw_page_write(hw, page, (buf_ptr + offset), remain); + if (remainsize) { + offset = pagenums * FW_8192C_PAGE_SIZE; + page = pagenums; + _rtl88e_fw_page_write(hw, page, (bufferptr + offset), + remainsize); } } @@ -199,7 +195,8 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw) return err; } -int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) +int rtl88e_download_fw(struct ieee80211_hw *hw, + bool buse_wake_on_wlan_fw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -221,8 +218,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x), Size(%d)\n", - pfwheader->version, pfwheader->signature, - (int)sizeof(struct 
rtl92c_firmware_header)); + pfwheader->version, pfwheader->signature, + (int)sizeof(struct rtl92c_firmware_header)); pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); fwsize = fwsize - sizeof(struct rtl92c_firmware_header); @@ -237,9 +234,14 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) _rtl88e_enable_fw_download(hw, false); err = _rtl88e_fw_free_to_go(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Firmware is not ready to run!\n"); + } else { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "Firmware is ready to run!\n"); + } - RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "Firmware is%s ready to run!\n", err ? " not" : ""); return 0; } @@ -266,9 +268,9 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, bool isfw_read = false; u8 buf_index = 0; bool write_sucess = false; - u8 wait_h2c_limit = 100; + u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limit = 100; - u8 boxc[4], boxext[2]; + u8 boxcontent[4], boxextcontent[4]; u32 h2c_waitcounter = 0; unsigned long flag; u8 idx; @@ -331,18 +333,17 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } - isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum); while (!isfw_read) { - wait_h2c_limit--; - if (wait_h2c_limit == 0) { + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting too long for FW read " - "clear HMEBox(%d)!\n", boxnum); + "Waiting too long for FW read clear HMEBox(%d)!\n", + boxnum); break; } @@ -351,20 +352,20 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum); u1b_tmp = rtl_read_byte(rtlpriv, 0x130); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting for FW read clear HMEBox(%d)!!! " - "0x130 = %2x\n", boxnum, u1b_tmp); + "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n", + boxnum, u1b_tmp); } if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Write H2C register BOX[%d] fail!!!!! " - "Fw do not read.\n", boxnum); + "Write H2C register BOX[%d] fail!!!!! 
Fw do not read.\n", + boxnum); break; } - memset(boxc, 0, sizeof(boxc)); - memset(boxext, 0, sizeof(boxext)); - boxc[0] = element_id; + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Write element_id box_reg(%4x) = %2x\n", box_reg, element_id); @@ -373,33 +374,38 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, case 1: case 2: case 3: - /*boxc[0] &= ~(BIT(7));*/ - memcpy((u8 *)(boxc) + 1, cmd_b + buf_index, cmd_len); + /*boxcontent[0] &= ~(BIT(7));*/ + memcpy((u8 *)(boxcontent) + 1, + cmd_b + buf_index, cmd_len); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg+idx, boxc[idx]); + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } break; case 4: case 5: case 6: case 7: - /*boxc[0] |= (BIT(7));*/ - memcpy((u8 *)(boxext), cmd_b + buf_index+3, cmd_len-3); - memcpy((u8 *)(boxc) + 1, cmd_b + buf_index, 3); + /*boxcontent[0] |= (BIT(7));*/ + memcpy((u8 *)(boxextcontent), + cmd_b + buf_index+3, cmd_len-3); + memcpy((u8 *)(boxcontent) + 1, + cmd_b + buf_index, 3); for (idx = 0; idx < 2; idx++) { rtl_write_byte(rtlpriv, box_extreg + idx, - boxext[idx]); + boxextcontent[idx]); } for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, - boxc[idx]); + boxcontent[idx]); } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } @@ -411,7 +417,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "pHalData->last_hmeboxnum = %d\n", - rtlhal->last_hmeboxnum); + rtlhal->last_hmeboxnum); } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); @@ -422,18 +428,19 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw, } void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *cmd_b) + u8 element_id, u32 cmd_len, u8 *cmdbuffer) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 tmp_cmdbuf[2]; - if (rtlhal->fw_ready == false) { - RT_ASSERT(false, "fail H2C cmd - Fw download fail!!!\n"); + if (!rtlhal->fw_ready) { + RT_ASSERT(false, + "return H2C cmd because of Fw download fail!!!\n"); return; } memset(tmp_cmdbuf, 0, 8); - memcpy(tmp_cmdbuf, cmd_b, cmd_len); + memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); _rtl88e_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); return; @@ -448,7 +455,8 @@ void rtl88e_firmware_selfreset(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2))); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "8051Reset88E(): 8051 reset success.\n"); + "8051Reset88E(): 8051 reset success\n"); + } void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) @@ -456,28 +464,29 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1_h2c_set_pwrmode[H2C_88E_PWEMODE_LENGTH] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - u8 power_state = 0; - + u8 rlbm, power_state = 0; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); - SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, 0); + rlbm = 0;/*YJ, temp, 120316. 
FW now not support RLBM=2.*/ + SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, - (rtlpriv->mac80211.p2p) ? - ppsc->smart_ps : 1); + (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1); SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, - ppsc->reg_max_lps_awakeintvl); + ppsc->reg_max_lps_awakeintvl); SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); if (mode == FW_PS_ACTIVE_MODE) power_state |= FW_PWR_STATE_ACTIVE; else power_state |= FW_PWR_STATE_RF_OFF; + SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n", u1_h2c_set_pwrmode, H2C_88E_PWEMODE_LENGTH); - rtl88e_fill_h2c_cmd(hw, H2C_88E_SETPWRMODE, H2C_88E_PWEMODE_LENGTH, - u1_h2c_set_pwrmode); + rtl88e_fill_h2c_cmd(hw, H2C_88E_SETPWRMODE, + H2C_88E_PWEMODE_LENGTH, u1_h2c_set_pwrmode); } void rtl88e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) @@ -499,39 +508,9 @@ void rtl88e_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid); SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0); - rtl88e_fill_h2c_cmd(hw, H2C_88E_AP_OFFLOAD, H2C_88E_AP_OFFLOAD_LENGTH, - u1_apoffload_parm); -} + rtl88e_fill_h2c_cmd(hw, H2C_88E_AP_OFFLOAD, + H2C_88E_AP_OFFLOAD_LENGTH, u1_apoffload_parm); -static bool _rtl88e_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - struct sk_buff *pskb = NULL; - unsigned long flags; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); - - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; } #define BEACON_PG 0 /* ->1 */ @@ -656,14 +635,15 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - u32 totalpacketlen; - u8 u1RsvdPageLoc[5] = { 0 }; - + bool rtstatus; + u8 u1rsvdpageloc[5] = { 0 }; + bool b_dlok = false; u8 *beacon; - u8 *pspoll; + u8 *p_pspoll; u8 *nullfunc; - u8 *probersp; + u8 *p_probersp; + /*--------------------------------------------------------- * (1) beacon *--------------------------------------------------------- @@ -676,12 +656,12 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) * (2) ps-poll *-------------------------------------------------------- */ - pspoll = &reserved_page_packet[PSPOLL_PG * 128]; - SET_80211_PS_POLL_AID(pspoll, (mac->assoc_id | 0xc000)); - SET_80211_PS_POLL_BSSID(pspoll, mac->bssid); - SET_80211_PS_POLL_TA(pspoll, mac->mac_addr); + p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- * (3) null data @@ -692,18 +672,18 @@ void 
rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- * (4) probe response *---------------------------------------------------------- */ - probersp = &reserved_page_packet[PROBERSP_PG * 128]; - SET_80211_HDR_ADDRESS1(probersp, mac->bssid); - SET_80211_HDR_ADDRESS2(probersp, mac->mac_addr); - SET_80211_HDR_ADDRESS3(probersp, mac->bssid); + p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; @@ -712,33 +692,36 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl88e_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", - u1RsvdPageLoc, 3); + u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); - if (!skb) - return; - kmemleak_not_leak(skb); memcpy(skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - if (_rtl88e_cmd_send_packet(hw, skb)) { + rtstatus = rtl_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 3); + "H2C_RSVDPAGE:\n", u1rsvdpageloc, 3); rtl88e_fill_h2c_cmd(hw, H2C_88E_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + sizeof(u1rsvdpageloc), u1rsvdpageloc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!.\n"); } -/*Shoud check FW support p2p or not.*/ +/*Should check FW support p2p or not.*/ static void rtl88e_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) { - u8 u1_ctwindow_period[1] = {ctwindow}; + u8 u1_ctwindow_period[1] = { ctwindow}; rtl88e_fill_h2c_cmd(hw, H2C_88E_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); + } void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) @@ -755,7 +738,7 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) switch (p2p_ps_state) { case P2P_PS_DISABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); - memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t)); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); break; case P2P_PS_ENABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); @@ -765,8 +748,9 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) ctwindow = p2pinfo->ctwindow; rtl88e_set_p2p_ctw_period_cmd(hw, ctwindow); } + /* hw only support 2 set of NoA */ - for (i = 0; i < p2pinfo->noa_num; i++) { + for (i = 0 ; i < p2pinfo->noa_num; i++) { /* To control the register setting for which NOA*/ rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); if (i == 0) @@ -785,7 +769,7 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) start_time = p2pinfo->noa_start_time[i]; if (p2pinfo->noa_count_type[i] != 1) { - while (start_time <= (tsf_low + (50 * 1024))) { + while (start_time <= (tsf_low+(50*1024))) { start_time += p2pinfo->noa_interval[i]; if 
(p2pinfo->noa_count_type[i] != 255) p2pinfo->noa_count_type[i]--; @@ -804,7 +788,7 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { p2p_ps_offload->role = 1; - p2p_ps_offload->allstasleep = 0; + p2p_ps_offload->allstasleep = -1; } else { p2p_ps_offload->role = 0; } @@ -827,4 +811,5 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) rtl88e_fill_h2c_cmd(hw, H2C_88E_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); + } diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h index 854a9875cd5fd65c71dd026870db845db3fc0fe5..05e944e451f442f2d610502348c7e8c2c0e8098f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h @@ -55,10 +55,11 @@ #define H2C_88E_AOAC_RSVDPAGE_LOC_LEN 7 /* Fw PS state for RPWM. - * BIT[2:0] = HW state - * BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state - * BIT[4] = sub-state - */ +*BIT[2:0] = HW state +*BIT[3] = Protocol PS state, +*1: register active state , 0: register sleep state +*BIT[4] = sub-state +*/ #define FW_PS_GO_ON BIT(0) #define FW_PS_TX_NULL BIT(1) #define FW_PS_RF_ON BIT(2) @@ -98,10 +99,13 @@ #define FW_PS_STATE_S2 (FW_PS_RF_OFF) #define FW_PS_STATE_S3 (FW_PS_ALL_ON) #define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON)) - +/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/ #define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON) +/* (FW_PS_RF_ON)*/ #define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON) -#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON) +/* 0x0*/ +#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON) +/* (FW_PS_STATE_RF_OFF)*/ #define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF) #define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4) @@ -146,7 +150,7 @@ struct rtl92c_firmware_header { u32 rsvd5; }; -enum rtl8192c_h2c_cmd { +enum rtl8188e_h2c_cmd { H2C_88E_RSVDPAGE = 0, H2C_88E_JOINBSSRPT = 1, H2C_88E_SCAN = 2, @@ -175,7 +179,7 @@ enum rtl8192c_h2c_cmd { H2C_88E_AOAC_GLOBAL_INFO = 0x82, H2C_88E_AOAC_RSVDPAGE = 0x83, #endif - /* Not defined in new 88E H2C CMD Format */ + /*Not defined in new 88E H2C CMD Format*/ H2C_88E_RA_MASK, H2C_88E_SELECTIVE_SUSPEND_ROF_CMD, H2C_88E_P2P_PS_MODE, @@ -289,13 +293,12 @@ enum rtl8192c_h2c_cmd { int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw); void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, - u32 cmd_len, u8 *p_cmdbuffer); + u32 cmd_len, u8 *cmdbuffer); void rtl88e_firmware_selfreset(struct ieee80211_hw *hw); void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); -void rtl88e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, - u8 mstatus); -void rtl88e_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 enable); +void rtl88e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl88e_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, + u8 ap_offload_enable); void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); - #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c index d840ad7bdf65e82248a97190e43fce3c97d4abaf..f2b9713c456e14e8a89726ec989113a4635a1767 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
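/*
 * Editorial sketch (not part of the patch): fw.h documents the RPWM byte as
 * BIT[2:0] = HW state, BIT(3) = protocol PS state (1 = register active,
 * 0 = register sleep) and BIT(4) = sub-state. The hypothetical helper below
 * only illustrates that packing; it is not a driver API.
 */
static inline u8 rpwm_pack_sketch(u8 hw_state, bool reg_active, bool substate)
{
	u8 val = hw_state & 0x07;	/* BIT[2:0]: HW state */

	if (reg_active)
		val |= BIT(3);		/* protocol PS: register active */
	if (substate)
		val |= BIT(4);		/* sub-state */
	return val;
}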
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -93,7 +89,9 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + unsigned long flags; + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb = __skb_dequeue(&ring->queue); @@ -105,6 +103,7 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw) kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); } static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw) @@ -113,16 +112,16 @@ static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw) } static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw, - u8 rpwm_val, bool need_turn_off_ckk) + u8 rpwm_val, bool b_need_turn_off_ckk) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool support_remote_wake_up; + bool b_support_remote_wake_up; u32 count = 0, isr_regaddr, content; - bool schedule_timer = need_turn_off_ckk; - + bool schedule_timer = b_need_turn_off_ckk; rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, - (u8 *)(&support_remote_wake_up)); + (u8 *)(&b_support_remote_wake_up)); + if (!rtlhal->fw_ready) return; if (!rtlpriv->psc.fw_current_inpsmode) @@ -133,8 +132,9 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw, if (rtlhal->fw_clk_change_in_progress) { while (rtlhal->fw_clk_change_in_progress) { spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + count++; udelay(100); - if (++count > 1000) + if (count > 1000) return; spin_lock_bh(&rtlpriv->locks.fw_ps_lock); } @@ -173,6 +173,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw, mod_timer(&rtlpriv->works.fw_clockoff_timer, jiffies + MSECS(10)); } + } else { spin_lock_bh(&rtlpriv->locks.fw_ps_lock); rtlhal->fw_clk_change_in_progress = false; @@ -247,11 +248,9 @@ static void _rtl88ee_set_fw_ps_rf_on(struct ieee80211_hw *hw) static void _rtl88ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw) { u8 rpwm_val = 0; - rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR_88E; _rtl88ee_set_fw_clock_off(hw, rpwm_val); } - void rtl88ee_fw_clk_off_timer_callback(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; @@ -325,23 +324,23 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfstate; - u32 val_rcr; - - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *)(&rfstate)); - if (rfstate == ERFOFF) { + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, + HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else *((bool *)(val)) = true; - } else { - val_rcr = rtl_read_dword(rtlpriv, REG_RCR); - val_rcr &= 0x00070000; - if (val_rcr) - *((bool *)(val)) = false; - else - *((bool *)(val)) = true; - 
} - break; } + break; } case HW_VAR_FW_PSMODE_STATUS: *((bool *)(val)) = ppsc->fw_current_inpsmode; break; @@ -373,25 +372,32 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) switch (variable) { case HW_VAR_ETHER_ADDR: - for (idx = 0; idx < ETH_ALEN; idx++) - rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]); + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } break; case HW_VAR_BASIC_RATE:{ - u16 rate_cfg = ((u16 *)val)[0]; + u16 b_rate_cfg = ((u16 *)val)[0]; u8 rate_index = 0; - rate_cfg = rate_cfg & 0x15f; - rate_cfg |= 0x01; - rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); - rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff); - while (rate_cfg > 0x1) { - rate_cfg = (rate_cfg >> 1); + b_rate_cfg = b_rate_cfg & 0x15f; + b_rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, + (b_rate_cfg >> 8) & 0xff); + while (b_rate_cfg > 0x1) { + b_rate_cfg = (b_rate_cfg >> 1); rate_index++; } - rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index); - break; } + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, + rate_index); + break; + } case HW_VAR_BSSID: - for (idx = 0; idx < ETH_ALEN; idx++) - rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]); + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } break; case HW_VAR_SIFS: rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); @@ -401,7 +407,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); if (!mac->ht_enable) - rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e); + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + 0x0e0e); else rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, *((u16 *)val)); @@ -418,17 +425,20 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &e_aci); } - break; } + break; + } case HW_VAR_ACK_PREAMBLE:{ u8 reg_tmp; u8 short_preamble = (bool)*val; reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2); if (short_preamble) { reg_tmp |= 0x02; - rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + + 2, reg_tmp); } else { reg_tmp |= 0xFD; - rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + + 2, reg_tmp); } break; } case HW_VAR_WPA_CONFIG: @@ -446,7 +456,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & - 0xf8) | min_spacing_to_set); + 0xf8) | + min_spacing_to_set); *val = min_spacing_to_set; @@ -470,35 +481,44 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, mac->min_space_cfg); - break; } + break; + } case HW_VAR_AMPDU_FACTOR:{ u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 }; - u8 factor; - u8 *reg = NULL; - u8 id = 0; - - reg = regtoset_normal; - - factor = *val; - if (factor <= 3) { - factor = (1 << (factor + 2)); - if (factor > 0xf) - factor = 0xf; - - for (id = 0; id < 4; id++) { - if ((reg[id] & 0xf0) > (factor << 4)) - reg[id] = (reg[id] & 0x0f) | - (factor << 4); + u8 factor_toset; + u8 *p_regtoset = NULL; + u8 index = 0; + + p_regtoset = regtoset_normal; + + factor_toset = *val; + if (factor_toset <= 3) { + factor_toset = (1 << (factor_toset + 2)); + if (factor_toset > 0xf) + factor_toset = 0xf; + + for (index = 0; 
index < 4; index++) { + if ((p_regtoset[index] & 0xf0) > + (factor_toset << 4)) + p_regtoset[index] = + (p_regtoset[index] & 0x0f) | + (factor_toset << 4); + + if ((p_regtoset[index] & 0x0f) > + factor_toset) + p_regtoset[index] = + (p_regtoset[index] & 0xf0) | + (factor_toset); + + rtl_write_byte(rtlpriv, + (REG_AGGLEN_LMT + index), + p_regtoset[index]); - if ((reg[id] & 0x0f) > factor) - reg[id] = (reg[id] & 0xf0) | (factor); - - rtl_write_byte(rtlpriv, (REG_AGGLEN_LMT + id), - reg[id]); } RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_AMPDU_FACTOR: %#x\n", factor); + "Set HW_VAR_AMPDU_FACTOR: %#x\n", + factor_toset); } break; } case HW_VAR_AC_PARAM:{ @@ -506,7 +526,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl88e_dm_init_edca_turbo(hw); if (rtlpci->acm_method != EACMWAY2_SW) - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_ACM_CTRL, &e_aci); break; } case HW_VAR_ACM_CTRL:{ @@ -516,7 +537,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u8 acm = p_aci_aifsn->f.acm; u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); - acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1); + acm_ctrl = acm_ctrl | + ((rtlpci->acm_method == 2) ? 0x0 : 0x1); if (acm) { switch (e_aci) { @@ -609,66 +631,76 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) _rtl88ee_fwlps_enter(hw); else _rtl88ee_fwlps_leave(hw); + break; } case HW_VAR_H2C_FW_JOINBSSRPT:{ u8 mstatus = *val; - u8 tmp, tmp_reg422, uval; + u8 tmp_regcr, tmp_reg422, bcnvalid_reg; u8 count = 0, dlbcn_count = 0; - bool recover = false; + bool b_recover = false; if (mstatus == RT_MEDIA_CONNECT) { - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, + NULL); - tmp = rtl_read_byte(rtlpriv, REG_CR + 1); - rtl_write_byte(rtlpriv, REG_CR + 1, (tmp | BIT(0))); + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr | BIT(0))); _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3)); _rtl88ee_set_bcn_ctrl_reg(hw, BIT(4), 0); - tmp_reg422 = rtl_read_byte(rtlpriv, - REG_FWHW_TXQ_CTRL + 2); + tmp_reg422 = + rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); if (tmp_reg422 & BIT(6)) - recover = true; + b_recover = true; do { - uval = rtl_read_byte(rtlpriv, REG_TDECTRL+2); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL+2); rtl_write_byte(rtlpriv, REG_TDECTRL+2, - (uval | BIT(0))); + (bcnvalid_reg | BIT(0))); _rtl88ee_return_beacon_queue_skb(hw); rtl88e_set_fw_rsvdpagepkt(hw, 0); - uval = rtl_read_byte(rtlpriv, REG_TDECTRL+2); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL+2); count = 0; - while (!(uval & BIT(0)) && count < 20) { + while (!(bcnvalid_reg & BIT(0)) && count < 20) { count++; udelay(10); - uval = rtl_read_byte(rtlpriv, - REG_TDECTRL+2); + bcnvalid_reg = + rtl_read_byte(rtlpriv, REG_TDECTRL+2); } dlbcn_count++; - } while (!(uval & BIT(0)) && dlbcn_count < 5); + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); - if (uval & BIT(0)) + if (bcnvalid_reg & BIT(0)) rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0)); _rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0); _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(4)); - if (recover) { - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + if (b_recover) { + rtl_write_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2, tmp_reg422); } - rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0)))); + + rtl_write_byte(rtlpriv, REG_CR + 1, + 
(tmp_regcr & ~(BIT(0)))); } - rtl88e_set_fw_joinbss_report_cmd(hw, *val); + rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val)); break; } case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: rtl88e_set_p2p_ps_offload_cmd(hw, *val); break; case HW_VAR_AID:{ u16 u2btmp; + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); u2btmp &= 0xC000; rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | @@ -677,21 +709,29 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_CORRECT_TSF:{ u8 btype_ibss = *val; - if (btype_ibss == true) + if (btype_ibss) _rtl88ee_stop_tx_beacon(hw); _rtl88ee_set_bcn_ctrl_reg(hw, 0, BIT(3)); rtl_write_dword(rtlpriv, REG_TSFTR, - (u32) (mac->tsf & 0xffffffff)); + (u32)(mac->tsf & 0xffffffff)); rtl_write_dword(rtlpriv, REG_TSFTR + 4, - (u32) ((mac->tsf >> 32) & 0xffffffff)); + (u32)((mac->tsf >> 32) & 0xffffffff)); _rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0); - if (btype_ibss == true) + if (btype_ibss) _rtl88ee_resume_tx_beacon(hw); break; } + case HW_VAR_KEEP_ALIVE: { + u8 array[2]; + + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl88e_fill_h2c_cmd(hw, H2C_88E_KEEP_ALIVE_CTRL, + 2, array); + break; } default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not process %x\n", variable); @@ -740,7 +780,7 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x01); rtl_write_dword(rtlpriv, REG_RQPN, 0x80730d29); - + /*0x2600 MaxRxBuff=10k-max(TxReportSize(64*8), WOLPattern(16*24)) */ rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x25FF0000 | txpktbuf_bndy)); rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); @@ -797,10 +837,11 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 bytetmp; u16 wordtmp; - /*Disable XTAL OUTPUT for power saving. YJ, add, 111206. */ + /*Disable XTAL OUTPUT for power saving. YJ,add,111206. 
*/ bytetmp = rtl_read_byte(rtlpriv, REG_XCK_OUT_CTRL) & (~BIT(0)); rtl_write_byte(rtlpriv, REG_XCK_OUT_CTRL, bytetmp); /*Auto Power Down to CHIP-off State*/ @@ -811,7 +852,7 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) /* HW Power on sequence */ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, - Rtl8188E_NIC_ENABLE_FLOW)) { + RTL8188EE_NIC_ENABLE_FLOW)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "init MAC Fail as rtl_hal_pwrseqcmdparsing\n"); return false; @@ -853,8 +894,6 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) return false; } } - - rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); @@ -889,9 +928,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) DMA_BIT_MASK(32)); /* if we want to support 64 bit DMA, we should set it here, - * but at the moment we do not support 64 bit DMA + * but now we do not support 64 bit DMA */ - rtl_write_dword(rtlpriv, REG_INT_MIG, 0); rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); @@ -910,8 +948,12 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw) static void _rtl88ee_hw_configure(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 reg_prsr; + u8 reg_bw_opmode; + u32 reg_ratr, reg_prsr; + reg_bw_opmode = BW_OPMODE_20MHZ; + reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | + RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr); @@ -923,7 +965,7 @@ static void _rtl88ee_enable_aspm_back_door(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 tmp1byte = 0; - u32 tmp4Byte = 0, count; + u32 tmp4byte = 0, count = 0; rtl_write_word(rtlpriv, 0x354, 0x8104); rtl_write_word(rtlpriv, 0x358, 0x24); @@ -938,8 +980,8 @@ static void _rtl88ee_enable_aspm_back_door(struct ieee80211_hw *hw) count++; } if (0 == tmp1byte) { - tmp4Byte = rtl_read_dword(rtlpriv, 0x34c); - rtl_write_dword(rtlpriv, 0x348, tmp4Byte|BIT(31)); + tmp4byte = rtl_read_dword(rtlpriv, 0x34c); + rtl_write_dword(rtlpriv, 0x348, tmp4byte|BIT(31)); rtl_write_word(rtlpriv, 0x350, 0xf70c); rtl_write_byte(rtlpriv, 0x352, 0x1); } @@ -961,12 +1003,14 @@ static void _rtl88ee_enable_aspm_back_door(struct ieee80211_hw *hw) tmp1byte = rtl_read_byte(rtlpriv, 0x352); count++; } + if (ppsc->support_backdoor || (0 == tmp1byte)) { - tmp4Byte = rtl_read_dword(rtlpriv, 0x34c); - rtl_write_dword(rtlpriv, 0x348, tmp4Byte|BIT(11)|BIT(12)); + tmp4byte = rtl_read_dword(rtlpriv, 0x34c); + rtl_write_dword(rtlpriv, 0x348, tmp4byte|BIT(11)|BIT(12)); rtl_write_word(rtlpriv, 0x350, 0xf718); rtl_write_byte(rtlpriv, 0x352, 0x1); } + tmp1byte = rtl_read_byte(rtlpriv, 0x352); count = 0; while (tmp1byte && count < 20) { @@ -983,14 +1027,15 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", - rtlpriv->sec.pairwise_enc_algorithm, - rtlpriv->sec.group_enc_algorithm); + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "not open hw encryption\n"); return; } + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; if (rtlpriv->sec.use_defaultkey) { @@ -1004,6 +1049,7 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n", sec_reg_value); + 
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); } @@ -1021,7 +1067,6 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) u8 tmp_u1b, u1byte; unsigned long flags; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n"); rtlpriv->rtlhal.being_init_adapter = true; /* As this function can take a very long time (up to 350 ms) * and can be called with irqs disabled, reenable the irqs @@ -1032,6 +1077,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) */ local_save_flags(flags); local_irq_enable(); + rtlhal->fw_ready = false; rtlpriv->intf_ops->disable_aspm(hw); @@ -1057,9 +1103,8 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) "Failed to download FW. Init HW without FW now..\n"); err = 1; goto exit; - } else { - rtlhal->fw_ready = true; } + rtlhal->fw_ready = true; /*fw related variable initialize */ rtlhal->last_hmeboxnum = 0; rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_88E; @@ -1068,10 +1113,10 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) ppsc->fw_current_inpsmode = false; rtl88e_phy_mac_config(hw); - /* because last function modifies RCR, we update - * rcr var here, or TP will be unstable for receive_config - * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx - * RCR_APP_ICV will cause mac80211 disassoc for cisco 1252 + /* because last function modify RCR, so we update + * rcr var here, or TP will unstable for receive_config + * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx + * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252 */ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV); rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); @@ -1101,15 +1146,14 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) if (ppsc->rfpwr_state == ERFON) { if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) || ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) && - (rtlhal->oem_id == RT_CID_819X_HP))) { + (rtlhal->oem_id == RT_CID_819X_HP))) { rtl88e_phy_set_rfpath_switch(hw, true); rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT; } else { rtl88e_phy_set_rfpath_switch(hw, false); rtlpriv->dm.fat_table.rx_idle_ant = AUX_ANT; } - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "rx idle ant %s\n", + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n", (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ? 
("MAIN_ANT") : ("AUX_ANT")); @@ -1119,6 +1163,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) rtl88e_phy_iq_calibrate(hw, false); rtlphy->iqk_initialized = true; } + rtl88e_dm_check_txpower_tracking(hw); rtl88e_phy_lc_calibrate(hw); } @@ -1142,8 +1187,6 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw) exit: local_irq_restore(flags); rtlpriv->rtlhal.being_init_adapter = false; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n", - err); return err; } @@ -1176,62 +1219,67 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw, enum nl80211_iftype type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 bt_msr = rtl_read_byte(rtlpriv, MSR); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc; enum led_ctl_mode ledaction = LED_CTL_NO_LINK; - bt_msr &= 0xfc; - - if (type == NL80211_IFTYPE_UNSPECIFIED || - type == NL80211_IFTYPE_STATION) { - _rtl88ee_stop_tx_beacon(hw); - _rtl88ee_enable_bcn_sub_func(hw); - } else if (type == NL80211_IFTYPE_ADHOC || - type == NL80211_IFTYPE_AP || - type == NL80211_IFTYPE_MESH_POINT) { - _rtl88ee_resume_tx_beacon(hw); - _rtl88ee_disable_bcn_sub_func(hw); - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", - type); - } + u8 mode = MSR_NOLINK; switch (type) { case NL80211_IFTYPE_UNSPECIFIED: - bt_msr |= MSR_NOLINK; - ledaction = LED_CTL_LINK; + mode = MSR_NOLINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to NO LINK!\n"); break; case NL80211_IFTYPE_ADHOC: - bt_msr |= MSR_ADHOC; + case NL80211_IFTYPE_MESH_POINT: + mode = MSR_ADHOC; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to Ad Hoc!\n"); break; case NL80211_IFTYPE_STATION: - bt_msr |= MSR_INFRA; + mode = MSR_INFRA; ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to STA!\n"); break; case NL80211_IFTYPE_AP: - bt_msr |= MSR_AP; + mode = MSR_AP; + ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to AP!\n"); break; - case NL80211_IFTYPE_MESH_POINT: - bt_msr |= MSR_ADHOC; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Set Network type to Mesh Point!\n"); - break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Network type %d not support!\n", type); return 1; + break; + } + + /* MSR_INFRA == Link in infrastructure network; + * MSR_ADHOC == Link in ad hoc network; + * Therefore, check link state is necessary. + * + * MSR_AP == AP mode; link state is not cared here. 
+ */ + if (mode != MSR_AP && rtlpriv->mac80211.link_state < MAC80211_LINKED) { + mode = MSR_NOLINK; + ledaction = LED_CTL_NO_LINK; + } + + if (mode == MSR_NOLINK || mode == MSR_INFRA) { + _rtl88ee_stop_tx_beacon(hw); + _rtl88ee_enable_bcn_sub_func(hw); + } else if (mode == MSR_ADHOC || mode == MSR_AP) { + _rtl88ee_resume_tx_beacon(hw); + _rtl88ee_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", + mode); } - rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); rtlpriv->cfg->ops->led_control(hw, ledaction); - if ((bt_msr & MSR_MASK) == MSR_AP) + if (mode == MSR_AP) rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); else rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); @@ -1241,13 +1289,12 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw, void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 reg_rcr; + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; if (rtlpriv->psc.rfpwr_state != ERFON) return; - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(®_rcr)); - if (check_bssid == true) { reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, @@ -1259,9 +1306,11 @@ void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(®_rcr)); } + } -int rtl88ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +int rtl88ee_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1279,7 +1328,9 @@ int rtl88ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) return 0; } -/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ +/* don't set REG_EDCA_BE_PARAM here + * because mac80211 will send pkt when scan + */ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1302,22 +1353,41 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp; + + tmp = rtl_read_dword(rtlpriv, REG_HISR); + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl88ee_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); - rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtl88ee_clear_interrupt(hw);/*clear it here first*/ + rtl_write_dword(rtlpriv, REG_HIMR, + rtlpci->irq_mask[0] & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, + rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; - /* there are some C2H CMDs have been sent before system interrupt - * is enabled, e.g., C2H, CPWM. - * So we need to clear all C2H events that FW has notified, otherwise - * FW won't schedule any commands anymore. + /* there are some C2H CMDs have been sent + * before system interrupt is enabled, e.g., C2H, CPWM. + * So we need to clear all C2H events that FW has notified, + * otherwise FW won't schedule any commands anymore. 
*/ rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0); /*enable system interrupt*/ - rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HSIMR, + rtlpci->sys_irq_mask & 0xFFFFFFFF); } void rtl88ee_disable_interrupt(struct ieee80211_hw *hw) @@ -1328,7 +1398,7 @@ void rtl88ee_disable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); rtlpci->irq_enabled = false; - synchronize_irq(rtlpci->pdev->irq); + /*synchronize_irq(rtlpci->pdev->irq);*/ } static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw) @@ -1354,7 +1424,7 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw) rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, - Rtl8188E_NIC_LPS_ENTER_FLOW); + RTL8188EE_NIC_LPS_ENTER_FLOW); rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); @@ -1369,7 +1439,7 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0)))); rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW); + PWR_INTF_PCI_MSK, RTL8188EE_NIC_DISABLE_FLOW); u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3)))); @@ -1426,6 +1496,7 @@ void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw, *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + } void rtl88ee_set_beacon_related_registers(struct ieee80211_hw *hw) @@ -1471,233 +1542,241 @@ void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr); - rtl88ee_disable_interrupt(hw); if (add_msr) rtlpci->irq_mask[0] |= add_msr; if (rm_msr) rtlpci->irq_mask[0] &= (~rm_msr); + rtl88ee_disable_interrupt(hw); rtl88ee_enable_interrupt(hw); } -static inline u8 get_chnl_group(u8 chnl) +static u8 _rtl88e_get_chnl_group(u8 chnl) { - u8 group; - - group = chnl / 3; - if (chnl == 14) + u8 group = 0; + + if (chnl < 3) + group = 0; + else if (chnl < 6) + group = 1; + else if (chnl < 9) + group = 2; + else if (chnl < 12) + group = 3; + else if (chnl < 14) + group = 4; + else if (chnl == 14) group = 5; return group; } -static void set_diff0_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path, - u32 i, u32 eadr) -{ - pwr2g->bw40_diff[path][i] = 0; - if (hwinfo[eadr] == 0xFF) { - pwr2g->bw20_diff[path][i] = 0x02; - } else { - pwr2g->bw20_diff[path][i] = (hwinfo[eadr]&0xf0)>>4; - /*bit sign number to 8 bit sign number*/ - if (pwr2g->bw20_diff[path][i] & BIT(3)) - pwr2g->bw20_diff[path][i] |= 0xF0; - } - - if (hwinfo[eadr] == 0xFF) { - pwr2g->ofdm_diff[path][i] = 0x04; - } else { - pwr2g->ofdm_diff[path][i] = (hwinfo[eadr] & 0x0f); - /*bit sign number to 8 bit sign number*/ - if (pwr2g->ofdm_diff[path][i] & BIT(3)) - pwr2g->ofdm_diff[path][i] |= 0xF0; - } - pwr2g->cck_diff[path][i] = 0; -} - -static void set_diff0_5g(struct txpower_info_5g *pwr5g, u8 *hwinfo, u32 path, - u32 i, u32 eadr) -{ - pwr5g->bw40_diff[path][i] = 0; - if (hwinfo[eadr] == 0xFF) { - pwr5g->bw20_diff[path][i] = 0; - } else { - pwr5g->bw20_diff[path][i] = (hwinfo[eadr]&0xf0)>>4; - /*bit sign number to 8 bit sign number*/ - if (pwr5g->bw20_diff[path][i] & BIT(3)) - pwr5g->bw20_diff[path][i] |= 0xF0; - } - - if (hwinfo[eadr] == 0xFF) { - pwr5g->ofdm_diff[path][i] = 0x04; - } else { - pwr5g->ofdm_diff[path][i] = (hwinfo[eadr] & 0x0f); - 
/*bit sign number to 8 bit sign number*/ - if (pwr5g->ofdm_diff[path][i] & BIT(3)) - pwr5g->ofdm_diff[path][i] |= 0xF0; - } -} - -static void set_diff1_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path, - u32 i, u32 eadr) -{ - if (hwinfo[eadr] == 0xFF) { - pwr2g->bw40_diff[path][i] = 0xFE; - } else { - pwr2g->bw40_diff[path][i] = (hwinfo[eadr]&0xf0)>>4; - if (pwr2g->bw40_diff[path][i] & BIT(3)) - pwr2g->bw40_diff[path][i] |= 0xF0; - } - - if (hwinfo[eadr] == 0xFF) { - pwr2g->bw20_diff[path][i] = 0xFE; - } else { - pwr2g->bw20_diff[path][i] = (hwinfo[eadr]&0x0f); - if (pwr2g->bw20_diff[path][i] & BIT(3)) - pwr2g->bw20_diff[path][i] |= 0xF0; - } -} - -static void set_diff1_5g(struct txpower_info_5g *pwr5g, u8 *hwinfo, u32 path, - u32 i, u32 eadr) +static void set_24g_base(struct txpower_info_2g *pwrinfo24g, u32 rfpath) { - if (hwinfo[eadr] == 0xFF) { - pwr5g->bw40_diff[path][i] = 0xFE; - } else { - pwr5g->bw40_diff[path][i] = (hwinfo[eadr]&0xf0)>>4; - if (pwr5g->bw40_diff[path][i] & BIT(3)) - pwr5g->bw40_diff[path][i] |= 0xF0; - } + int group, txcnt; - if (hwinfo[eadr] == 0xFF) { - pwr5g->bw20_diff[path][i] = 0xFE; - } else { - pwr5g->bw20_diff[path][i] = (hwinfo[eadr] & 0x0f); - if (pwr5g->bw20_diff[path][i] & BIT(3)) - pwr5g->bw20_diff[path][i] |= 0xF0; + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfpath][group] = 0x2D; + pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D; } -} - -static void set_diff2_2g(struct txpower_info_2g *pwr2g, u8 *hwinfo, u32 path, - u32 i, u32 eadr) -{ - if (hwinfo[eadr] == 0xFF) { - pwr2g->ofdm_diff[path][i] = 0xFE; - } else { - pwr2g->ofdm_diff[path][i] = (hwinfo[eadr]&0xf0)>>4; - if (pwr2g->ofdm_diff[path][i] & BIT(3)) - pwr2g->ofdm_diff[path][i] |= 0xF0; - } - - if (hwinfo[eadr] == 0xFF) { - pwr2g->cck_diff[path][i] = 0xFE; - } else { - pwr2g->cck_diff[path][i] = (hwinfo[eadr]&0x0f); - if (pwr2g->cck_diff[path][i] & BIT(3)) - pwr2g->cck_diff[path][i] |= 0xF0; + for (txcnt = 0; txcnt < MAX_TX_COUNT; txcnt++) { + if (txcnt == 0) { + pwrinfo24g->bw20_diff[rfpath][0] = 0x02; + pwrinfo24g->ofdm_diff[rfpath][0] = 0x04; + } else { + pwrinfo24g->bw20_diff[rfpath][txcnt] = 0xFE; + pwrinfo24g->bw40_diff[rfpath][txcnt] = 0xFE; + pwrinfo24g->cck_diff[rfpath][txcnt] = 0xFE; + pwrinfo24g->ofdm_diff[rfpath][txcnt] = 0xFE; + } } } -static void _rtl8188e_read_power_value_fromprom(struct ieee80211_hw *hw, - struct txpower_info_2g *pwr2g, - struct txpower_info_5g *pwr5g, - bool autoload_fail, - u8 *hwinfo) +static void read_power_value_fromprom(struct ieee80211_hw *hw, + struct txpower_info_2g *pwrinfo24g, + struct txpower_info_5g *pwrinfo5g, + bool autoload_fail, u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 path, eadr = EEPROM_TX_PWR_INX, i; + u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcnt = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "hal_ReadPowerValueFromPROM88E(): PROMContent[0x%x]= 0x%x\n", - (eadr+1), hwinfo[eadr+1]); - if (0xFF == hwinfo[eadr+1]) + "hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n", + (eeaddr+1), hwinfo[eeaddr+1]); + if (0xFF == hwinfo[eeaddr+1]) /*YJ,add,120316*/ autoload_fail = true; if (autoload_fail) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "auto load fail : Use Default value!\n"); - for (path = 0; path < MAX_RF_PATH; path++) { + for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) { /* 2.4G default value */ - for (i = 0; i < MAX_CHNL_GROUP_24G; i++) { - pwr2g->index_cck_base[path][i] = 0x2D; - pwr2g->index_bw40_base[path][i] = 0x2D; - } - for (i = 0; i < MAX_TX_COUNT; i++) { 
- if (i == 0) { - pwr2g->bw20_diff[path][0] = 0x02; - pwr2g->ofdm_diff[path][0] = 0x04; - } else { - pwr2g->bw20_diff[path][i] = 0xFE; - pwr2g->bw40_diff[path][i] = 0xFE; - pwr2g->cck_diff[path][i] = 0xFE; - pwr2g->ofdm_diff[path][i] = 0xFE; - } - } + set_24g_base(pwrinfo24g, rfpath); } return; } - for (path = 0; path < MAX_RF_PATH; path++) { + for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) { /*2.4G default value*/ - for (i = 0; i < MAX_CHNL_GROUP_24G; i++) { - pwr2g->index_cck_base[path][i] = hwinfo[eadr++]; - if (pwr2g->index_cck_base[path][i] == 0xFF) - pwr2g->index_cck_base[path][i] = 0x2D; + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfpath][group] = + hwinfo[eeaddr++]; + if (pwrinfo24g->index_cck_base[rfpath][group] == 0xFF) + pwrinfo24g->index_cck_base[rfpath][group] = + 0x2D; } - for (i = 0; i < MAX_CHNL_GROUP_24G; i++) { - pwr2g->index_bw40_base[path][i] = hwinfo[eadr++]; - if (pwr2g->index_bw40_base[path][i] == 0xFF) - pwr2g->index_bw40_base[path][i] = 0x2D; + for (group = 0 ; group < MAX_CHNL_GROUP_24G-1; group++) { + pwrinfo24g->index_bw40_base[rfpath][group] = + hwinfo[eeaddr++]; + if (pwrinfo24g->index_bw40_base[rfpath][group] == 0xFF) + pwrinfo24g->index_bw40_base[rfpath][group] = + 0x2D; } - for (i = 0; i < MAX_TX_COUNT; i++) { - if (i == 0) { - set_diff0_2g(pwr2g, hwinfo, path, i, eadr); - eadr++; + pwrinfo24g->bw40_diff[rfpath][0] = 0; + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->bw20_diff[rfpath][0] = 0x02; + } else { + pwrinfo24g->bw20_diff[rfpath][0] = + (hwinfo[eeaddr]&0xf0)>>4; + /*bit sign number to 8 bit sign number*/ + if (pwrinfo24g->bw20_diff[rfpath][0] & BIT(3)) + pwrinfo24g->bw20_diff[rfpath][0] |= 0xF0; + } + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->ofdm_diff[rfpath][0] = 0x04; + } else { + pwrinfo24g->ofdm_diff[rfpath][0] = + (hwinfo[eeaddr]&0x0f); + /*bit sign number to 8 bit sign number*/ + if (pwrinfo24g->ofdm_diff[rfpath][0] & BIT(3)) + pwrinfo24g->ofdm_diff[rfpath][0] |= 0xF0; + } + pwrinfo24g->cck_diff[rfpath][0] = 0; + eeaddr++; + for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) { + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->bw40_diff[rfpath][txcnt] = 0xFE; + } else { + pwrinfo24g->bw40_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0xf0)>>4; + if (pwrinfo24g->bw40_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo24g->bw40_diff[rfpath][txcnt] |= + 0xF0; + } + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->bw20_diff[rfpath][txcnt] = + 0xFE; } else { - set_diff1_2g(pwr2g, hwinfo, path, i, eadr); - eadr++; + pwrinfo24g->bw20_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0x0f); + if (pwrinfo24g->bw20_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo24g->bw20_diff[rfpath][txcnt] |= + 0xF0; + } + eeaddr++; - set_diff2_2g(pwr2g, hwinfo, path, i, eadr); - eadr++; + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->ofdm_diff[rfpath][txcnt] = 0xFE; + } else { + pwrinfo24g->ofdm_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0xf0)>>4; + if (pwrinfo24g->ofdm_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo24g->ofdm_diff[rfpath][txcnt] |= + 0xF0; } + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo24g->cck_diff[rfpath][txcnt] = 0xFE; + } else { + pwrinfo24g->cck_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0x0f); + if (pwrinfo24g->cck_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo24g->cck_diff[rfpath][txcnt] |= + 0xF0; + } + eeaddr++; } /*5G default value*/ - for (i = 0; i < MAX_CHNL_GROUP_5G; i++) { - pwr5g->index_bw40_base[path][i] = hwinfo[eadr++]; - if (pwr5g->index_bw40_base[path][i] == 0xFF) - pwr5g->index_bw40_base[path][i] = 0xFE; + for (group = 0 ; group < 
MAX_CHNL_GROUP_5G; group++) { + pwrinfo5g->index_bw40_base[rfpath][group] = + hwinfo[eeaddr++]; + if (pwrinfo5g->index_bw40_base[rfpath][group] == 0xFF) + pwrinfo5g->index_bw40_base[rfpath][group] = + 0xFE; } - for (i = 0; i < MAX_TX_COUNT; i++) { - if (i == 0) { - set_diff0_5g(pwr5g, hwinfo, path, i, eadr); - eadr++; + pwrinfo5g->bw40_diff[rfpath][0] = 0; + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo5g->bw20_diff[rfpath][0] = 0; + } else { + pwrinfo5g->bw20_diff[rfpath][0] = + (hwinfo[eeaddr]&0xf0)>>4; + if (pwrinfo5g->bw20_diff[rfpath][0] & BIT(3)) + pwrinfo5g->bw20_diff[rfpath][0] |= 0xF0; + } + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo5g->ofdm_diff[rfpath][0] = 0x04; + } else { + pwrinfo5g->ofdm_diff[rfpath][0] = (hwinfo[eeaddr]&0x0f); + if (pwrinfo5g->ofdm_diff[rfpath][0] & BIT(3)) + pwrinfo5g->ofdm_diff[rfpath][0] |= 0xF0; + } + eeaddr++; + for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) { + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo5g->bw40_diff[rfpath][txcnt] = 0xFE; } else { - set_diff1_5g(pwr5g, hwinfo, path, i, eadr); - eadr++; + pwrinfo5g->bw40_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0xf0)>>4; + if (pwrinfo5g->bw40_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo5g->bw40_diff[rfpath][txcnt] |= + 0xF0; } + + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo5g->bw20_diff[rfpath][txcnt] = 0xFE; + } else { + pwrinfo5g->bw20_diff[rfpath][txcnt] = + (hwinfo[eeaddr]&0x0f); + if (pwrinfo5g->bw20_diff[rfpath][txcnt] & + BIT(3)) + pwrinfo5g->bw20_diff[rfpath][txcnt] |= + 0xF0; + } + eeaddr++; } - if (hwinfo[eadr] == 0xFF) { - pwr5g->ofdm_diff[path][1] = 0xFE; - pwr5g->ofdm_diff[path][2] = 0xFE; + if (hwinfo[eeaddr] == 0xFF) { + pwrinfo5g->ofdm_diff[rfpath][1] = 0xFE; + pwrinfo5g->ofdm_diff[rfpath][2] = 0xFE; } else { - pwr5g->ofdm_diff[path][1] = (hwinfo[eadr] & 0xf0) >> 4; - pwr5g->ofdm_diff[path][2] = (hwinfo[eadr] & 0x0f); + pwrinfo5g->ofdm_diff[rfpath][1] = + (hwinfo[eeaddr]&0xf0)>>4; + pwrinfo5g->ofdm_diff[rfpath][2] = + (hwinfo[eeaddr]&0x0f); } - eadr++; + eeaddr++; - if (hwinfo[eadr] == 0xFF) - pwr5g->ofdm_diff[path][3] = 0xFE; + if (hwinfo[eeaddr] == 0xFF) + pwrinfo5g->ofdm_diff[rfpath][3] = 0xFE; else - pwr5g->ofdm_diff[path][3] = (hwinfo[eadr]&0x0f); - eadr++; - - for (i = 1; i < MAX_TX_COUNT; i++) { - if (pwr5g->ofdm_diff[path][i] == 0xFF) - pwr5g->ofdm_diff[path][i] = 0xFE; - else if (pwr5g->ofdm_diff[path][i] & BIT(3)) - pwr5g->ofdm_diff[path][i] |= 0xF0; + pwrinfo5g->ofdm_diff[rfpath][3] = (hwinfo[eeaddr]&0x0f); + eeaddr++; + + for (txcnt = 1; txcnt < MAX_TX_COUNT; txcnt++) { + if (pwrinfo5g->ofdm_diff[rfpath][txcnt] == 0xFF) + pwrinfo5g->ofdm_diff[rfpath][txcnt] = 0xFE; + else if (pwrinfo5g->ofdm_diff[rfpath][txcnt] & BIT(3)) + pwrinfo5g->ofdm_diff[rfpath][txcnt] |= 0xF0; } } } @@ -1712,41 +1791,36 @@ static void _rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct txpower_info_5g pwrinfo5g; u8 rf_path, index; u8 i; - int jj = EEPROM_RF_BOARD_OPTION_88E; - int kk = EEPROM_THERMAL_METER_88E; - _rtl8188e_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, - autoload_fail, hwinfo); + read_power_value_fromprom(hw, &pwrinfo24g, + &pwrinfo5g, autoload_fail, hwinfo); for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = get_chnl_group(i+1); + index = _rtl88e_get_chnl_group(i+1); rtlefuse->txpwrlevel_cck[rf_path][i] = - pwrinfo24g.index_cck_base[rf_path][index]; - if (i == 13) - rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = - pwrinfo24g.index_bw40_base[rf_path][4]; - else - rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = - pwrinfo24g.index_bw40_base[rf_path][index]; 
+ pwrinfo24g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; rtlefuse->txpwr_ht20diff[rf_path][i] = - pwrinfo24g.bw20_diff[rf_path][0]; + pwrinfo24g.bw20_diff[rf_path][0]; rtlefuse->txpwr_legacyhtdiff[rf_path][i] = - pwrinfo24g.ofdm_diff[rf_path][0]; + pwrinfo24g.ofdm_diff[rf_path][0]; } for (i = 0; i < 14; i++) { RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, - "RF(%d)-Ch(%d) [CCK / HT40_1S ] = " - "[0x%x / 0x%x ]\n", rf_path, i, + "RF(%d)-Ch(%d) [CCK / HT40_1S ] = [0x%x / 0x%x ]\n", + rf_path, i, rtlefuse->txpwrlevel_cck[rf_path][i], rtlefuse->txpwrlevel_ht40_1s[rf_path][i]); } } if (!autoload_fail) - rtlefuse->eeprom_thermalmeter = hwinfo[kk]; + rtlefuse->eeprom_thermalmeter = + hwinfo[EEPROM_THERMAL_METER_88E]; else rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; @@ -1760,8 +1834,9 @@ static void _rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); if (!autoload_fail) { - rtlefuse->eeprom_regulatory = hwinfo[jj] & 0x07;/*bit0~2*/ - if (hwinfo[jj] == 0xFF) + rtlefuse->eeprom_regulatory = + hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/ + if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF) rtlefuse->eeprom_regulatory = 0; } else { rtlefuse->eeprom_regulatory = 0; @@ -1775,12 +1850,9 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_pci_priv *rppriv = rtl_pcipriv(hw); u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; - int jj = EEPROM_RF_BOARD_OPTION_88E; - int kk = EEPROM_RF_FEATURE_OPTION_88E; if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { rtl_efuse_shadow_map_update(hw); @@ -1790,9 +1862,14 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) } else if (rtlefuse->epromtype == EEPROM_93C46) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); + return; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "boot from neither eeprom nor efuse, check it !!"); + return; } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); eeprom_id = *((u16 *)&hwinfo[0]); @@ -1825,7 +1902,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) /*customer ID*/ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; if (rtlefuse->eeprom_oemid == 0xFF) - rtlefuse->eeprom_oemid = 0; + rtlefuse->eeprom_oemid = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); @@ -1844,34 +1921,40 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) /* set channel paln to world wide 13 */ rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; /*tx power*/ - _rtl88ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, + _rtl88ee_read_txpower_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); rtlefuse->txpwr_fromeprom = true; rtl8188ee_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); + /*board type*/ - rtlefuse->board_type = (hwinfo[jj] & 0xE0) >> 5; + rtlefuse->board_type = + ((hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0xE0) >> 5); + rtlhal->board_type = rtlefuse->board_type; /*Wake on wlan*/ - rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6); + rtlefuse->wowlan_enable = + ((hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & 0x40) >> 6); /*parse xtal*/ rtlefuse->crystalcap = 
hwinfo[EEPROM_XTAL_88E]; if (hwinfo[EEPROM_XTAL_88E]) rtlefuse->crystalcap = 0x20; /*antenna diversity*/ - rtlefuse->antenna_div_cfg = (hwinfo[jj] & 0x18) >> 3; - if (hwinfo[jj] == 0xFF) + rtlefuse->antenna_div_cfg = + (hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x18) >> 3; + if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF) rtlefuse->antenna_div_cfg = 0; - if (rppriv->bt_coexist.eeprom_bt_coexist != 0 && - rppriv->bt_coexist.eeprom_bt_ant_num == ANT_X1) + if (rtlpriv->btcoexist.eeprom_bt_coexist != 0 && + rtlpriv->btcoexist.eeprom_bt_ant_num == ANT_X1) rtlefuse->antenna_div_cfg = 0; rtlefuse->antenna_div_type = hwinfo[EEPROM_RF_ANTENNA_OPT_88E]; if (rtlefuse->antenna_div_type == 0xFF) rtlefuse->antenna_div_type = 0x01; if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV || - rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) rtlefuse->antenna_div_cfg = 1; if (rtlhal->oem_id == RT_CID_DEFAULT) { @@ -1881,12 +1964,12 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) if (rtlefuse->eeprom_svid == 0x1025) { rtlhal->oem_id = RT_CID_819X_ACER; } else if ((rtlefuse->eeprom_svid == 0x10EC && - rtlefuse->eeprom_smid == 0x0179) || - (rtlefuse->eeprom_svid == 0x17AA && - rtlefuse->eeprom_smid == 0x0179)) { + rtlefuse->eeprom_smid == 0x0179) || + (rtlefuse->eeprom_svid == 0x17AA && + rtlefuse->eeprom_smid == 0x0179)) { rtlhal->oem_id = RT_CID_819X_LENOVO; } else if (rtlefuse->eeprom_svid == 0x103c && - rtlefuse->eeprom_smid == 0x197d) { + rtlefuse->eeprom_smid == 0x197d) { rtlhal->oem_id = RT_CID_819X_HP; } else { rtlhal->oem_id = RT_CID_DEFAULT; @@ -1905,6 +1988,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) default: rtlhal->oem_id = RT_CID_DEFAULT; break; + } } } @@ -1943,14 +2027,13 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw) u8 tmp_u1b; rtlhal->version = _rtl88ee_read_chip_version(hw); - if (get_rf_type(rtlphy) == RF_1T1R) { - rtlpriv->dm.rfpath_rxenable[0] = true; - } else { + if (get_rf_type(rtlphy) == RF_1T1R) rtlpriv->dm.rfpath_rxenable[0] = true; - rtlpriv->dm.rfpath_rxenable[1] = true; - } + else + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", - rtlhal->version); + rtlhal->version); tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); if (tmp_u1b & BIT(4)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); @@ -1970,24 +2053,25 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw) } static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rppriv = rtl_pcipriv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 ratr_value; u8 ratr_index = 0; - u8 nmode = mac->ht_enable; - u8 mimo_ps = IEEE80211_SMPS_OFF; + u8 b_nmode = mac->ht_enable; + /*u8 mimo_ps = IEEE80211_SMPS_OFF;*/ u16 shortgi_rate; u32 tmp_ratr_value; - u8 ctx40 = mac->bw_40; - u16 cap = sta->ht_cap.cap; - u8 short40 = (cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 short20 = (cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
+ 1 : 0; enum wireless_mode wirelessmode = mac->mode; + u32 ratr_mask; if (rtlhal->current_bandtype == BAND_ON_5G) ratr_value = sta->supp_rates[1] << 4; @@ -1996,7 +2080,7 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + sta->ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -2009,20 +2093,14 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: - nmode = 1; - if (mimo_ps == IEEE80211_SMPS_STATIC) { - ratr_value &= 0x0007F005; - } else { - u32 ratr_mask; - - if (get_rf_type(rtlphy) == RF_1T2R || - get_rf_type(rtlphy) == RF_1T1R) - ratr_mask = 0x000ff005; - else - ratr_mask = 0x0f0ff005; + b_nmode = 1; + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; - ratr_value &= ratr_mask; - } + ratr_value &= ratr_mask; break; default: if (rtlphy->rf_type == RF_1T2R) @@ -2033,18 +2111,19 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, break; } - if ((rppriv->bt_coexist.bt_coexistence) && - (rppriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) && - (rppriv->bt_coexist.bt_cur_state) && - (rppriv->bt_coexist.bt_ant_isolation) && - ((rppriv->bt_coexist.bt_service == BT_SCO) || - (rppriv->bt_coexist.bt_service == BT_BUSY))) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpriv->btcoexist.bt_cur_state) && + (rtlpriv->btcoexist.bt_ant_isolation) && + ((rtlpriv->btcoexist.bt_service == BT_SCO) || + (rtlpriv->btcoexist.bt_service == BT_BUSY))) ratr_value &= 0x0fffcfc0; else ratr_value &= 0x0FFFFFFF; - if (nmode && ((ctx40 && short40) || - (!ctx40 && short20))) { + if (b_nmode && + ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { ratr_value |= 0x10000000; tmp_ratr_value = (ratr_value >> 12); @@ -2064,7 +2143,7 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, } static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi) + struct ieee80211_sta *sta, u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -2073,23 +2152,25 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u16 cap = sta->ht_cap.cap; - u8 ctx40 = (cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 short40 = (cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 short20 = (cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; + u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ? 1 : 0; + u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
+ 1 : 0; enum wireless_mode wirelessmode = 0; - bool shortgi = false; + bool b_shortgi = false; u8 rate_mask[5]; u8 macid = 0; - u8 mimo_ps = IEEE80211_SMPS_OFF; + /*u8 mimo_ps = IEEE80211_SMPS_OFF;*/ sta_entry = (struct rtl_sta_info *)sta->drv_priv; wirelessmode = sta_entry->wireless_mode; if (mac->opmode == NL80211_IFTYPE_STATION || - mac->opmode == NL80211_IFTYPE_MESH_POINT) - ctx40 = mac->bw_40; + mac->opmode == NL80211_IFTYPE_MESH_POINT) + curtxbw_40mhz = mac->bw_40; else if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) + mac->opmode == NL80211_IFTYPE_ADHOC) macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) @@ -2111,70 +2192,59 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, case WIRELESS_MODE_G: ratr_index = RATR_INX_WIRELESS_GB; - if (rssi == 1) + if (rssi_level == 1) ratr_bitmap &= 0x00000f00; - else if (rssi == 2) + else if (rssi_level == 2) ratr_bitmap &= 0x00000ff0; else ratr_bitmap &= 0x00000ff5; break; - case WIRELESS_MODE_A: - ratr_index = RATR_INX_WIRELESS_A; - ratr_bitmap &= 0x00000ff0; - break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: ratr_index = RATR_INX_WIRELESS_NGB; - - if (mimo_ps == IEEE80211_SMPS_STATIC) { - if (rssi == 1) - ratr_bitmap &= 0x00070000; - else if (rssi == 2) - ratr_bitmap &= 0x0007f000; - else - ratr_bitmap &= 0x0007f005; + if (rtlphy->rf_type == RF_1T2R || + rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } } else { - if (rtlphy->rf_type == RF_1T2R || - rtlphy->rf_type == RF_1T1R) { - if (ctx40) { - if (rssi == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff015; - } else { - if (rssi == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff005; - } + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff015; } else { - if (ctx40) { - if (rssi == 1) - ratr_bitmap &= 0x0f8f0000; - else if (rssi == 2) - ratr_bitmap &= 0x0f8ff000; - else - ratr_bitmap &= 0x0f8ff015; - } else { - if (rssi == 1) - ratr_bitmap &= 0x0f8f0000; - else if (rssi == 2) - ratr_bitmap &= 0x0f8ff000; - else - ratr_bitmap &= 0x0f8ff005; - } + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff005; } } + /*}*/ + + if ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz)) { - if ((ctx40 && short40) || (!ctx40 && short20)) { if (macid == 0) - shortgi = true; + b_shortgi = true; else if (macid == 1) - shortgi = false; + b_shortgi = false; } break; default: @@ -2192,22 +2262,24 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, "ratr_bitmap :%x\n", ratr_bitmap); *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28); - rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; + rate_mask[4] = macid | (b_shortgi ? 
0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", - ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1], - rate_mask[2], rate_mask[3], rate_mask[4]); + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4]); rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask); _rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0); } void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi) + struct ieee80211_sta *sta, u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->dm.useramask) - rtl88ee_update_hal_rate_mask(hw, sta, rssi); + rtl88ee_update_hal_rate_mask(hw, sta, rssi_level); else rtl88ee_update_hal_rate_table(hw, sta); } @@ -2230,9 +2302,9 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - enum rf_pwrstate state_toset; + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u32 u4tmp; - bool actuallyset = false; + bool b_actuallyset = false; if (rtlpriv->rtlhal.being_init_adapter) return false; @@ -2249,27 +2321,29 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) spin_unlock(&rtlpriv->locks.rf_ps_lock); } - u4tmp = rtl_read_dword(rtlpriv, REG_GPIO_OUTPUT); - state_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF; + cur_rfstate = ppsc->rfpwr_state; + u4tmp = rtl_read_dword(rtlpriv, REG_GPIO_OUTPUT); + e_rfpowerstate_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF; - if ((ppsc->hwradiooff == true) && (state_toset == ERFON)) { + if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio ON, RF ON\n"); - state_toset = ERFON; + e_rfpowerstate_toset = ERFON; ppsc->hwradiooff = false; - actuallyset = true; - } else if ((ppsc->hwradiooff == false) && (state_toset == ERFOFF)) { + b_actuallyset = true; + } else if ((!ppsc->hwradiooff) && + (e_rfpowerstate_toset == ERFOFF)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio OFF, RF OFF\n"); - state_toset = ERFOFF; + e_rfpowerstate_toset = ERFOFF; ppsc->hwradiooff = true; - actuallyset = true; + b_actuallyset = true; } - if (actuallyset) { + if (b_actuallyset) { spin_lock(&rtlpriv->locks.rf_ps_lock); ppsc->rfchange_inprogress = false; spin_unlock(&rtlpriv->locks.rf_ps_lock); @@ -2284,50 +2358,19 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) *valid = 1; return !ppsc->hwradiooff; -} - -static void add_one_key(struct ieee80211_hw *hw, u8 *macaddr, - struct rtl_mac *mac, u32 key, u32 id, - u8 enc_algo, bool is_pairwise) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - - RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "add one entry\n"); - if (is_pairwise) { - RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set Pairwise key\n"); - rtl_cam_add_one_entry(hw, macaddr, key, id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[key]); - } else { - RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set group key\n"); - - if (mac->opmode == NL80211_IFTYPE_ADHOC) { - rtl_cam_add_one_entry(hw, rtlefuse->dev_addr, - PAIRWISE_KEYIDX, - CAM_PAIRWISE_KEY_POSITION, - enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[id]); - } - - rtl_cam_add_one_entry(hw, macaddr, key, id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[id]); - } } -void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key, - u8 *mac_ad, bool is_group, u8 enc_algo, +void 
rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, bool is_wepkey, bool clear_all) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u8 *macaddr = mac_ad; - u32 id = 0; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; bool is_pairwise = false; - static u8 cam_const_addr[4][6] = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, @@ -2372,122 +2415,176 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key, break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + "switch case not process\n"); enc_algo = CAM_TKIP; break; } if (is_wepkey || rtlpriv->sec.use_defaultkey) { - macaddr = cam_const_addr[key]; - id = key; + macaddr = cam_const_addr[key_index]; + entry_id = key_index; } else { if (is_group) { macaddr = cam_const_broad; - id = key; + entry_id = key_index; } else { if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_MESH_POINT) { - id = rtl_cam_get_free_entry(hw, mac_ad); - if (id >= TOTAL_CAM_ENTRY) { + entry_id = + rtl_cam_get_free_entry(hw, p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "Can not find free hw security cam entry\n"); return; } } else { - id = CAM_PAIRWISE_KEY_POSITION; + entry_id = CAM_PAIRWISE_KEY_POSITION; } - - key = PAIRWISE_KEYIDX; + key_index = PAIRWISE_KEYIDX; is_pairwise = true; } } - if (rtlpriv->sec.key_len[key] == 0) { + if (rtlpriv->sec.key_len[key_index] == 0) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, - "delete one entry, id is %d\n", id); + "delete one entry, entry_id is %d\n", + entry_id); if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_MESH_POINT) - rtl_cam_del_entry(hw, mac_ad); - rtl_cam_delete_one_entry(hw, mac_ad, id); + mac->opmode == NL80211_IFTYPE_MESH_POINT) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); } else { - add_one_key(hw, macaddr, mac, key, id, enc_algo, - is_pairwise); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "add one entry\n"); + if (is_pairwise) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set Pairwise key\n"); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set group key\n"); + + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + } } } static void rtl8188ee_bt_var_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rppriv = rtl_pcipriv(hw); - struct bt_coexist_info coexist = rppriv->bt_coexist; + struct rtl_priv *rtlpriv = rtl_priv(hw); - coexist.bt_coexistence = rppriv->bt_coexist.eeprom_bt_coexist; - coexist.bt_ant_num = coexist.eeprom_bt_ant_num; - coexist.bt_coexist_type = coexist.eeprom_bt_type; + rtlpriv->btcoexist.bt_coexistence = + rtlpriv->btcoexist.eeprom_bt_coexist; + rtlpriv->btcoexist.bt_ant_num = rtlpriv->btcoexist.eeprom_bt_ant_num; + rtlpriv->btcoexist.bt_coexist_type = rtlpriv->btcoexist.eeprom_bt_type; - if (coexist.reg_bt_iso == 2) - coexist.bt_ant_isolation = coexist.eeprom_bt_ant_isol; + if (rtlpriv->btcoexist.reg_bt_iso == 2) + 
rtlpriv->btcoexist.bt_ant_isolation = + rtlpriv->btcoexist.eeprom_bt_ant_isol; else - coexist.bt_ant_isolation = coexist.reg_bt_iso; - - coexist.bt_radio_shared_type = coexist.eeprom_bt_radio_shared; - - if (coexist.bt_coexistence) { - if (coexist.reg_bt_sco == 1) - coexist.bt_service = BT_OTHER_ACTION; - else if (coexist.reg_bt_sco == 2) - coexist.bt_service = BT_SCO; - else if (coexist.reg_bt_sco == 4) - coexist.bt_service = BT_BUSY; - else if (coexist.reg_bt_sco == 5) - coexist.bt_service = BT_OTHERBUSY; + rtlpriv->btcoexist.bt_ant_isolation = + rtlpriv->btcoexist.reg_bt_iso; + + rtlpriv->btcoexist.bt_radio_shared_type = + rtlpriv->btcoexist.eeprom_bt_radio_shared; + + if (rtlpriv->btcoexist.bt_coexistence) { + if (rtlpriv->btcoexist.reg_bt_sco == 1) + rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION; + else if (rtlpriv->btcoexist.reg_bt_sco == 2) + rtlpriv->btcoexist.bt_service = BT_SCO; + else if (rtlpriv->btcoexist.reg_bt_sco == 4) + rtlpriv->btcoexist.bt_service = BT_BUSY; + else if (rtlpriv->btcoexist.reg_bt_sco == 5) + rtlpriv->btcoexist.bt_service = BT_OTHERBUSY; else - coexist.bt_service = BT_IDLE; + rtlpriv->btcoexist.bt_service = BT_IDLE; - coexist.bt_edca_ul = 0; - coexist.bt_edca_dl = 0; - coexist.bt_rssi_state = 0xff; + rtlpriv->btcoexist.bt_edca_ul = 0; + rtlpriv->btcoexist.bt_edca_dl = 0; + rtlpriv->btcoexist.bt_rssi_state = 0xff; } } void rtl8188ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, bool auto_load_fail, u8 *hwinfo) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + + if (!auto_load_fail) { + rtlpriv->btcoexist.eeprom_bt_coexist = + ((hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & 0xe0) >> 5); + if (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] == 0xFF) + rtlpriv->btcoexist.eeprom_bt_coexist = 0; + value = hwinfo[EEPROM_RF_BT_SETTING_88E]; + rtlpriv->btcoexist.eeprom_bt_type = ((value & 0xe) >> 1); + rtlpriv->btcoexist.eeprom_bt_ant_num = (value & 0x1); + rtlpriv->btcoexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4); + rtlpriv->btcoexist.eeprom_bt_radio_shared = + ((value & 0x20) >> 5); + } else { + rtlpriv->btcoexist.eeprom_bt_coexist = 0; + rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE; + rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2; + rtlpriv->btcoexist.eeprom_bt_ant_isol = 0; + rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; + } + rtl8188ee_bt_var_init(hw); } void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rppriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); /* 0:Low, 1:High, 2:From Efuse. */ - rppriv->bt_coexist.reg_bt_iso = 2; + rtlpriv->btcoexist.reg_bt_iso = 2; /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ - rppriv->bt_coexist.reg_bt_sco = 3; + rtlpriv->btcoexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. 
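+	 * Note: the reg_bt_sco field initialised above is reused and
+	 * overwritten just below for the A-MPDU control setting.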
*/ - rppriv->bt_coexist.reg_bt_sco = 0; + rtlpriv->btcoexist.reg_bt_sco = 0; } void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_pci_priv *rppriv = rtl_pcipriv(hw); - struct bt_coexist_info coexist = rppriv->bt_coexist; + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 u1_tmp; - if (coexist.bt_coexistence && - ((coexist.bt_coexist_type == BT_CSR_BC4) || - coexist.bt_coexist_type == BT_CSR_BC8)) { - if (coexist.bt_ant_isolation) + if (rtlpriv->btcoexist.bt_coexistence && + ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) || + rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) { + if (rtlpriv->btcoexist.bt_ant_isolation) rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & - BIT_OFFSET_LEN_MASK_32(0, 1); - u1_tmp = u1_tmp | ((coexist.bt_ant_isolation == 1) ? + BIT_OFFSET_LEN_MASK_32(0, 1); + u1_tmp = u1_tmp | + ((rtlpriv->btcoexist.bt_ant_isolation == 1) ? 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) | - ((coexist.bt_service == BT_SCO) ? + ((rtlpriv->btcoexist.bt_service == BT_SCO) ? 0 : BIT_OFFSET_LEN_MASK_32(2, 1)); rtl_write_byte(rtlpriv, 0x4fd, u1_tmp); diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/rtlwifi/rtl8188ee/led.c index c81a9cb6894c62c5e7b834432b02fc869e80bbf0..b504bd092fc4b36e62416a6cf6dedba94b9230f3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/led.c @@ -32,8 +32,8 @@ #include "reg.h" #include "led.h" -static void rtl88ee_init_led(struct ieee80211_hw *hw, - struct rtl_led *pled, enum rtl_led_pin ledpin) +static void _rtl88ee_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, enum rtl_led_pin ledpin) { pled->hw = hw; pled->ledpin = ledpin; @@ -46,23 +46,23 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, - "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); - rtl_write_byte(rtlpriv, REG_LEDCFG2, - (ledcfg & 0xf0) | BIT(5) | BIT(6)); + rtl_write_byte(rtlpriv, + REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6)); break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } pled->ledon = true; @@ -73,10 +73,9 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 ledcfg; - u8 val; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, - "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: @@ -84,15 +83,15 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) case LED_PIN_LED0: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); ledcfg &= 0xf0; - val = ledcfg | BIT(3) | BIT(5) | BIT(6); - if (pcipriv->ledctl.led_opendrain == true) { - rtl_write_byte(rtlpriv, REG_LEDCFG2, val); + if (pcipriv->ledctl.led_opendrain) { + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5) | BIT(6))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); - val = 
ledcfg & 0xFE; - rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, val); - } else { - rtl_write_byte(rtlpriv, REG_LEDCFG2, val); - } + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, + (ledcfg & 0xFE)); + } else + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5) | BIT(6))); break; case LED_PIN_LED1: ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); @@ -100,8 +99,8 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3))); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } pled->ledon = false; @@ -110,17 +109,15 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) void rtl88ee_init_sw_leds(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - - rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - rtl88ee_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); } -static void rtl88ee_sw_led_control(struct ieee80211_hw *hw, +static void _rtl88ee_sw_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); - switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: @@ -152,6 +149,6 @@ void rtl88ee_led_control(struct ieee80211_hw *hw, return; } RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", - ledaction); - rtl88ee_sw_led_control(hw, ledaction); + ledaction); + _rtl88ee_sw_led_control(hw, ledaction); } diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/rtlwifi/rtl8188ee/led.h index 4073f6f847b2cb77f6d0619ee52b97565f20e509..4b325b75faafe2ea4784df46977044f02e70f035 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c index 1cd6c16d597ee40a01e3b529ea4faca80f22c773..3f6c59cdeababd42c1ab5692ffe15c9d2b897956 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -29,7 +25,6 @@ #include "../wifi.h" #include "../pci.h" -#include "../core.h" #include "../ps.h" #include "reg.h" #include "def.h" @@ -38,443 +33,32 @@ #include "dm.h" #include "table.h" -static void set_baseband_phy_config(struct ieee80211_hw *hw); -static void set_baseband_agc_config(struct ieee80211_hw *hw); -static void store_pwrindex_offset(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, - u32 data); -static bool check_cond(struct ieee80211_hw *hw, const u32 condition); - -static u32 rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *phreg = &rtlphy->phyreg_def[rfpath]; - u32 newoffset; - u32 tmplong, tmplong2; - u8 rfpi_enable = 0; - u32 ret; - int jj = RF90_PATH_A; - int kk = RF90_PATH_B; - - offset &= 0xff; - newoffset = offset; - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); - return 0xFFFFFFFF; - } - tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); - if (rfpath == jj) - tmplong2 = tmplong; - else - tmplong2 = rtl_get_bbreg(hw, phreg->rfhssi_para2, MASKDWORD); - tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | - (newoffset << 23) | BLSSIREADEDGE; - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong & (~BLSSIREADEDGE)); - mdelay(1); - rtl_set_bbreg(hw, phreg->rfhssi_para2, MASKDWORD, tmplong2); - mdelay(2); - if (rfpath == jj) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); - else if (rfpath == kk) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, - BIT(8)); - if (rfpi_enable) - ret = rtl_get_bbreg(hw, phreg->rf_rbpi, BLSSIREADBACKDATA); - else - ret = rtl_get_bbreg(hw, phreg->rf_rb, BLSSIREADBACKDATA); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]= 0x%x\n", - rfpath, phreg->rf_rb, ret); - return ret; -} - -static void rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, - u32 data) -{ - u32 data_and_addr; - u32 newoffset; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *phreg = &rtlphy->phyreg_def[rfpath]; - - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); - return; - } - offset &= 0xff; - newoffset = offset; - data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; - rtl_set_bbreg(hw, phreg->rf3wire_offset, MASKDWORD, data_and_addr); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]= 0x%x\n", - rfpath, phreg->rf3wire_offset, data_and_addr); -} - -static u32 cal_bit_shift(u32 bitmask) -{ - u32 i; - - for (i = 0; i <= 31; i++) { - if (((bitmask >> i) & 0x1) == 1) - break; - } - return i; -} - -static bool config_bb_with_header(struct ieee80211_hw *hw, - u8 configtype) -{ - if (configtype == BASEBAND_CONFIG_PHY_REG) - set_baseband_phy_config(hw); - else if (configtype == BASEBAND_CONFIG_AGC_TAB) - set_baseband_agc_config(hw); - return true; -} - -static bool config_bb_with_pgheader(struct ieee80211_hw *hw, - u8 configtype) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - int i; - u32 *table_pg; - u16 tbl_page_len; - u32 v1 = 0, v2 = 0; - - tbl_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN; - table_pg = RTL8188EEPHY_REG_ARRAY_PG; - - if (configtype == BASEBAND_CONFIG_PHY_REG) { - for (i = 0; i < tbl_page_len; i = i + 3) { - v1 = table_pg[i]; - v2 = table_pg[i + 1]; - - if (v1 < 0xcdcdcdcd) { - rtl_addr_delay(table_pg[i]); - - store_pwrindex_offset(hw, table_pg[i], - table_pg[i + 1], - 
table_pg[i + 2]); - continue; - } else { - if (!check_cond(hw, table_pg[i])) { - /*don't need the hw_body*/ - i += 2; /* skip the pair of expression*/ - v1 = table_pg[i]; - v2 = table_pg[i + 1]; - while (v2 != 0xDEAD) { - i += 3; - v1 = table_pg[i]; - v2 = table_pg[i + 1]; - } - } - } - } - } else { - RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "configtype != BaseBand_Config_PHY_REG\n"); - } - return true; -} - -static bool config_parafile(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw)); - bool rtstatus; - - rtstatus = config_bb_with_header(hw, BASEBAND_CONFIG_PHY_REG); - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); - return false; - } - - if (fuse->autoload_failflag == false) { - rtlphy->pwrgroup_cnt = 0; - rtstatus = config_bb_with_pgheader(hw, BASEBAND_CONFIG_PHY_REG); - } - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); - return false; - } - rtstatus = config_bb_with_header(hw, BASEBAND_CONFIG_AGC_TAB); - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); - return false; - } - rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, 0x200)); - - return true; -} - -static void rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - int jj = RF90_PATH_A; - int kk = RF90_PATH_B; - - rtlphy->phyreg_def[jj].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[kk].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; - - rtlphy->phyreg_def[jj].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[kk].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; - - rtlphy->phyreg_def[jj].rfintfo = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[kk].rfintfo = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[jj].rfintfe = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[kk].rfintfe = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[jj].rf3wire_offset = RFPGA0_XA_LSSIPARAMETER; - rtlphy->phyreg_def[kk].rf3wire_offset = RFPGA0_XB_LSSIPARAMETER; - - rtlphy->phyreg_def[jj].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[kk].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; - - rtlphy->phyreg_def[jj].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[kk].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; - - rtlphy->phyreg_def[jj].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; - rtlphy->phyreg_def[kk].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; - - rtlphy->phyreg_def[jj].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; - rtlphy->phyreg_def[kk].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; - - rtlphy->phyreg_def[jj].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[kk].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; 
- - rtlphy->phyreg_def[jj].rfagc_control1 = ROFDM0_XAAGCCORE1; - rtlphy->phyreg_def[kk].rfagc_control1 = ROFDM0_XBAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; - - rtlphy->phyreg_def[jj].rfagc_control2 = ROFDM0_XAAGCCORE2; - rtlphy->phyreg_def[kk].rfagc_control2 = ROFDM0_XBAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; - - rtlphy->phyreg_def[jj].rfrxiq_imbal = ROFDM0_XARXIQIMBAL; - rtlphy->phyreg_def[kk].rfrxiq_imbal = ROFDM0_XBRXIQIMBAL; - rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBAL; - rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBAL; - - rtlphy->phyreg_def[jj].rfrx_afe = ROFDM0_XARXAFE; - rtlphy->phyreg_def[kk].rfrx_afe = ROFDM0_XBRXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; - - rtlphy->phyreg_def[jj].rftxiq_imbal = ROFDM0_XATXIQIMBAL; - rtlphy->phyreg_def[kk].rftxiq_imbal = ROFDM0_XBTXIQIMBAL; - rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBAL; - rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBAL; - - rtlphy->phyreg_def[jj].rftx_afe = ROFDM0_XATXAFE; - rtlphy->phyreg_def[kk].rftx_afe = ROFDM0_XBTXAFE; - - rtlphy->phyreg_def[jj].rf_rb = RFPGA0_XA_LSSIREADBACK; - rtlphy->phyreg_def[kk].rf_rb = RFPGA0_XB_LSSIREADBACK; - - rtlphy->phyreg_def[jj].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; - rtlphy->phyreg_def[kk].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; -} - -static bool rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, - u32 cmdtableidx, u32 cmdtablesz, - enum swchnlcmd_id cmdid, - u32 para1, u32 para2, u32 msdelay) -{ - struct swchnlcmd *pcmd; - - if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); - return false; - } - - if (cmdtableidx >= cmdtablesz) - return false; - - pcmd = cmdtable + cmdtableidx; - pcmd->cmdid = cmdid; - pcmd->para1 = para1; - pcmd->para2 = para2; - pcmd->msdelay = msdelay; - return true; -} - -static bool chnl_step_by_step(struct ieee80211_hw *hw, - u8 channel, u8 *stage, u8 *step, - u32 *delay) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; - u32 precommoncmdcnt; - struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; - u32 postcommoncmdcnt; - struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; - u32 rfdependcmdcnt; - struct swchnlcmd *currentcmd = NULL; - u8 rfpath; - u8 num_total_rfpath = rtlphy->num_total_rfpath; - - precommoncmdcnt = 0; - rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, - CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); - rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); - - postcommoncmdcnt = 0; - - rtl88e_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, - MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); - - rfdependcmdcnt = 0; - - RT_ASSERT((channel >= 1 && channel <= 14), - "illegal channel for Zebra: %d\n", channel); - - rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, - RF_CHNLBW, channel, 10); - - rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, - 0); - - do { - switch (*stage) { - case 0: - currentcmd = &precommoncmd[*step]; - break; - case 1: - currentcmd = &rfdependcmd[*step]; - break; - 
case 2: - currentcmd = &postcommoncmd[*step]; - break; - } - - if (currentcmd->cmdid == CMDID_END) { - if ((*stage) == 2) { - return true; - } else { - (*stage)++; - (*step) = 0; - continue; - } - } - - switch (currentcmd->cmdid) { - case CMDID_SET_TXPOWEROWER_LEVEL: - rtl88e_phy_set_txpower_level(hw, channel); - break; - case CMDID_WRITEPORT_ULONG: - rtl_write_dword(rtlpriv, currentcmd->para1, - currentcmd->para2); - break; - case CMDID_WRITEPORT_USHORT: - rtl_write_word(rtlpriv, currentcmd->para1, - (u16) currentcmd->para2); - break; - case CMDID_WRITEPORT_UCHAR: - rtl_write_byte(rtlpriv, currentcmd->para1, - (u8) currentcmd->para2); - break; - case CMDID_RF_WRITEREG: - for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { - rtlphy->rfreg_chnlval[rfpath] = - ((rtlphy->rfreg_chnlval[rfpath] & - 0xfffffc00) | currentcmd->para2); - - rtl_set_rfreg(hw, (enum radio_path)rfpath, - currentcmd->para1, - RFREG_OFFSET_MASK, - rtlphy->rfreg_chnlval[rfpath]); - } - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; - } - - break; - } while (true); - - (*delay) = currentcmd->msdelay; - (*step)++; - return false; -} - -static long rtl88e_pwr_idx_dbm(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - u8 txpwridx) -{ - long offset; - long pwrout_dbm; - - switch (wirelessmode) { - case WIRELESS_MODE_B: - offset = -7; - break; - case WIRELESS_MODE_G: - case WIRELESS_MODE_N_24G: - offset = -8; - break; - default: - offset = -8; - break; - } - pwrout_dbm = txpwridx / 2 + offset; - return pwrout_dbm; -} - -static void rtl88e_phy_set_io(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress); - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; - /*rtl92c_dm_write_dig(hw);*/ - rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel); - rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; - dm_digtable->cur_igvalue = 0x17; - rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; - } - rtlphy->set_io_inprogress = false; - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "(%#x)\n", rtlphy->current_io_type); -} +static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask); +static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw); +static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, + u8 configtype); +static void _rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); +static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, u32 para1, + u32 para2, u32 msdelay); +static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 
*stage, u8 *step, + u32 *delay); + +static long _rtl88e_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl88e_phy_set_io(struct ieee80211_hw *hw); u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { @@ -484,14 +68,15 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = cal_bit_shift(bitmask); + bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n", bitmask, + "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr, originalvalue); return returnvalue; + } void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw, @@ -501,12 +86,12 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw, u32 originalvalue, bitshift; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x),data(%#x)\n", + "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask, data); if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = cal_bit_shift(bitmask); + bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } @@ -531,8 +116,8 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); - original_value = rf_serial_read(hw, rfpath, regaddr); - bitshift = cal_bit_shift(bitmask); + original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr); + bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); @@ -540,7 +125,6 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", regaddr, rfpath, bitmask, original_value); - return readback_value; } @@ -559,13 +143,16 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); if (bitmask != RFREG_OFFSET_MASK) { - original_value = rf_serial_read(hw, rfpath, regaddr); - bitshift = cal_bit_shift(bitmask); - data = ((original_value & (~bitmask)) | - (data << bitshift)); + original_value = _rtl88e_phy_rf_serial_read(hw, + rfpath, + regaddr); + bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); } - rf_serial_write(hw, rfpath, regaddr, data); + _rtl88e_phy_rf_serial_write(hw, rfpath, regaddr, data); spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); @@ -575,27 +162,91 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw, regaddr, bitmask, data, rfpath); } -static bool config_mac_with_header(struct ieee80211_hw *hw) +static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 newoffset; + u32 tmplong, tmplong2; + u8 rfpi_enable = 0; + u32 retvalue; + + offset &= 0xff; + newoffset = offset; + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + return 0xFFFFFFFF; + } + tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); 
+ if (rfpath == RF90_PATH_A) + tmplong2 = tmplong; + else + tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); + tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | + (newoffset << 23) | BLSSIREADEDGE; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong & (~BLSSIREADEDGE)); + mdelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); + mdelay(2); + if (rfpath == RF90_PATH_A) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + else if (rfpath == RF90_PATH_B) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + BIT(8)); + if (rfpi_enable) + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, + BLSSIREADBACKDATA); + else + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, + BLSSIREADBACKDATA); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFR-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf_rb, retvalue); + return retvalue; +} + +static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) { + u32 data_and_addr; + u32 newoffset; struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + return; + } + offset &= 0xff; + newoffset = offset; + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, data_and_addr); +} + +static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask) +{ u32 i; - u32 arraylength; - u32 *ptrarray; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n"); - arraylength = RTL8188EEMAC_1T_ARRAYLEN; - ptrarray = RTL8188EEMAC_1T_ARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]); - return true; + for (i = 0; i <= 31; i++) { + if (((bitmask >> i) & 0x1) == 1) + break; + } + return i; } bool rtl88e_phy_mac_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - bool rtstatus = config_mac_with_header(hw); + bool rtstatus = _rtl88e_phy_config_mac_with_headerfile(hw); rtl_write_byte(rtlpriv, 0x04CA, 0x0B); return rtstatus; @@ -606,9 +257,9 @@ bool rtl88e_phy_bb_config(struct ieee80211_hw *hw) bool rtstatus = true; struct rtl_priv *rtlpriv = rtl_priv(hw); u16 regval; - u8 reg_hwparafile = 1; + u8 b_reg_hwparafile = 1; u32 tmp; - rtl88e_phy_init_bb_rf_register_definition(hw); + _rtl88e_phy_init_bb_rf_register_definition(hw); regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) | BIT(0) | BIT(1)); @@ -619,8 +270,8 @@ bool rtl88e_phy_bb_config(struct ieee80211_hw *hw) FEN_BB_GLB_RSTN | FEN_BBRSTB); tmp = rtl_read_dword(rtlpriv, 0x4c); rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23)); - if (reg_hwparafile == 1) - rtstatus = config_parafile(hw); + if (b_reg_hwparafile == 1) + rtstatus = _rtl88e_phy_bb8188e_config_parafile(hw); return rtstatus; } @@ -629,12 +280,12 @@ bool rtl88e_phy_rf_config(struct ieee80211_hw *hw) return rtl88e_phy_rf6052_config(hw); } -static bool check_cond(struct ieee80211_hw *hw, +static bool _rtl88e_check_condition(struct ieee80211_hw *hw, const u32 condition) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw)); - 
u32 _board = fuse->board_type; /*need efuse define*/ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ u32 _interface = rtlhal->interface; u32 _platform = 0x08;/*SupportPlatform */ u32 cond = condition; @@ -658,450 +309,811 @@ static bool check_cond(struct ieee80211_hw *hw, return true; } -static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw, - u32 addr, u32 data, enum radio_path rfpath, +static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw, u32 addr, + u32 data, enum radio_path rfpath, u32 regaddr) { - rtl_rfreg_delay(hw, rfpath, regaddr, - RFREG_OFFSET_MASK, - data); + if (addr == 0xffe) { + mdelay(50); + } else if (addr == 0xfd) { + mdelay(5); + } else if (addr == 0xfc) { + mdelay(1); + } else if (addr == 0xfb) { + udelay(50); + } else if (addr == 0xfa) { + udelay(5); + } else if (addr == 0xf9) { + udelay(1); + } else { + rtl_set_rfreg(hw, rfpath, regaddr, + RFREG_OFFSET_MASK, + data); + udelay(1); + } } -static void rtl88_config_s(struct ieee80211_hw *hw, - u32 addr, u32 data) +static void _rtl8188e_config_rf_radio_a(struct ieee80211_hw *hw, + u32 addr, u32 data) { u32 content = 0x1000; /*RF Content: radio_a_txt*/ u32 maskforphyset = (u32)(content & 0xE000); _rtl8188e_config_rf_reg(hw, addr, data, RF90_PATH_A, - addr | maskforphyset); + addr | maskforphyset); +} + +static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if (addr == 0xfe) { + mdelay(50); + } else if (addr == 0xfd) { + mdelay(5); + } else if (addr == 0xfc) { + mdelay(1); + } else if (addr == 0xfb) { + udelay(50); + } else if (addr == 0xfa) { + udelay(5); + } else if (addr == 0xf9) { + udelay(1); + } else { + rtl_set_bbreg(hw, addr, MASKDWORD, data); + udelay(1); + } } -#define NEXT_PAIR(v1, v2, i) \ +static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + bool rtstatus; + + rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + return false; + } + + if (!rtlefuse->autoload_failflag) { + rtlphy->pwrgroup_cnt = 0; + rtstatus = + phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG); + } + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + return false; + } + rtstatus = + phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + return false; + } + rtlphy->cck_high_power = + (bool)(rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, 0x200)); + + return true; +} + +static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n"); + arraylength = RTL8188EEMAC_1T_ARRAYLEN; + ptrarray = RTL8188EEMAC_1T_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength); + for (i = 0; i < arraylength; i = i + 2) + rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); + return true; +} + +#define READ_NEXT_PAIR(v1, v2, i) \ do { \ i += 2; v1 = array_table[i]; \ - v2 = array_table[i + 1]; \ + v2 = array_table[i+1]; \ } while (0) -static void set_baseband_agc_config(struct ieee80211_hw *hw) +static void handle_branch1(struct ieee80211_hw *hw, u16 
arraylen, + u32 *array_table) { + u32 v1; + u32 v2; int i; - u32 *array_table; - u16 arraylen; - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; - arraylen = RTL8188EEAGCTAB_1TARRAYLEN; - array_table = RTL8188EEAGCTAB_1TARRAY; + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl8188e_config_bb_reg(hw, v1, v2); + } else { /*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= arraylen - 2) + break; + + if (!_rtl88e_check_condition(hw, array_table[i])) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + i -= 2; /* prevent from for-loop += 2*/ + } else { /* Configure matched pairs and skip + * to end of if-else. + */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen - 2) + _rtl8188e_config_bb_reg(hw, v1, v2); + READ_NEXT_PAIR(v1, v2, i); + + while (v2 != 0xDEAD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + } +} + +static void handle_branch2(struct ieee80211_hw *hw, u16 arraylen, + u32 *array_table) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1; + u32 v2; + int i; - for (i = 0; i < arraylen; i += 2) { + for (i = 0; i < arraylen; i = i + 2) { v1 = array_table[i]; - v2 = array_table[i + 1]; + v2 = array_table[i+1]; if (v1 < 0xCDCDCDCD) { rtl_set_bbreg(hw, array_table[i], MASKDWORD, array_table[i + 1]); udelay(1); continue; - } else {/*This line is the start line of branch.*/ - if (!check_cond(hw, array_table[i])) { + } else { /*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= arraylen - 2) + break; + + if (!_rtl88e_check_condition(hw, array_table[i])) { /*Discard the following (offset, data) pairs*/ - NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylen - 2) { - NEXT_PAIR(v1, v2, i); - } - i -= 2; /* compensate for loop's += 2*/ - } else { - /* Configure matched pairs and skip to end */ - NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && v2 != 0xCDEF && + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + i -= 2; /* prevent from for-loop += 2*/ + } else { /* Configure matched pairs and skip + * to end of if-else. 
+ */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && v2 != 0xCDCD && i < arraylen - 2) { rtl_set_bbreg(hw, array_table[i], MASKDWORD, array_table[i + 1]); udelay(1); - NEXT_PAIR(v1, v2, i); + READ_NEXT_PAIR(v1, v2, i); } while (v2 != 0xDEAD && i < arraylen - 2) - NEXT_PAIR(v1, v2, i); + READ_NEXT_PAIR(v1, v2, i); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], - array_table[i + 1]); + array_table[i], array_table[i + 1]); } } -static void set_baseband_phy_config(struct ieee80211_hw *hw) +static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) { - int i; u32 *array_table; u16 arraylen; - u32 v1 = 0, v2 = 0; - - arraylen = RTL8188EEPHY_REG_1TARRAYLEN; - array_table = RTL8188EEPHY_REG_1TARRAY; - - for (i = 0; i < arraylen; i += 2) { - v1 = array_table[i]; - v2 = array_table[i + 1]; - if (v1 < 0xcdcdcdcd) { - rtl_bb_delay(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - if (!check_cond(hw, array_table[i])) { - /*Discard the following (offset, data) pairs*/ - NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylen - 2) - NEXT_PAIR(v1, v2, i); - i -= 2; /* prevent from for-loop += 2*/ - } else { - /* Configure matched pairs and skip to end */ - NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && i < arraylen - 2) { - rtl_bb_delay(hw, v1, v2); - NEXT_PAIR(v1, v2, i); - } - while (v2 != 0xDEAD && i < arraylen - 2) - NEXT_PAIR(v1, v2, i); - } - } + if (configtype == BASEBAND_CONFIG_PHY_REG) { + arraylen = RTL8188EEPHY_REG_1TARRAYLEN; + array_table = RTL8188EEPHY_REG_1TARRAY; + handle_branch1(hw, arraylen, array_table); + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + arraylen = RTL8188EEAGCTAB_1TARRAYLEN; + array_table = RTL8188EEAGCTAB_1TARRAY; + handle_branch2(hw, arraylen, array_table); } + return true; } -static void store_pwrindex_offset(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, - u32 data) +static void store_pwrindex_rate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, + u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; + int count = rtlphy->pwrgroup_cnt; if (regaddr == RTXAGC_A_RATE18_06) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][0] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][0]); } if (regaddr == RTXAGC_A_RATE54_24) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][1] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][1]); } if (regaddr == RTXAGC_A_CCK1_MCS32) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][6] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][6]); } if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7] = data; + 
rtlphy->mcs_txpwrlevel_origoffset[count][7] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][7]); } if (regaddr == RTXAGC_A_MCS03_MCS00) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][2] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][2]); } if (regaddr == RTXAGC_A_MCS07_MCS04) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][3] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][3]); } if (regaddr == RTXAGC_A_MCS11_MCS08) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][4] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][4]); } if (regaddr == RTXAGC_A_MCS15_MCS12) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5] = data; - if (get_rf_type(rtlphy) == RF_1T1R) - rtlphy->pwrgroup_cnt++; + rtlphy->mcs_txpwrlevel_origoffset[count][5] = data; + if (get_rf_type(rtlphy) == RF_1T1R) { + count++; + rtlphy->pwrgroup_cnt = count; + } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][5]); } if (regaddr == RTXAGC_B_RATE18_06) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][8] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][8]); } if (regaddr == RTXAGC_B_RATE54_24) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][9] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][9]); } if (regaddr == RTXAGC_B_CCK1_55_MCS32) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][14] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][14]); } if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][15] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][15]); } if (regaddr == RTXAGC_B_MCS03_MCS00) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][10] = data; 
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][10]); } if (regaddr == RTXAGC_B_MCS07_MCS04) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][11] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][11]); } if (regaddr == RTXAGC_B_MCS11_MCS08) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][12] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12]); + count, + rtlphy->mcs_txpwrlevel_origoffset[count][12]); } if (regaddr == RTXAGC_B_MCS15_MCS12) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13] = data; + rtlphy->mcs_txpwrlevel_origoffset[count][13] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13]); - if (get_rf_type(rtlphy) != RF_1T1R) - rtlphy->pwrgroup_cnt++; + count, + rtlphy->mcs_txpwrlevel_origoffset[count][13]); + if (get_rf_type(rtlphy) != RF_1T1R) { + count++; + rtlphy->pwrgroup_cnt = count; + } } } -#define READ_NEXT_RF_PAIR(v1, v2, i) \ - do { \ - i += 2; v1 = a_table[i]; \ - v2 = a_table[i + 1]; \ - } while (0) +static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *phy_reg_page; + u16 phy_reg_page_len; + u32 v1 = 0, v2 = 0, v3 = 0; + + phy_reg_page_len = RTL8188EEPHY_REG_ARRAY_PGLEN; + phy_reg_page = RTL8188EEPHY_REG_ARRAY_PG; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_reg_page_len; i = i + 3) { + v1 = phy_reg_page[i]; + v2 = phy_reg_page[i+1]; + v3 = phy_reg_page[i+2]; + + if (v1 < 0xcdcdcdcd) { + if (phy_reg_page[i] == 0xfe) + mdelay(50); + else if (phy_reg_page[i] == 0xfd) + mdelay(5); + else if (phy_reg_page[i] == 0xfc) + mdelay(1); + else if (phy_reg_page[i] == 0xfb) + udelay(50); + else if (phy_reg_page[i] == 0xfa) + udelay(5); + else if (phy_reg_page[i] == 0xf9) + udelay(1); + + store_pwrindex_rate_offset(hw, phy_reg_page[i], + phy_reg_page[i + 1], + phy_reg_page[i + 2]); + continue; + } else { + if (!_rtl88e_check_condition(hw, + phy_reg_page[i])) { + /*don't need the hw_body*/ + i += 2; /* skip the pair of expression*/ + /* to protect 'i+1' 'i+2' not overrun */ + if (i >= phy_reg_page_len - 2) + break; + + v1 = phy_reg_page[i]; + v2 = phy_reg_page[i+1]; + v3 = phy_reg_page[i+2]; + while (v2 != 0xDEAD && + i < phy_reg_page_len - 5) { + i += 3; + v1 = phy_reg_page[i]; + v2 = phy_reg_page[i+1]; + v3 = phy_reg_page[i+2]; + } + } + } + } + } else { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "configtype != BaseBand_Config_PHY_REG\n"); + } + return true; +} + +#define READ_NEXT_RF_PAIR(v1, v2, i) \ +do { \ + i += 2; \ + v1 = radioa_array_table[i]; \ + v2 = radioa_array_table[i+1]; \ +} while (0) + +static void process_path_a(struct ieee80211_hw *hw, + u16 radioa_arraylen, + u32 *radioa_array_table) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 v1, v2; + int i; + + for (i = 0; i < radioa_arraylen; i = i + 2) { + v1 = radioa_array_table[i]; + v2 = radioa_array_table[i+1]; + if (v1 < 0xcdcdcdcd) 
{ + _rtl8188e_config_rf_radio_a(hw, v1, v2); + } else { /*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= radioa_arraylen - 2) + break; + + if (!_rtl88e_check_condition(hw, radioa_array_table[i])) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < radioa_arraylen - 2) { + READ_NEXT_RF_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else { /* Configure matched pairs and + * skip to end of if-else. + */ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < radioa_arraylen - 2) { + _rtl8188e_config_rf_radio_a(hw, v1, v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && + i < radioa_arraylen - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + + if (rtlhal->oem_id == RT_CID_819X_HP) + _rtl8188e_config_rf_radio_a(hw, 0x52, 0x7E4BD); +} bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - int i; - u32 *a_table; - u16 a_len; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 v1 = 0, v2 = 0; + bool rtstatus = true; + u32 *radioa_array_table; + u16 radioa_arraylen; - a_len = RTL8188EE_RADIOA_1TARRAYLEN; - a_table = RTL8188EE_RADIOA_1TARRAY; + radioa_arraylen = RTL8188EE_RADIOA_1TARRAYLEN; + radioa_array_table = RTL8188EE_RADIOA_1TARRAY; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", a_len); + "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < a_len; i = i + 2) { - v1 = a_table[i]; - v2 = a_table[i + 1]; - if (v1 < 0xcdcdcdcd) { - rtl88_config_s(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - if (!check_cond(hw, a_table[i])) { - /* Discard the following (offset, data) - * pairs - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && v2 != 0xCDEF && - v2 != 0xCDCD && i < a_len - 2) - READ_NEXT_RF_PAIR(v1, v2, i); - i -= 2; /* prevent from for-loop += 2*/ - } else { - /* Configure matched pairs and skip to - * end of if-else. 
- */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && v2 != 0xCDEF && - v2 != 0xCDCD && i < a_len - 2) { - rtl88_config_s(hw, v1, v2); - READ_NEXT_RF_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < a_len - 2) - READ_NEXT_RF_PAIR(v1, v2, i); - } - } - } + process_path_a(hw, radioa_arraylen, radioa_array_table); + break; + case RF90_PATH_B: + case RF90_PATH_C: + case RF90_PATH_D: + break; + } + return true; +} + +void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->default_initialgain[0] = + (u8)rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8)rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8)rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + + rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, + MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, + MASKDWORD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync); +} + +static void _rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = + RFPGA0_XA_LSSIPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = + RFPGA0_XB_LSSIPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; - if (rtlhal->oem_id == RT_CID_819X_HP) - rtl88_config_s(hw, 0x52, 0x7E4BD); + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; + 
rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; - break; + rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = + RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = + RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = + RFPGA0_XCD_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = + RFPGA0_XCD_SWITCHCONTROL; - case RF90_PATH_B: - case RF90_PATH_C: - case RF90_PATH_D: - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; - } - return true; -} + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; -void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; - rtlphy->default_initialgain[0] = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, - MASKBYTE0); - rtlphy->default_initialgain[1] = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, - MASKBYTE0); - rtlphy->default_initialgain[2] = rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, - MASKBYTE0); - rtlphy->default_initialgain[3] = rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, - MASKBYTE0); + rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE; + rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Default initial gain (c50 = 0x%x, c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3]); - - rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, - MASKBYTE0); - rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, - MASKDWORD); + rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Default framesync (0x%x) = 0x%x\n", - ROFDM0_RXDETECTOR3, rtlphy->framesync); + rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; + + rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; } void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 
level; - long dbm; - - level = rtlphy->cur_cck_txpwridx; - dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_B, level); - level = rtlphy->cur_ofdm24g_txpwridx; - if (rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_G, level) > dbm) - dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_G, level); - level = rtlphy->cur_ofdm24g_txpwridx; - if (rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_N_24G, level) > dbm) - dbm = rtl88e_pwr_idx_dbm(hw, WIRELESS_MODE_N_24G, level); - *powerlevel = dbm; + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = _rtl88e_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_B, txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl88e_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl88e_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl88e_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl88e_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static void handle_path_a(struct rtl_efuse *rtlefuse, u8 index, + u8 *cckpowerlevel, u8 *ofdmpowerlevel, + u8 *bw20powerlevel, u8 *bw40powerlevel) +{ + cckpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_cck[RF90_PATH_A][index]; + /*-8~7 */ + if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][index] > 0x0f) + bw20powerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index] - + (~(rtlefuse->txpwr_ht20diff[RF90_PATH_A][index]) + 1); + else + bw20powerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index] + + rtlefuse->txpwr_ht20diff[RF90_PATH_A][index]; + if (rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][index] > 0xf) + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index] - + (~(rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][index])+1); + else + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index] + + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][index]; + bw40powerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index]; } static void _rtl88e_get_txpower_index(struct ieee80211_hw *hw, u8 channel, - u8 *cckpower, u8 *ofdm, u8 *bw20_pwr, - u8 *bw40_pwr) + u8 *cckpowerlevel, u8 *ofdmpowerlevel, + u8 *bw20powerlevel, u8 *bw40powerlevel) { - struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw)); - u8 i = (channel - 1); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = (channel - 1); u8 rf_path = 0; - int jj = RF90_PATH_A; - int kk = RF90_PATH_B; for (rf_path = 0; rf_path < 2; rf_path++) { - if (rf_path == jj) { - cckpower[jj] = fuse->txpwrlevel_cck[jj][i]; - if (fuse->txpwr_ht20diff[jj][i] > 0x0f) /*-8~7 */ - bw20_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i] - - (~(fuse->txpwr_ht20diff[jj][i]) + 1); - else - bw20_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i] + - fuse->txpwr_ht20diff[jj][i]; - if (fuse->txpwr_legacyhtdiff[jj][i] > 0xf) - ofdm[jj] = fuse->txpwrlevel_ht40_1s[jj][i] - - (~(fuse->txpwr_legacyhtdiff[jj][i])+1); - else - ofdm[jj] = fuse->txpwrlevel_ht40_1s[jj][i] + - fuse->txpwr_legacyhtdiff[jj][i]; - bw40_pwr[jj] = fuse->txpwrlevel_ht40_1s[jj][i]; - - } else if (rf_path == kk) { - cckpower[kk] = fuse->txpwrlevel_cck[kk][i]; - bw20_pwr[kk] = fuse->txpwrlevel_ht40_1s[kk][i] + - fuse->txpwr_ht20diff[kk][i]; - ofdm[kk] = fuse->txpwrlevel_ht40_1s[kk][i] + - fuse->txpwr_legacyhtdiff[kk][i]; - bw40_pwr[kk] = fuse->txpwrlevel_ht40_1s[kk][i]; + if (rf_path == RF90_PATH_A) { + handle_path_a(rtlefuse, 
index, cckpowerlevel, + ofdmpowerlevel, bw20powerlevel, + bw40powerlevel); + } else if (rf_path == RF90_PATH_B) { + cckpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_cck[RF90_PATH_B][index]; + bw20powerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index] + + rtlefuse->txpwr_ht20diff[RF90_PATH_B][index]; + ofdmpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index] + + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][index]; + bw40powerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index]; } } + } static void _rtl88e_ccxpower_index_check(struct ieee80211_hw *hw, - u8 channel, u8 *cckpower, - u8 *ofdm, u8 *bw20_pwr, - u8 *bw40_pwr) + u8 channel, u8 *cckpowerlevel, + u8 *ofdmpowerlevel, u8 *bw20powerlevel, + u8 *bw40powerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; + rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; + rtlphy->cur_bw20_txpwridx = bw20powerlevel[0]; + rtlphy->cur_bw40_txpwridx = bw40powerlevel[0]; - rtlphy->cur_cck_txpwridx = cckpower[0]; - rtlphy->cur_ofdm24g_txpwridx = ofdm[0]; - rtlphy->cur_bw20_txpwridx = bw20_pwr[0]; - rtlphy->cur_bw40_txpwridx = bw40_pwr[0]; } void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) { - struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw)); - u8 cckpower[MAX_TX_COUNT] = {0}, ofdm[MAX_TX_COUNT] = {0}; - u8 bw20_pwr[MAX_TX_COUNT] = {0}, bw40_pwr[MAX_TX_COUNT] = {0}; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 cckpowerlevel[MAX_TX_COUNT] = {0}; + u8 ofdmpowerlevel[MAX_TX_COUNT] = {0}; + u8 bw20powerlevel[MAX_TX_COUNT] = {0}; + u8 bw40powerlevel[MAX_TX_COUNT] = {0}; - if (fuse->txpwr_fromeprom == false) + if (!rtlefuse->txpwr_fromeprom) return; - _rtl88e_get_txpower_index(hw, channel, &cckpower[0], &ofdm[0], - &bw20_pwr[0], &bw40_pwr[0]); - _rtl88e_ccxpower_index_check(hw, channel, &cckpower[0], &ofdm[0], - &bw20_pwr[0], &bw40_pwr[0]); - rtl88e_phy_rf6052_set_cck_txpower(hw, &cckpower[0]); - rtl88e_phy_rf6052_set_ofdm_txpower(hw, &ofdm[0], &bw20_pwr[0], - &bw40_pwr[0], channel); + _rtl88e_get_txpower_index(hw, channel, + &cckpowerlevel[0], &ofdmpowerlevel[0], + &bw20powerlevel[0], &bw40powerlevel[0]); + _rtl88e_ccxpower_index_check(hw, channel, + &cckpowerlevel[0], &ofdmpowerlevel[0], + &bw20powerlevel[0], &bw40powerlevel[0]); + rtl88e_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); + rtl88e_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], + &bw20powerlevel[0], + &bw40powerlevel[0], channel); +} + +static long _rtl88e_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} + +void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 
*)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Unknown Scan Backup operation.\n"); + break; + } + } } void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 reg_bw_opmode; u8 reg_prsr_rsc; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n", - rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? - "20MHz" : "40MHz"); + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz"); if (is_hal_stop(rtlhal)) { rtlphy->set_bwmode_inprogress = false; @@ -1162,7 +1174,7 @@ void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw, enum nl80211_channel_type ch_type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tmp_bw = rtlphy->current_chan_bw; @@ -1173,7 +1185,7 @@ void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw, rtl88e_phy_set_bw_mode_callback(hw); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "FALSE driver sleep or unload\n"); + "false driver sleep or unload\n"); rtlphy->set_bwmode_inprogress = false; rtlphy->current_chan_bw = tmp_bw; } @@ -1183,7 +1195,7 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 delay; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, @@ -1193,9 +1205,9 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw) do { if (!rtlphy->sw_chnl_inprogress) break; - if (!chnl_step_by_step(hw, rtlphy->current_channel, - &rtlphy->sw_chnl_stage, - &rtlphy->sw_chnl_step, &delay)) { + if (!_rtl88e_phy_sw_chnl_step_by_step + (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage, + &rtlphy->sw_chnl_step, &delay)) { if (delay > 0) mdelay(delay); else @@ -1211,7 +1223,7 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw) u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (rtlphy->sw_chnl_inprogress) @@ -1237,9 +1249,140 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) return 1; } +static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; + u32 precommoncmdcnt; + struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; + u32 postcommoncmdcnt; + struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; + u32 rfdependcmdcnt; + struct swchnlcmd *currentcmd = NULL; + u8 rfpath; + u8 num_total_rfpath = rtlphy->num_total_rfpath; + + precommoncmdcnt = 0; + _rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, + CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); + _rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + + postcommoncmdcnt = 0; + + _rtl88e_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, + MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); + + rfdependcmdcnt = 0; + + RT_ASSERT((channel >= 1 && channel <= 14), + "illegal channel for Zebra: %d\n", channel); + + 
_rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, + RF_CHNLBW, channel, 10); + + _rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, + 0); + + do { + switch (*stage) { + case 0: + currentcmd = &precommoncmd[*step]; + break; + case 1: + currentcmd = &rfdependcmd[*step]; + break; + case 2: + currentcmd = &postcommoncmd[*step]; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Invalid 'stage' = %d, Check it!\n", *stage); + return true; + } + + if (currentcmd->cmdid == CMDID_END) { + if ((*stage) == 2) + return true; + (*stage)++; + (*step) = 0; + continue; + } + + switch (currentcmd->cmdid) { + case CMDID_SET_TXPOWEROWER_LEVEL: + rtl88e_phy_set_txpower_level(hw, channel); + break; + case CMDID_WRITEPORT_ULONG: + rtl_write_dword(rtlpriv, currentcmd->para1, + currentcmd->para2); + break; + case CMDID_WRITEPORT_USHORT: + rtl_write_word(rtlpriv, currentcmd->para1, + (u16)currentcmd->para2); + break; + case CMDID_WRITEPORT_UCHAR: + rtl_write_byte(rtlpriv, currentcmd->para1, + (u8)currentcmd->para2); + break; + case CMDID_RF_WRITEREG: + for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] = + ((rtlphy->rfreg_chnlval[rfpath] & + 0xfffffc00) | currentcmd->para2); + + rtl_set_rfreg(hw, (enum radio_path)rfpath, + currentcmd->para1, + RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[rfpath]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + break; + } while (true); + + (*delay) = currentcmd->msdelay; + (*step)++; + return false; +} + +static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, u32 msdelay) +{ + struct swchnlcmd *pcmd; + + if (cmdtable == NULL) { + RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + return false; + } + + if (cmdtableidx >= cmdtablesz) + return false; + + pcmd = cmdtable + cmdtableidx; + pcmd->cmdid = cmdid; + pcmd->para1 = para1; + pcmd->para2 = para2; + pcmd->msdelay = msdelay; + return true; +} + static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) { - u32 reg_eac, reg_e94, reg_e9c; + u32 reg_eac, reg_e94, reg_e9c, reg_ea4; u8 result = 0x00; rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c); @@ -1256,6 +1399,7 @@ static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); if (!(reg_eac & BIT(28)) && (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && @@ -1295,15 +1439,14 @@ static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) { u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u32temp; u8 result = 0x00; - int jj = RF90_PATH_A; /*Get TXIMR Setting*/ /*Modify RX IQK mode table*/ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); - rtl_set_rfreg(hw, jj, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); - rtl_set_rfreg(hw, jj, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); - rtl_set_rfreg(hw, jj, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); - rtl_set_rfreg(hw, jj, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, 
RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b); rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); /*IQK Setting*/ @@ -1318,7 +1461,7 @@ static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) /*LO calibration Setting*/ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); - /*one shot, path A LOK & iqk*/ + /*one shot,path A LOK & iqk*/ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); @@ -1336,16 +1479,16 @@ static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) else return result; - u32temp = 0x80007C00 | (reg_e94&0x3FF0000) | + u32temp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16); rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32temp); /*RX IQK*/ /*Modify RX IQK mode table*/ rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); - rtl_set_rfreg(hw, jj, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); - rtl_set_rfreg(hw, jj, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); - rtl_set_rfreg(hw, jj, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); - rtl_set_rfreg(hw, jj, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa); rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); /*IQK Setting*/ @@ -1359,7 +1502,7 @@ static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) /*LO calibration Setting*/ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); - /*one shot, path A LOK & iqk*/ + /*one shot,path A LOK & iqk*/ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); @@ -1377,57 +1520,58 @@ static u8 _rtl88e_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) return result; } -static void fill_iqk(struct ieee80211_hw *hw, bool iqk_ok, long result[][8], - u8 final, bool btxonly) +static void _rtl88e_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, long result[][8], + u8 final_candidate, bool btxonly) { u32 oldval_0, x, tx0_a, reg; long y, tx0_c; - if (final == 0xFF) { + if (final_candidate == 0xFF) { return; } else if (iqk_ok) { - oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBAL, + oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) >> 22) & 0x3FF; - x = result[final][0]; + x = result[final_candidate][0]; if ((x & 0x00000200) != 0) x = x | 0xFFFFFC00; tx0_a = (x * oldval_0) >> 8; - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, 0x3FF, tx0_a); - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(31), + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), ((x * oldval_0 >> 7) & 0x1)); - y = result[final][1]; + y = result[final_candidate][1]; if ((y & 0x00000200) != 0) - y |= 0xFFFFFC00; + y = y | 0xFFFFFC00; tx0_c = (y * oldval_0) >> 8; rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, ((tx0_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, 0x003F0000, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, (tx0_c & 0x3F)); - rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(29), + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), ((y * oldval_0 >> 7) & 0x1)); if (btxonly) return; - reg = result[final][2]; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBAL, 0x3FF, reg); - reg = result[final][3] & 0x3F; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBAL, 0xFC00, reg); - reg = 
(result[final][3] >> 6) & 0xF; + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][3] >> 6) & 0xF; rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); } } -static void save_adda_reg(struct ieee80211_hw *hw, - const u32 *addareg, u32 *backup, - u32 registernum) +static void _rtl88e_phy_save_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 registernum) { u32 i; for (i = 0; i < registernum; i++) - backup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); + addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); } -static void save_mac_reg(struct ieee80211_hw *hw, const u32 *macreg, - u32 *macbackup) +static void _rtl88e_phy_save_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i; @@ -1437,17 +1581,18 @@ static void save_mac_reg(struct ieee80211_hw *hw, const u32 *macreg, macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); } -static void reload_adda(struct ieee80211_hw *hw, const u32 *addareg, - u32 *backup, u32 reg_num) +static void _rtl88e_phy_reload_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 regiesternum) { u32 i; - for (i = 0; i < reg_num; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, backup[i]); + for (i = 0; i < regiesternum; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); } -static void reload_mac(struct ieee80211_hw *hw, const u32 *macreg, - u32 *macbackup) +static void _rtl88e_phy_reload_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i; @@ -1458,8 +1603,7 @@ static void reload_mac(struct ieee80211_hw *hw, const u32 *macreg, } static void _rtl88e_phy_path_adda_on(struct ieee80211_hw *hw, - const u32 *addareg, bool is_patha_on, - bool is2t) + u32 *addareg, bool is_patha_on, bool is2t) { u32 pathon; u32 i; @@ -1477,8 +1621,7 @@ static void _rtl88e_phy_path_adda_on(struct ieee80211_hw *hw, } static void _rtl88e_phy_mac_setting_calibration(struct ieee80211_hw *hw, - const u32 *macreg, - u32 *macbackup) + u32 *macreg, u32 *macbackup) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i = 0; @@ -1507,12 +1650,13 @@ static void _rtl88e_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode) rtl_set_bbreg(hw, 0x828, MASKDWORD, mode); } -static bool sim_comp(struct ieee80211_hw *hw, long result[][8], u8 c1, u8 c2) +static bool _rtl88e_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) { - u32 i, j, diff, bitmap, bound; + u32 i, j, diff, simularity_bitmap, bound; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 final[2] = {0xFF, 0xFF}; + u8 final_candidate[2] = { 0xFF, 0xFF }; bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version); if (is2t) @@ -1520,81 +1664,88 @@ static bool sim_comp(struct ieee80211_hw *hw, long result[][8], u8 c1, u8 c2) else bound = 4; - bitmap = 0; + simularity_bitmap = 0; for (i = 0; i < bound; i++) { diff = (result[c1][i] > result[c2][i]) ? 
- (result[c1][i] - result[c2][i]) : - (result[c2][i] - result[c1][i]); + (result[c1][i] - result[c2][i]) : + (result[c2][i] - result[c1][i]); if (diff > MAX_TOLERANCE) { - if ((i == 2 || i == 6) && !bitmap) { + if ((i == 2 || i == 6) && !simularity_bitmap) { if (result[c1][i] + result[c1][i + 1] == 0) - final[(i / 4)] = c2; + final_candidate[(i / 4)] = c2; else if (result[c2][i] + result[c2][i + 1] == 0) - final[(i / 4)] = c1; + final_candidate[(i / 4)] = c1; else - bitmap = bitmap | (1 << i); - } else { - bitmap = bitmap | (1 << i); - } + simularity_bitmap = simularity_bitmap | + (1 << i); + } else + simularity_bitmap = + simularity_bitmap | (1 << i); } } - if (bitmap == 0) { + if (simularity_bitmap == 0) { for (i = 0; i < (bound / 4); i++) { - if (final[i] != 0xFF) { + if (final_candidate[i] != 0xFF) { for (j = i * 4; j < (i + 1) * 4 - 2; j++) - result[3][j] = result[final[i]][j]; + result[3][j] = + result[final_candidate[i]][j]; bresult = false; } } return bresult; - } else if (!(bitmap & 0x0F)) { + } else if (!(simularity_bitmap & 0x0F)) { for (i = 0; i < 4; i++) result[3][i] = result[c1][i]; return false; - } else if (!(bitmap & 0xF0) && is2t) { + } else if (!(simularity_bitmap & 0xF0) && is2t) { for (i = 4; i < 8; i++) result[3][i] = result[c1][i]; return false; } else { return false; } + } static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], u8 t, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 i; u8 patha_ok, pathb_ok; - const u32 adda_reg[IQK_ADDA_REG_NUM] = { + u32 adda_reg[IQK_ADDA_REG_NUM] = { 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec }; - const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { 0x522, 0x550, 0x551, 0x040 }; - const u32 iqk_bb_reg[IQK_BB_REG_NUM] = { - ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, RFPGA0_XCD_RFINTERFACESW, - 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0x800 + u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, + RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c, + 0x870, 0x860, 0x864, 0x800 }; const u32 retrycount = 2; if (t == 0) { - save_adda_reg(hw, adda_reg, rtlphy->adda_backup, 16); - save_mac_reg(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); - save_adda_reg(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, - IQK_BB_REG_NUM); + _rtl88e_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + _rtl88e_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + _rtl88e_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); } _rtl88e_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) { - rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER1, BIT(8)); + rtlphy->rfpi_enable = + (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); } if (!rtlphy->rfpi_enable) @@ -1652,10 +1803,9 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, } } - if (0 == patha_ok) { + if (0 == patha_ok) RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Success!!\n"); - } if (is2t) { _rtl88e_phy_path_a_standby(hw); _rtl88e_phy_path_adda_on(hw, adda_reg, false, is2t); @@ -1663,21 +1813,23 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, pathb_ok = _rtl88e_phy_path_b_iqk(hw); if (pathb_ok == 0x03) { result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, MASKDWORD) & + 0xeb4, + MASKDWORD) & 0x3FF0000) >> 16; result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & - 0x3FF0000) >> 16; + 
0x3FF0000) >> 16; result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) & - 0x3FF0000) >> 16; + 0x3FF0000) >> 16; result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) & - 0x3FF0000) >> 16; + 0x3FF0000) >> 16; break; } else if (i == (retrycount - 1) && pathb_ok == 0x01) { result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, MASKDWORD) & + 0xeb4, + MASKDWORD) & 0x3FF0000) >> 16; } result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & @@ -1690,10 +1842,13 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, if (t != 0) { if (!rtlphy->rfpi_enable) _rtl88e_phy_pi_mode_switch(hw, false); - reload_adda(hw, adda_reg, rtlphy->adda_backup, 16); - reload_mac(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); - reload_adda(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, - IQK_BB_REG_NUM); + _rtl88e_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + _rtl88e_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + _rtl88e_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3); if (is2t) @@ -1709,8 +1864,6 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) u8 tmpreg; u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; struct rtl_priv *rtlpriv = rtl_priv(hw); - int jj = RF90_PATH_A; - int kk = RF90_PATH_B; tmpreg = rtl_read_byte(rtlpriv, 0xd03); @@ -1720,51 +1873,52 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); if ((tmpreg & 0x70) != 0) { - rf_a_mode = rtl_get_rfreg(hw, jj, 0x00, MASK12BITS); + rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS); if (is2t) - rf_b_mode = rtl_get_rfreg(hw, kk, 0x00, + rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS); - rtl_set_rfreg(hw, jj, 0x00, MASK12BITS, + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, (rf_a_mode & 0x8FFFF) | 0x10000); if (is2t) - rtl_set_rfreg(hw, kk, 0x00, MASK12BITS, + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, (rf_b_mode & 0x8FFFF) | 0x10000); } - lc_cal = rtl_get_rfreg(hw, jj, 0x18, MASK12BITS); + lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); - rtl_set_rfreg(hw, jj, 0x18, MASK12BITS, lc_cal | 0x08000); + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); mdelay(100); if ((tmpreg & 0x70) != 0) { rtl_write_byte(rtlpriv, 0xd03, tmpreg); - rtl_set_rfreg(hw, jj, 0x00, MASK12BITS, rf_a_mode); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode); if (is2t) - rtl_set_rfreg(hw, kk, 0x00, MASK12BITS, + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, rf_b_mode); } else { rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); +RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); + } -static void rfpath_switch(struct ieee80211_hw *hw, - bool bmain, bool is2t) +static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, + bool bmain, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *fuse = rtl_efuse(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); if (is_hal_stop(rtlhal)) { u8 u1btmp; u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0); rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7)); - rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); } if (is2t) { if (bmain) @@ -1777,24 +1931,24 @@ static void rfpath_switch(struct ieee80211_hw *hw, rtl_set_bbreg(hw, 
RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0); rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201); - /* We use the RF definition of MAIN and AUX, left antenna and - * right antenna repectively. + /* We use the RF definition of MAIN and AUX, + * left antenna and right antenna repectively. * Default output at AUX. */ if (bmain) { - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(14) | - BIT(13) | BIT(12), 0); - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(5) | - BIT(4) | BIT(3), 0); - if (fuse->antenna_div_type == CGCS_RX_HW_ANTDIV) - rtl_set_bbreg(hw, RCONFIG_RAM64X16, BIT(31), 0); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 0); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 0); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, RCONFIG_RAM64x16, BIT(31), 0); } else { - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(14) | - BIT(13) | BIT(12), 1); - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(5) | - BIT(4) | BIT(3), 1); - if (fuse->antenna_div_type == CGCS_RX_HW_ANTDIV) - rtl_set_bbreg(hw, RCONFIG_RAM64X16, BIT(31), 1); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 1); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 1); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, RCONFIG_RAM64x16, BIT(31), 1); } } } @@ -1802,35 +1956,44 @@ static void rfpath_switch(struct ieee80211_hw *hw, #undef IQK_ADDA_REG_NUM #undef IQK_DELAY_TIME -void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; long result[4][8]; - u8 i, final; - bool patha_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0; + u8 i, final_candidate; + bool b_patha_ok, b_pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, + reg_ecc, reg_tmp = 0; bool is12simular, is13simular, is23simular; u32 iqk_bb_reg[9] = { - ROFDM0_XARXIQIMBAL, - ROFDM0_XBRXIQIMBAL, - ROFDM0_ECCATHRES, + ROFDM0_XARXIQIMBALANCE, + ROFDM0_XBRXIQIMBALANCE, + ROFDM0_ECCATHRESHOLD, ROFDM0_AGCRSSITABLE, - ROFDM0_XATXIQIMBAL, - ROFDM0_XBTXIQIMBAL, + ROFDM0_XATXIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, ROFDM0_XCTXAFE, ROFDM0_XDTXAFE, ROFDM0_RXIQEXTANTA }; - if (recovery) { - reload_adda(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9); + if (b_recovery) { + _rtl88e_phy_reload_adda_registers(hw, + iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); return; } - memset(result, 0, 32 * sizeof(long)); - final = 0xff; - patha_ok = false; + for (i = 0; i < 8; i++) { + result[0][i] = 0; + result[1][i] = 0; + result[2][i] = 0; + result[3][i] = 0; + } + final_candidate = 0xff; + b_patha_ok = false; + b_pathb_ok = false; is12simular = false; is23simular = false; is13simular = false; @@ -1840,29 +2003,32 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) else _rtl88e_phy_iq_calibrate(hw, result, i, false); if (i == 1) { - is12simular = sim_comp(hw, result, 0, 1); + is12simular = + _rtl88e_phy_simularity_compare(hw, result, 0, 1); if (is12simular) { - final = 0; + final_candidate = 0; break; } } if (i == 2) { - is13simular = sim_comp(hw, result, 0, 2); + is13simular = + _rtl88e_phy_simularity_compare(hw, result, 0, 2); if (is13simular) { - final = 0; + final_candidate = 0; break; } - is23simular = sim_comp(hw, result, 1, 2); + is23simular = + _rtl88e_phy_simularity_compare(hw, 
result, 1, 2); if (is23simular) { - final = 1; + final_candidate = 1; } else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; if (reg_tmp != 0) - final = 3; + final_candidate = 3; else - final = 0xFF; + final_candidate = 0xFF; } } } @@ -1870,47 +2036,55 @@ void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; + reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; } - if (final != 0xff) { - reg_e94 = result[final][0]; - rtlphy->reg_e94 = reg_e94; - reg_e9c = result[final][1]; - rtlphy->reg_e9c = reg_e9c; - reg_ea4 = result[final][2]; - reg_eb4 = result[final][4]; + if (final_candidate != 0xff) { + reg_e94 = result[final_candidate][0]; + reg_e9c = result[final_candidate][1]; + reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; + reg_eb4 = result[final_candidate][4]; + reg_ebc = result[final_candidate][5]; + reg_ec4 = result[final_candidate][6]; + reg_ecc = result[final_candidate][7]; rtlphy->reg_eb4 = reg_eb4; - reg_ebc = result[final][5]; rtlphy->reg_ebc = reg_ebc; - patha_ok = true; + rtlphy->reg_e94 = reg_e94; + rtlphy->reg_e9c = reg_e9c; + b_patha_ok = true; + b_pathb_ok = true; } else { rtlphy->reg_e94 = 0x100; rtlphy->reg_eb4 = 0x100; - rtlphy->reg_ebc = 0x0; rtlphy->reg_e9c = 0x0; + rtlphy->reg_ebc = 0x0; } if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ - fill_iqk(hw, patha_ok, result, final, (reg_ea4 == 0)); - if (final != 0xFF) { + _rtl88e_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, + final_candidate, + (reg_ea4 == 0)); + if (final_candidate != 0xFF) { for (i = 0; i < IQK_MATRIX_REG_NUM; i++) - rtlphy->iqk_matrix[0].value[0][i] = result[final][i]; + rtlphy->iqk_matrix[0].value[0][i] = + result[final_candidate][i]; rtlphy->iqk_matrix[0].iqk_done = true; + } - save_adda_reg(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9); + _rtl88e_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); } void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); - bool start_conttx = false, singletone = false; + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; u32 timeout = 2000, timecount = 0; - if (start_conttx || singletone) - return; - while (rtlpriv->mac80211.act_scanning && timecount < timeout) { udelay(50); timecount += 50; @@ -1928,18 +2102,18 @@ void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw) void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) { - rfpath_switch(hw, bmain, false); + _rtl88e_phy_set_rfpath_switch(hw, bmain, false); } bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; bool postprocessing = false; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "-->IO Cmd(%#x), set_io_inprogress(%d)\n", - iotype, rtlphy->set_io_inprogress); + iotype, rtlphy->set_io_inprogress); do { switch (iotype) { case IO_CMD_RESUME_DM_BY_SCAN: @@ -1947,14 +2121,14 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) "[IO CMD] Resume DM after scan.\n"); postprocessing = true; break; - case IO_CMD_PAUSE_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "[IO CMD] Pause DM before scan.\n"); postprocessing = true; break; default: - 
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } } while (false); @@ -1969,6 +2143,37 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) return true; } +static void rtl88e_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; + /*rtl92c_dm_write_dig(hw);*/ + rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83); + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; + dm_digtable->cur_igvalue = 0x17; + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); +} + static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1984,10 +2189,9 @@ static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw) static void _rtl88ee_phy_set_rf_sleep(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - int jj = RF90_PATH_A; rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); - rtl_set_rfreg(hw, jj, 0x00, RFREG_OFFSET_MASK, 0x00); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); } @@ -1999,42 +2203,49 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw, struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl8192_tx_ring *ring = NULL; bool bresult = true; u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; switch (rfpwr_state) { - case ERFON:{ + case ERFON: if ((ppsc->rfpwr_state == ERFOFF) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 init = 0; + u32 initializecount = 0; + do { - init++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while ((rtstatus != true) && (init < 10)); + } while (!rtstatus && + (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "Set ERFON sleeped:%d ms\n", - jiffies_to_msecs(jiffies - ppsc-> - last_sleep_jiffies)); + jiffies_to_msecs(jiffies - + ppsc-> + last_sleep_jiffies)); ppsc->last_awake_jiffies = jiffies; rtl88ee_phy_set_rf_on(hw); } - if (mac->link_state == MAC80211_LINKED) - rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK); - else - rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); - break; } - case ERFOFF:{ + if (mac->link_state == MAC80211_LINKED) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } + break; + case ERFOFF: for (queue_id = 0, i = 0; queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { ring = &pcipriv->dev.tx_ring[queue_id]; - if (skb_queue_len(&ring->queue) == 0) { + if (queue_id == BEACON_QUEUE || + 
skb_queue_len(&ring->queue) == 0) { queue_id++; continue; } else { @@ -2055,6 +2266,7 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw, break; } } + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic disable\n"); @@ -2063,49 +2275,51 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw, } else { if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_NO_LINK); + LED_CTL_NO_LINK); } else { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_POWER_OFF); + LED_CTL_POWER_OFF); } } - break; } + break; case ERFSLEEP:{ - if (ppsc->rfpwr_state == ERFOFF) - break; - for (queue_id = 0, i = 0; - queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { - ring = &pcipriv->dev.tx_ring[queue_id]; - if (skb_queue_len(&ring->queue) == 0) { - queue_id++; - continue; - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", - (i + 1), queue_id, - skb_queue_len(&ring->queue)); - - udelay(10); - i++; - } - if (i >= MAX_DOZE_WAITING_TIMES_9x) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", - MAX_DOZE_WAITING_TIMES_9x, - queue_id, - skb_queue_len(&ring->queue)); + if (ppsc->rfpwr_state == ERFOFF) break; + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } } + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFSLEEP awaked:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies)); + ppsc->last_sleep_jiffies = jiffies; + _rtl88ee_phy_set_rf_sleep(hw); + break; } - RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, - "Set ERFSLEEP awaked:%d ms\n", - jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies)); - ppsc->last_sleep_jiffies = jiffies; - _rtl88ee_phy_set_rf_sleep(hw); - break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); bresult = false; break; } @@ -2118,10 +2332,11 @@ bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state) { struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - bool bresult; + + bool bresult = false; if (rfpwr_state == ppsc->rfpwr_state) - return false; + return bresult; bresult = _rtl88ee_phy_set_rf_power_state(hw, rfpwr_state); return bresult; } diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h index 89f0f1ef14657ae467bea1445658cfd202c31da6..b29bd77210f4f554f0cb22d94c761614862926bb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,33 +26,35 @@ #ifndef __RTL92C_PHY_H__ #define __RTL92C_PHY_H__ -/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/ -#define MAX_TX_COUNT 4 +/* MAX_TX_COUNT must always set to 4, otherwise read efuse + * table secquence will be wrong. + */ +#define MAX_TX_COUNT 4 #define MAX_PRECMD_CNT 16 -#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 #define MAX_POSTCMD_CNT 16 -#define MAX_DOZE_WAITING_TIMES_9x 64 +#define MAX_DOZE_WAITING_TIMES_9x 64 #define RT_CANNOT_IO(hw) false -#define HIGHPOWER_RADIOA_ARRAYLEN 22 +#define HIGHPOWER_RADIOA_ARRAYLEN 22 #define IQK_ADDA_REG_NUM 16 #define IQK_BB_REG_NUM 9 #define MAX_TOLERANCE 5 #define IQK_DELAY_TIME 10 -#define IDX_MAP 15 +#define INDEX_MAPPING_NUM 15 #define APK_BB_REG_NUM 5 #define APK_AFE_REG_NUM 16 #define APK_CURVE_REG_NUM 4 -#define PATH_NUM 2 +#define PATH_NUM 2 -#define LOOP_LIMIT 5 +#define LOOP_LIMIT 5 #define MAX_STALL_TIME 50 -#define ANTENNADIVERSITYVALUE 0x80 -#define MAX_TXPWR_IDX_NMODE_92S 63 +#define ANTENNADIVERSITYVALUE 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 #define RESET_CNT_LIMIT 3 #define IQK_ADDA_REG_NUM 16 @@ -66,8 +64,8 @@ #define CT_OFFSET_MAC_ADDR 0X16 -#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A -#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 #define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66 #define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 #define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C @@ -75,13 +73,13 @@ #define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F #define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 -#define CT_OFFSET_CHANNEL_PLAH 0x75 -#define CT_OFFSET_THERMAL_METER 0x78 -#define CT_OFFSET_RF_OPTION 0x79 -#define CT_OFFSET_VERSION 0x7E -#define CT_OFFSET_CUSTOMER_ID 0x7F +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F -#define RTL92C_MAX_PATH_NUM 2 +#define RTL92C_MAX_PATH_NUM 2 enum swchnlcmd_id { CMDID_END, @@ -160,7 +158,6 @@ struct r_antenna_select_cck { u8 r_ccktx_enable:4; }; - struct efuse_contents { u8 mac_addr[ETH_ALEN]; u8 cck_tx_power_idx[6]; @@ -192,10 +189,10 @@ struct tx_power_struct { }; enum _ANT_DIV_TYPE { - NO_ANTDIV = 0xFF, + NO_ANTDIV = 0xFF, CG_TRX_HW_ANTDIV = 0x01, CGCS_RX_HW_ANTDIV = 0x02, - FIXED_HW_ANTDIV = 0x03, + FIXED_HW_ANTDIV = 0x03, CG_TRX_SMART_ANTDIV = 0x04, CGCS_RX_SW_ANTDIV = 0x05, }; @@ -217,6 +214,8 @@ void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel); void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw); void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw, enum nl80211_channel_type ch_type); diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c index 6dc4e3a954f6e7d045da5a101617fb9fc500a009..ef28c8ea1e84601b34417a8f6760b2b824a235f8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c +++ 
b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -32,78 +28,78 @@ /* drivers should parse below arrays and do the corresponding actions */ /*3 Power on Array*/ -struct wlan_pwr_cfg rtl8188e_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + - RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_CARDEMU_TO_ACT - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_power_on_flow[RTL8188EE_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_CARDEMU_TO_ACT + RTL8188EE_TRANS_END }; /*3Radio off GPIO Array */ -struct wlan_pwr_cfg rtl8188e_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS - + RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_ACT_TO_CARDEMU - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_radio_off_flow[RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_ACT_TO_CARDEMU + RTL8188EE_TRANS_END }; /*3Card Disable Array*/ -struct wlan_pwr_cfg rtl8188e_card_disable_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + - RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_ACT_TO_CARDEMU - RTL8188E_TRANS_CARDEMU_TO_CARDDIS - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_card_disable_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_ACT_TO_CARDEMU + RTL8188EE_TRANS_CARDEMU_TO_CARDDIS + RTL8188EE_TRANS_END }; /*3 Card Enable Array*/ -struct wlan_pwr_cfg rtl8188e_card_enable_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + - RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_CARDDIS_TO_CARDEMU - RTL8188E_TRANS_CARDEMU_TO_ACT - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_card_enable_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_CARDDIS_TO_CARDEMU + RTL8188EE_TRANS_CARDEMU_TO_ACT + RTL8188EE_TRANS_END }; /*3Suspend Array*/ -struct wlan_pwr_cfg rtl8188e_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS - + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS - + RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_ACT_TO_CARDEMU - RTL8188E_TRANS_CARDEMU_TO_SUS - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_suspend_flow[RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_ACT_TO_CARDEMU + RTL8188EE_TRANS_CARDEMU_TO_SUS + RTL8188EE_TRANS_END }; /*3 Resume Array*/ -struct wlan_pwr_cfg rtl8188e_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS - + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS - + RTL8188E_TRANS_END_STEPS] = { - RTL8188E_TRANS_SUS_TO_CARDEMU - RTL8188E_TRANS_CARDEMU_TO_ACT - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_resume_flow[RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_SUS_TO_CARDEMU + RTL8188EE_TRANS_CARDEMU_TO_ACT + RTL8188EE_TRANS_END }; /*3HWPDN Array*/ -struct wlan_pwr_cfg rtl8188e_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS - + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS - + RTL8188E_TRANS_END_STEPS] = { - 
RTL8188E_TRANS_ACT_TO_CARDEMU - RTL8188E_TRANS_CARDEMU_TO_PDN - RTL8188E_TRANS_END +struct wlan_pwr_cfg rtl8188ee_hwpdn_flow[RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS] = { + RTL8188EE_TRANS_ACT_TO_CARDEMU + RTL8188EE_TRANS_CARDEMU_TO_PDN + RTL8188EE_TRANS_END }; /*3 Enter LPS */ -struct wlan_pwr_cfg rtl8188e_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS - + RTL8188E_TRANS_END_STEPS] = { +struct wlan_pwr_cfg rtl8188ee_enter_lps_flow[RTL8188EE_TRANS_ACT_TO_LPS_STEPS + + RTL8188EE_TRANS_END_STEPS] = { /*FW behavior*/ - RTL8188E_TRANS_ACT_TO_LPS - RTL8188E_TRANS_END + RTL8188EE_TRANS_ACT_TO_LPS + RTL8188EE_TRANS_END }; /*3 Leave LPS */ -struct wlan_pwr_cfg rtl8188e_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS - + RTL8188E_TRANS_END_STEPS] = { +struct wlan_pwr_cfg rtl8188ee_leave_lps_flow[RTL8188EE_TRANS_LPS_TO_ACT_STEPS + + RTL8188EE_TRANS_END_STEPS] = { /*FW behavior*/ - RTL8188E_TRANS_LPS_TO_ACT - RTL8188E_TRANS_END + RTL8188EE_TRANS_LPS_TO_ACT + RTL8188EE_TRANS_END }; diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h index 32e135ab9a63fcda64369ea3b0776c0e13175d68..79103347d96759c91cd67c1e49171d76381572a5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -30,297 +26,286 @@ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ -/* - Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd - There are 6 HW Power States: - 0: POFF--Power Off - 1: PDN--Power Down - 2: CARDEMU--Card Emulation - 3: ACT--Active Mode - 4: LPS--Low Power State - 5: SUS--Suspend - - The transision from different states are defined below - TRANS_CARDEMU_TO_ACT - TRANS_ACT_TO_CARDEMU - TRANS_CARDEMU_TO_SUS - TRANS_SUS_TO_CARDEMU - TRANS_CARDEMU_TO_PDN - TRANS_ACT_TO_LPS - TRANS_LPS_TO_ACT - - TRANS_END - PWR SEQ Version: rtl8188e_PwrSeq_V09.h -*/ - -#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10 -#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10 -#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10 -#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10 -#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10 -#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10 -#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15 -#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15 -#define RTL8188E_TRANS_END_STEPS 1 +#include "pwrseqcmd.h" +/* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd + * There are 6 HW Power States: + * 0: POFF--Power Off + * 1: PDN--Power Down + * 2: CARDEMU--Card Emulation + * 3: ACT--Active Mode + * 4: LPS--Low Power State + * 5: SUS--Suspend + * + * The transision from different states are defined below + * TRANS_CARDEMU_TO_ACT + * TRANS_ACT_TO_CARDEMU + * TRANS_CARDEMU_TO_SUS + * TRANS_SUS_TO_CARDEMU + * TRANS_CARDEMU_TO_PDN + * TRANS_ACT_TO_LPS + * TRANS_LPS_TO_ACT + * + * TRANS_END + * PWR SEQ Version: rtl8188ee_PwrSeq_V09.h + */ +#define RTL8188EE_TRANS_CARDEMU_TO_ACT_STEPS 10 +#define RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS 10 +#define RTL8188EE_TRANS_CARDEMU_TO_SUS_STEPS 10 +#define RTL8188EE_TRANS_SUS_TO_CARDEMU_STEPS 10 +#define RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS 10 +#define RTL8188EE_TRANS_PDN_TO_CARDEMU_STEPS 10 +#define RTL8188EE_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8188EE_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8188EE_TRANS_END_STEPS 1 -#define RTL8188E_TRANS_CARDEMU_TO_ACT \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ +/* The following macros have the following format: + * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value + * comments }, + */ +#define RTL8188EE_TRANS_CARDEMU_TO_ACT \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /* wait till 0x04[17] = 1 power ready*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1) \ + /* wait till 0x04[17] = 1 power ready*/}, \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /* 0x02[1:0] = 0 reset BB*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0)|BIT(1), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0)|BIT(1), 0 \ + /* 0x02[1:0] = 0 reset BB*/}, \ {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x24[23] = 2b'01 schmit trigger */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7) \ + /*0x24[23] = 2b'01 schmit trigger */}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /* 0x04[15] = 0 disable HWPDN (control by DRV)*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0 \ + /* 0x04[15] = 0 disable HWPDN (control by DRV)*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x04[12:11] = 2b'00 disable WL suspend*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 
BIT(4)|BIT(3), 0 \ + /*0x04[12:11] = 2b'00 disable WL suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x04[8] = 1 polling until return 0*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0) \ + /*0x04[8] = 1 polling until return 0*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*wait till 0x04[8] = 0*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0 \ + /*wait till 0x04[8] = 0*/}, \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*LDO normal mode*/\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0 \ + /*LDO normal mode*/}, \ {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*SDIO Driving*/\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4) \ + /*SDIO Driving*/}, -#define RTL8188E_TRANS_ACT_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ +#define RTL8188EE_TRANS_ACT_TO_CARDEMU \ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*0x1F[7:0] = 0 turn off RF*/}, \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*LDO Sleep mode*/\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4) \ + /*LDO Sleep mode*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x04[9] = 1 turn off MAC by HW state machine*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1) \ + /*0x04[9] = 1 turn off MAC by HW state machine*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0 \ + /*wait till 0x04[9] = 0 polling until return 0 to disable*/}, - -#define RTL8188E_TRANS_CARDEMU_TO_SUS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ +#define RTL8188EE_TRANS_CARDEMU_TO_SUS \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3) \ + /*0x04[12:11] = 2b'01enable WL suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)},\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4) \ + /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/}, \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT(7)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT(7) \ + /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */},\ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /*Clear SIC_EN register 0x40[12] = 1'b0 */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0 \ + /*Clear SIC_EN register 0x40[12] = 1'b0 */}, \ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ 
PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /*Set USB suspend enable local register 0xfe10[4]= 1 */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4) \ + /*Set USB suspend enable local register 0xfe10[4]=1 */}, \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - /*Set SDIO suspend local register*/ \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0) \ + /*Set SDIO suspend local register*/}, \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - /*wait power state to suspend*/ \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0 \ + /*wait power state to suspend*/}, -#define RTL8188E_TRANS_SUS_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_SUS_TO_CARDEMU \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - /*Set SDIO suspend local register*/ \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0 \ + /*Set SDIO suspend local register*/}, \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - /*wait power state to suspend*/ \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1) \ + /*wait power state to suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0 \ + /*0x04[12:11] = 2b'01enable WL suspend*/}, -#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_CARDEMU_TO_CARDDIS \ {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*0x24[23] = 2b'01 schmit trigger */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7) \ + /*0x24[23] = 2b'01 schmit trigger */}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /*0x04[12:11] = 2b'01 enable WL suspend*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) \ + /*0x04[12:11] = 2b'01 enable WL suspend*/}, \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */},\ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - /*Clear SIC_EN register 0x40[12] = 1'b0 */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0 \ + /*Clear SIC_EN register 0x40[12] = 1'b0 */}, \ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ - /*Set USB suspend enable local register 0xfe10[4]= 1 */ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4) \ + /*Set USB suspend enable local register 0xfe10[4]=1 */}, \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - /*Set SDIO suspend local register*/ \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0) \ + /*Set SDIO suspend local register*/}, \ 
{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0 \ + /*wait power state to suspend*/}, -#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_CARDDIS_TO_CARDEMU \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO,\ - PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0 \ + /*Set SDIO suspend local register*/}, \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO,\ - PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1) \ + /*wait power state to suspend*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \ - /*0x04[12:11] = 2b'01enable WL suspend*/ - + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0 \ + /*0x04[12:11] = 2b'01enable WL suspend*/}, -#define RTL8188E_TRANS_CARDEMU_TO_PDN \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_CARDEMU_TO_PDN \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/ \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0/* 0x04[16] = 0*/}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/ - + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7) \ + /* 0x04[15] = 1*/}, -#define RTL8188E_TRANS_PDN_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_PDN_TO_CARDEMU \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0/* 0x04[15] = 0*/}, - -#define RTL8188E_TRANS_ACT_TO_LPS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ +#define RTL8188EE_TRANS_ACT_TO_LPS \ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F \ + /*Tx Pause*/}, \ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*zero if no pkt is tx*/\ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*Should be zero if no packet is transmitting*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*Should be zero if no packet is transmitting*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*Should be zero if no packet is transmitting*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 
\ - /*CCK and OFDM are disabled, and clock are gated*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0 \ + /*CCK and OFDM are disabled,and clock are gated*/}, \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US \ + /*Delay 1us*/}, \ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F \ + /*Reset MAC TRX*/}, \ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*check if removed later*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0 \ + /*check if removed later*/}, \ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*Respond TxOK to scheduler*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5) \ + /*Respond TxOK to scheduler*/}, -#define RTL8188E_TRANS_LPS_TO_ACT \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ +#define RTL8188EE_TRANS_LPS_TO_ACT \ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*SDIO RPWM*/}, \ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*USB RPWM*/}, \ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*PCIe RPWM*/}, \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS \ + /*Delay*/}, \ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*. 0x08[4] = 0 switch TSF to 40M*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0 \ + /*. 0x08[4] = 0 switch TSF to 40M*/}, \ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*Polling 0x109[7]= 0 TSF in 40M*/ \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0 \ + /*Polling 0x109[7]=0 TSF in 40M*/}, \ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*. 0x29[7:6] = 2b'00 enable BB clock*/ \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0 \ + /*. 0x29[7:6] = 2b'00 enable BB clock*/}, \ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*. 0x101[1] = 1*/\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1) \ + /*. 0x101[1] = 1*/}, \ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*. 0x100[7:0] = 0xFF enable WMAC TRX*/}, \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - /*. 0x02[1:0] = 2b'11 enable BB macro*/\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)}, \ - {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 
0x522 = 0*/ - + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0) \ + /*. 0x02[1:0] = 2b'11 enable BB macro*/}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*. 0x522 = 0*/}, -#define RTL8188E_TRANS_END \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ +#define RTL8188EE_TRANS_END \ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ 0, PWR_CMD_END, 0, 0} -extern struct wlan_pwr_cfg rtl8188e_power_on_flow - [RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_radio_off_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_card_disable_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_card_enable_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_suspend_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_resume_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_hwpdn_flow - [RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + - RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_enter_lps_flow - [RTL8188E_TRANS_ACT_TO_LPS_STEPS + - RTL8188E_TRANS_END_STEPS]; -extern struct wlan_pwr_cfg rtl8188e_leave_lps_flow - [RTL8188E_TRANS_LPS_TO_ACT_STEPS + - RTL8188E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_power_on_flow + [RTL8188EE_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_radio_off_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_card_disable_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_card_enable_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_suspend_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_resume_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_hwpdn_flow + [RTL8188EE_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8188EE_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_enter_lps_flow + [RTL8188EE_TRANS_ACT_TO_LPS_STEPS + + RTL8188EE_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8188ee_leave_lps_flow + [RTL8188EE_TRANS_LPS_TO_ACT_STEPS + + RTL8188EE_TRANS_END_STEPS]; /* RTL8723 Power Configuration CMDs for PCIe interface */ -#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188e_power_on_flow -#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188e_radio_off_flow -#define Rtl8188E_NIC_DISABLE_FLOW rtl8188e_card_disable_flow -#define Rtl8188E_NIC_ENABLE_FLOW rtl8188e_card_enable_flow -#define Rtl8188E_NIC_SUSPEND_FLOW rtl8188e_suspend_flow -#define Rtl8188E_NIC_RESUME_FLOW rtl8188e_resume_flow -#define Rtl8188E_NIC_PDN_FLOW rtl8188e_hwpdn_flow 
-#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188e_enter_lps_flow -#define Rtl8188E_NIC_LPS_LEAVE_FLOW rtl8188e_leave_lps_flow +#define RTL8188EE_NIC_PWR_ON_FLOW rtl8188ee_power_on_flow +#define RTL8188EE_NIC_RF_OFF_FLOW rtl8188ee_radio_off_flow +#define RTL8188EE_NIC_DISABLE_FLOW rtl8188ee_card_disable_flow +#define RTL8188EE_NIC_ENABLE_FLOW rtl8188ee_card_enable_flow +#define RTL8188EE_NIC_SUSPEND_FLOW rtl8188ee_suspend_flow +#define RTL8188EE_NIC_RESUME_FLOW rtl8188ee_resume_flow +#define RTL8188EE_NIC_PDN_FLOW rtl8188ee_hwpdn_flow +#define RTL8188EE_NIC_LPS_ENTER_FLOW rtl8188ee_enter_lps_flow +#define RTL8188EE_NIC_LPS_LEAVE_FLOW rtl8188ee_leave_lps_flow #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c deleted file mode 100644 index 0f9314205526b469727eeba332687e2b3e5a477d..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ - -#include "pwrseq.h" - - -/* Description: - * This routine deal with the Power Configuration CMDs - * parsing for RTL8723/RTL8188E Series IC. - * Assumption: - * We should follow specific format which was released from HW SD. - * - * 2011.07.07, added by Roger. 
- */ - -bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 fab_version, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]) -{ - struct wlan_pwr_cfg cmd = {0}; - bool polling_bit = false; - u32 ary_idx = 0; - u8 val = 0; - u32 offset = 0; - u32 polling_count = 0; - u32 max_polling_cnt = 5000; - - do { - cmd = pwrcfgcmd[ary_idx]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): offset(%#x), cut_msk(%#x), fab_msk(%#x)," - "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), val(%#x)\n", - GET_PWR_CFG_OFFSET(cmd), - GET_PWR_CFG_CUT_MASK(cmd), - GET_PWR_CFG_FAB_MASK(cmd), - GET_PWR_CFG_INTF_MASK(cmd), - GET_PWR_CFG_BASE(cmd), - GET_PWR_CFG_CMD(cmd), - GET_PWR_CFG_MASK(cmd), - GET_PWR_CFG_VALUE(cmd)); - - if ((GET_PWR_CFG_FAB_MASK(cmd) & fab_version) && - (GET_PWR_CFG_CUT_MASK(cmd) & cut_version) && - (GET_PWR_CFG_INTF_MASK(cmd) & interface_type)) { - switch (GET_PWR_CFG_CMD(cmd)) { - case PWR_CMD_READ: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"); - break; - case PWR_CMD_WRITE: { - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"); - offset = GET_PWR_CFG_OFFSET(cmd); - - /*Read the val from system register*/ - val = rtl_read_byte(rtlpriv, offset); - val &= (~(GET_PWR_CFG_MASK(cmd))); - val |= (GET_PWR_CFG_VALUE(cmd) & - GET_PWR_CFG_MASK(cmd)); - - /*Write the val back to sytem register*/ - rtl_write_byte(rtlpriv, offset, val); - } - break; - case PWR_CMD_POLLING: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"); - polling_bit = false; - offset = GET_PWR_CFG_OFFSET(cmd); - - do { - val = rtl_read_byte(rtlpriv, offset); - - val = val & GET_PWR_CFG_MASK(cmd); - if (val == (GET_PWR_CFG_VALUE(cmd) & - GET_PWR_CFG_MASK(cmd))) - polling_bit = true; - else - udelay(10); - - if (polling_count++ > max_polling_cnt) { - RT_TRACE(rtlpriv, COMP_INIT, - DBG_LOUD, - "polling fail in pwrseqcmd\n"); - return false; - } - } while (!polling_bit); - - break; - case PWR_CMD_DELAY: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"); - if (GET_PWR_CFG_VALUE(cmd) == PWRSEQ_DELAY_US) - udelay(GET_PWR_CFG_OFFSET(cmd)); - else - mdelay(GET_PWR_CFG_OFFSET(cmd)); - break; - case PWR_CMD_END: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); - return true; - default: - RT_ASSERT(false, - "rtl88_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); - break; - } - } - - ary_idx++; - } while (1); - - return true; -} diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h deleted file mode 100644 index d9ae280bb1a2ba7cce6e87a014843b7c0acf35da..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.h +++ /dev/null @@ -1,97 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ - -#ifndef __RTL8723E_PWRSEQCMD_H__ -#define __RTL8723E_PWRSEQCMD_H__ - -#include "../wifi.h" -/*---------------------------------------------*/ -/* The value of cmd: 4 bits */ -/*---------------------------------------------*/ -#define PWR_CMD_READ 0x00 -#define PWR_CMD_WRITE 0x01 -#define PWR_CMD_POLLING 0x02 -#define PWR_CMD_DELAY 0x03 -#define PWR_CMD_END 0x04 - -/* define the base address of each block */ -#define PWR_BASEADDR_MAC 0x00 -#define PWR_BASEADDR_USB 0x01 -#define PWR_BASEADDR_PCIE 0x02 -#define PWR_BASEADDR_SDIO 0x03 - -#define PWR_INTF_SDIO_MSK BIT(0) -#define PWR_INTF_USB_MSK BIT(1) -#define PWR_INTF_PCI_MSK BIT(2) -#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) - -#define PWR_FAB_TSMC_MSK BIT(0) -#define PWR_FAB_UMC_MSK BIT(1) -#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) - -#define PWR_CUT_TESTCHIP_MSK BIT(0) -#define PWR_CUT_A_MSK BIT(1) -#define PWR_CUT_B_MSK BIT(2) -#define PWR_CUT_C_MSK BIT(3) -#define PWR_CUT_D_MSK BIT(4) -#define PWR_CUT_E_MSK BIT(5) -#define PWR_CUT_F_MSK BIT(6) -#define PWR_CUT_G_MSK BIT(7) -#define PWR_CUT_ALL_MSK 0xFF - -enum pwrseq_delay_unit { - PWRSEQ_DELAY_US, - PWRSEQ_DELAY_MS, -}; - -struct wlan_pwr_cfg { - u16 offset; - u8 cut_msk; - u8 fab_msk:4; - u8 interface_msk:4; - u8 base:4; - u8 cmd:4; - u8 msk; - u8 value; -}; - -#define GET_PWR_CFG_OFFSET(__PWR) (__PWR.offset) -#define GET_PWR_CFG_CUT_MASK(__PWR) (__PWR.cut_msk) -#define GET_PWR_CFG_FAB_MASK(__PWR) (__PWR.fab_msk) -#define GET_PWR_CFG_INTF_MASK(__PWR) (__PWR.interface_msk) -#define GET_PWR_CFG_BASE(__PWR) (__PWR.base) -#define GET_PWR_CFG_CMD(__PWR) (__PWR.cmd) -#define GET_PWR_CFG_MASK(__PWR) (__PWR.msk) -#define GET_PWR_CFG_VALUE(__PWR) (__PWR.value) - -bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 fab_version, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]); - -#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h index cd7e7a52713380966bc5128548ab8bf6c266233a..15400ee6c04b08864cb532af4a3f57aebdf01b61 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -30,62 +26,60 @@ #ifndef __RTL92C_REG_H__ #define __RTL92C_REG_H__ -#define TXPKT_BUF_SELECT 0x69 -#define RXPKT_BUF_SELECT 0xA5 -#define DISABLE_TRXPKT_BUF_ACCESS 0x0 +#define TXPKT_BUF_SELECT 0x69 +#define RXPKT_BUF_SELECT 0xA5 +#define DISABLE_TRXPKT_BUF_ACCESS 0x0 #define REG_SYS_ISO_CTRL 0x0000 #define REG_SYS_FUNC_EN 0x0002 #define REG_APS_FSMCO 0x0004 #define REG_SYS_CLKR 0x0008 -#define REG_9346CR 0x000A -#define REG_EE_VPD 0x000C +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C #define REG_AFE_MISC 0x0010 #define REG_SPS0_CTRL 0x0011 #define REG_SPS_OCP_CFG 0x0018 #define REG_RSV_CTRL 0x001C -#define REG_RF_CTRL 0x001F +#define REG_RF_CTRL 0x001F #define REG_LDOA15_CTRL 0x0020 #define REG_LDOV12D_CTRL 0x0021 #define REG_LDOHCI12_CTRL 0x0022 #define REG_LPLDO_CTRL 0x0023 #define REG_AFE_XTAL_CTRL 0x0024 -#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test - * chip, 1.4v for MP chip - */ +/* 1.5v for 8188EE test chip, 1.4v for MP chip */ +#define REG_AFE_LDO_CTRL 0x0027 #define REG_AFE_PLL_CTRL 0x0028 #define REG_EFUSE_CTRL 0x0030 #define REG_EFUSE_TEST 0x0034 #define REG_PWR_DATA 0x0038 #define REG_CAL_TIMER 0x003C #define REG_ACLK_MON 0x003E -#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_MUXCFG 0x0040 #define REG_GPIO_IO_SEL 0x0042 -#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_MAC_PINMUX_CFG 0x0043 #define REG_GPIO_PIN_CTRL 0x0044 #define REG_GPIO_INTM 0x0048 -#define REG_LEDCFG0 0x004C -#define REG_LEDCFG1 0x004D -#define REG_LEDCFG2 0x004E -#define REG_LEDCFG3 0x004F -#define REG_FSIMR 0x0050 -#define REG_FSISR 0x0054 -#define REG_HSIMR 0x0058 -#define REG_HSISR 0x005c -#define REG_GPIO_PIN_CTRL_2 0x0060 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +#define REG_GPIO_PIN_CTRL_2 0x0060 #define REG_GPIO_IO_SEL_2 0x0062 -#define REG_GPIO_OUTPUT 0x006c -#define REG_AFE_XTAL_CTRL_EXT 0x0078 +#define REG_GPIO_OUTPUT 0x006c +#define REG_AFE_XTAL_CTRL_EXT 0x0078 #define REG_XCK_OUT_CTRL 0x007c #define REG_MCUFWDL 0x0080 #define REG_WOL_EVENT 0x0081 #define REG_MCUTSTCFG 0x0084 - -#define REG_HIMR 0x00B0 -#define REG_HISR 0x00B4 -#define REG_HIMRE 0x00B8 -#define REG_HISRE 0x00BC +#define REG_HIMR 0x00B0 +#define REG_HISR 0x00B4 +#define REG_HIMRE 0x00B8 +#define REG_HISRE 0x00BC #define REG_EFUSE_ACCESS 0x00CF @@ -96,23 +90,23 @@ #define REG_PCIE_MIO_INTF 0x00E4 #define REG_PCIE_MIO_INTD 0x00E8 #define REG_HPON_FSM 0x00EC -#define REG_SYS_CFG 0x00F0 +#define REG_SYS_CFG 0x00F0 -#define REG_CR 0x0100 -#define REG_PBP 0x0104 -#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 +#define REG_CR 0x0100 +#define REG_PBP 0x0104 +#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 #define REG_TRXDMA_CTRL 0x010C #define REG_TRXFF_BNDY 0x0114 #define REG_TRXFF_STATUS 0x0118 #define REG_RXFF_PTR 0x011C -#define REG_CPWM 0x012F -#define REG_FWIMR 0x0130 -#define REG_FWISR 0x0134 +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 #define REG_PKTBUF_DBG_CTRL 0x0140 -#define REG_PKTBUF_DBG_DATA_L 0x0144 -#define REG_PKTBUF_DBG_DATA_H 0x0148 -#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2) +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 +#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2) #define REG_TC0_CTRL 0x0150 #define REG_TC1_CTRL 0x0154 @@ -123,13 +117,13 @@ #define REG_MBIST_START 0x0174 #define REG_MBIST_DONE 0x0178 #define REG_MBIST_FAIL 0x017C -#define REG_32K_CTRL 0x0194 
-#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_32K_CTRL 0x0194 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 #define REG_C2HEVT_CLEAR 0x01AF #define REG_C2HEVT_MSG_TEST 0x01B8 #define REG_MCUTST_1 0x01c0 -#define REG_FMETHR 0x01C8 -#define REG_HMETFR 0x01CC +#define REG_FMETHR 0x01C8 +#define REG_HMETFR 0x01CC #define REG_HMEBOX_0 0x01D0 #define REG_HMEBOX_1 0x01D4 #define REG_HMEBOX_2 0x01D8 @@ -144,36 +138,37 @@ #define REG_HMEBOX_EXT_2 0x01F8 #define REG_HMEBOX_EXT_3 0x01FC -#define REG_RQPN 0x0200 +#define REG_RQPN 0x0200 #define REG_FIFOPAGE 0x0204 -#define REG_TDECTRL 0x0208 -#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C #define REG_TXDMA_STATUS 0x0210 #define REG_RQPN_NPQ 0x0214 -#define REG_RXDMA_AGG_PG_TH 0x0280 -#define REG_FW_UPD_RDPTR 0x0284 /* FW shall update this - * register before FW * write - * RXPKT_RELEASE_POLL to 1 - */ -#define REG_RXDMA_CONTROL 0x0286 /* Control the RX DMA.*/ -#define REG_RXPKT_NUM 0x0287 /* The number of packets - * in RXPKTBUF. - */ +#define REG_RXDMA_AGG_PG_TH 0x0280 +/* FW shall update this register before + * FW write RXPKT_RELEASE_POLL to 1 + */ +#define REG_FW_UPD_RDPTR 0x0284 +/* Control the RX DMA.*/ +#define REG_RXDMA_CONTROL 0x0286 +/* The number of packets in RXPKTBUF. */ +#define REG_RXPKT_NUM 0x0287 + #define REG_PCIE_CTRL_REG 0x0300 -#define REG_INT_MIG 0x0304 +#define REG_INT_MIG 0x0304 #define REG_BCNQ_DESA 0x0308 -#define REG_HQ_DESA 0x0310 +#define REG_HQ_DESA 0x0310 #define REG_MGQ_DESA 0x0318 #define REG_VOQ_DESA 0x0320 #define REG_VIQ_DESA 0x0328 #define REG_BEQ_DESA 0x0330 #define REG_BKQ_DESA 0x0338 -#define REG_RX_DESA 0x0340 +#define REG_RX_DESA 0x0340 -#define REG_DBI 0x0348 -#define REG_MDIO 0x0354 -#define REG_DBG_SEL 0x0360 +#define REG_DBI 0x0348 +#define REG_MDIO 0x0354 +#define REG_DBG_SEL 0x0360 #define REG_PCIE_HRPWM 0x0361 #define REG_PCIE_HCPWM 0x0363 #define REG_UART_CTRL 0x0364 @@ -181,7 +176,6 @@ #define REG_UART_TX_DESA 0x0370 #define REG_UART_RX_DESA 0x0378 - #define REG_HDAQ_DESA_NODEF 0x0000 #define REG_CMDQ_DESA_NODEF 0x0000 @@ -191,33 +185,32 @@ #define REG_BKQ_INFORMATION 0x040C #define REG_MGQ_INFORMATION 0x0410 #define REG_HGQ_INFORMATION 0x0414 -#define REG_BCNQ_INFORMATION 0x0418 +#define REG_BCNQ_INFORMATION 0x0418 #define REG_TXPKT_EMPTY 0x041A - -#define REG_CPU_MGQ_INFORMATION 0x041C +#define REG_CPU_MGQ_INFORMATION 0x041C #define REG_FWHW_TXQ_CTRL 0x0420 #define REG_HWSEQ_CTRL 0x0423 -#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 -#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 #define REG_MULTI_BCNQ_EN 0x0426 -#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_MULTI_BCNQ_OFFSET 0x0427 #define REG_SPEC_SIFS 0x0428 -#define REG_RL 0x042A -#define REG_DARFRC 0x0430 -#define REG_RARFRC 0x0438 -#define REG_RRSR 0x0440 -#define REG_ARFR0 0x0444 -#define REG_ARFR1 0x0448 -#define REG_ARFR2 0x044C -#define REG_ARFR3 0x0450 +#define REG_RL 0x042A +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x0448 +#define REG_ARFR2 0x044C +#define REG_ARFR3 0x0450 #define REG_AGGLEN_LMT 0x0458 #define REG_AMPDU_MIN_SPACE 0x045C -#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D #define REG_FAST_EDCA_CTRL 0x0460 #define REG_RD_RESP_PKT_TH 0x0463 #define REG_INIRTS_RATE_SEL 0x0480 -#define REG_INIDATA_RATE_SEL 0x0484 +#define REG_INIDATA_RATE_SEL 0x0484 #define REG_POWER_STATUS 0x04A4 #define 
REG_POWER_STAGE1 0x04B4 #define REG_POWER_STAGE2 0x04B8 @@ -225,32 +218,32 @@ #define REG_STBC_SETTING 0x04C4 #define REG_PROT_MODE_CTRL 0x04C8 #define REG_BAR_MODE_CTRL 0x04CC -#define REG_RA_TRY_RATE_AGG_LMT 0x04CF -#define REG_EARLY_MODE_CONTROL 0x04D0 +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_EARLY_MODE_CONTROL 0x04D0 #define REG_NQOS_SEQ 0x04DC -#define REG_QOS_SEQ 0x04DE +#define REG_QOS_SEQ 0x04DE #define REG_NEED_CPU_HANDLE 0x04E0 #define REG_PKT_LOSE_RPT 0x04E1 #define REG_PTCL_ERR_STATUS 0x04E2 #define REG_TX_RPT_CTRL 0x04EC #define REG_TX_RPT_TIME 0x04F0 -#define REG_DUMMY 0x04FC +#define REG_DUMMY 0x04FC #define REG_EDCA_VO_PARAM 0x0500 #define REG_EDCA_VI_PARAM 0x0504 #define REG_EDCA_BE_PARAM 0x0508 #define REG_EDCA_BK_PARAM 0x050C -#define REG_BCNTCFG 0x0510 -#define REG_PIFS 0x0512 +#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 #define REG_RDG_PIFS 0x0513 #define REG_SIFS_CTX 0x0514 #define REG_SIFS_TRX 0x0516 #define REG_AGGR_BREAK_TIME 0x051A -#define REG_SLOT 0x051B +#define REG_SLOT 0x051B #define REG_TX_PTCL_CTRL 0x0520 -#define REG_TXPAUSE 0x0522 +#define REG_TXPAUSE 0x0522 #define REG_DIS_TXREQ_CLR 0x0523 -#define REG_RD_CTRL 0x0524 +#define REG_RD_CTRL 0x0524 #define REG_TBTT_PROHIBIT 0x0540 #define REG_RD_NAV_NXT 0x0544 #define REG_NAV_PROT_LEN 0x0546 @@ -259,21 +252,21 @@ #define REG_MBID_NUM 0x0552 #define REG_DUAL_TSF_RST 0x0553 #define REG_BCN_INTERVAL 0x0554 -#define REG_MBSSID_BCN_SPACE 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 #define REG_DRVERLYINT 0x0558 #define REG_BCNDMATIM 0x0559 -#define REG_ATIMWND 0x055A +#define REG_ATIMWND 0x055A #define REG_BCN_MAX_ERR 0x055D -#define REG_RXTSF_OFFSET_CCK 0x055E -#define REG_RXTSF_OFFSET_OFDM 0x055F -#define REG_TSFTR 0x0560 +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 #define REG_INIT_TSFTR 0x0564 -#define REG_PSTIMER 0x0580 -#define REG_TIMER0 0x0584 -#define REG_TIMER1 0x0588 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 #define REG_ACMHWCTRL 0x05C0 #define REG_ACMRSTCTRL 0x05C1 -#define REG_ACMAVG 0x05C2 +#define REG_ACMAVG 0x05C2 #define REG_VO_ADMTIME 0x05C4 #define REG_VI_ADMTIME 0x05C6 #define REG_BE_ADMTIME 0x05C8 @@ -282,38 +275,38 @@ #define REG_APSD_CTRL 0x0600 #define REG_BWOPMODE 0x0603 -#define REG_TCR 0x0604 -#define REG_RCR 0x0608 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 #define REG_RX_PKT_LIMIT 0x060C #define REG_RX_DLK_TIME 0x060D #define REG_RX_DRVINFO_SZ 0x060F -#define REG_MACID 0x0610 -#define REG_BSSID 0x0618 -#define REG_MAR 0x0620 +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 #define REG_MBIDCAMCFG 0x0628 #define REG_USTIME_EDCA 0x0638 #define REG_MAC_SPEC_SIFS 0x063A #define REG_RESP_SIFS_CCK 0x063C #define REG_RESP_SIFS_OFDM 0x063E -#define REG_ACKTO 0x0640 -#define REG_CTS2TO 0x0641 -#define REG_EIFS 0x0642 +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 #define REG_NAV_CTRL 0x0650 #define REG_BACAMCMD 0x0654 #define REG_BACAMCONTENT 0x0658 -#define REG_LBDLY 0x0660 -#define REG_FWDLY 0x0661 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 #define REG_RXERR_RPT 0x0664 #define REG_TRXPTCL_CTL 0x0668 -#define REG_CAMCMD 0x0670 +#define REG_CAMCMD 0x0670 #define REG_CAMWRITE 0x0674 -#define REG_CAMREAD 0x0678 -#define REG_CAMDBG 0x067C -#define REG_SECCFG 0x0680 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 #define REG_WOW_CTRL 0x0690 #define REG_PSSTATUS 0x0691 @@ -329,10 
+322,10 @@ #define REG_CALB32K_CTRL 0x06AC #define REG_PKT_MON_CTRL 0x06B4 #define REG_BT_COEX_TABLE 0x06C0 -#define REG_WMAC_RESP_TXINFO 0x06D8 +#define REG_WMAC_RESP_TXINFO 0x06D8 #define REG_USB_INFO 0xFE17 -#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_SPECIAL_OPTION 0xFE55 #define REG_USB_DMA_AGG_TO 0xFE5B #define REG_USB_AGG_TO 0xFE5C #define REG_USB_AGG_TH 0xFE5D @@ -340,523 +333,545 @@ #define REG_TEST_USB_TXQS 0xFE48 #define REG_TEST_SIE_VID 0xFE60 #define REG_TEST_SIE_PID 0xFE62 -#define REG_TEST_SIE_OPTIONAL 0xFE64 -#define REG_TEST_SIE_CHIRP_K 0xFE65 +#define REG_TEST_SIE_OPTIONAL 0xFE64 +#define REG_TEST_SIE_CHIRP_K 0xFE65 #define REG_TEST_SIE_PHY 0xFE66 -#define REG_TEST_SIE_MAC_ADDR 0xFE70 +#define REG_TEST_SIE_MAC_ADDR 0xFE70 #define REG_TEST_SIE_STRING 0xFE80 #define REG_NORMAL_SIE_VID 0xFE60 #define REG_NORMAL_SIE_PID 0xFE62 -#define REG_NORMAL_SIE_OPTIONAL 0xFE64 +#define REG_NORMAL_SIE_OPTIONAL 0xFE64 #define REG_NORMAL_SIE_EP 0xFE65 #define REG_NORMAL_SIE_PHY 0xFE68 -#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 -#define REG_NORMAL_SIE_STRING 0xFE80 +#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 +#define REG_NORMAL_SIE_STRING 0xFE80 -#define CR9346 REG_9346CR -#define MSR (REG_CR + 2) -#define ISR REG_HISR -#define TSFR REG_TSFTR +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR -#define MACIDR0 REG_MACID -#define MACIDR4 (REG_MACID + 4) +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) -#define PBP REG_PBP +#define PBP REG_PBP -#define IDR0 MACIDR0 -#define IDR4 MACIDR4 +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 -#define UNUSED_REGISTER 0x1BF -#define DCAM UNUSED_REGISTER -#define PSR UNUSED_REGISTER -#define BBADDR UNUSED_REGISTER -#define PHYDATAR UNUSED_REGISTER +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER -#define INVALID_BBRF_VALUE 0x12345678 +#define INVALID_BBRF_VALUE 0x12345678 -#define MAX_MSS_DENSITY_2T 0x13 -#define MAX_MSS_DENSITY_1T 0x0A +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A -#define CMDEEPROM_EN BIT(5) -#define CMDEEPROM_SEL BIT(4) -#define CMD9346CR_9356SEL BIT(4) -#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) -#define AUTOLOAD_EFUSE CMDEEPROM_EN +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN -#define GPIOSEL_GPIO 0 -#define GPIOSEL_ENBT BIT(5) +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) -#define GPIO_IN REG_GPIO_PIN_CTRL -#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) -#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) -#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) -/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */ +/*8723/8188E Host System Interrupt + *Mask Register (offset 0x58, 32 byte) + */ #define HSIMR_GPIO12_0_INT_EN BIT(0) #define HSIMR_SPS_OCP_INT_EN BIT(5) #define HSIMR_RON_INT_EN BIT(6) #define HSIMR_PDN_INT_EN BIT(7) #define HSIMR_GPIO9_INT_EN BIT(25) - -/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */ +/* 8723/8188E Host System Interrupt + * Status Register (offset 0x5C, 32 byte) + */ #define HSISR_GPIO12_0_INT BIT(0) #define HSISR_SPS_OCP_INT BIT(5) #define HSISR_RON_INT_EN BIT(6) #define HSISR_PDNINT BIT(7) 
#define HSISR_GPIO9_INT BIT(25) -#define MSR_NOLINK 0x00 -#define MSR_ADHOC 0x01 -#define MSR_INFRA 0x02 -#define MSR_AP 0x03 -#define MSR_MASK 0x03 +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 #define RRSR_RSC_OFFSET 21 #define RRSR_SHORT_OFFSET 23 #define RRSR_RSC_BW_40M 0x600000 #define RRSR_RSC_UPSUBCHNL 0x400000 #define RRSR_RSC_LOWSUBCHNL 0x200000 -#define RRSR_SHORT 0x800000 -#define RRSR_1M BIT(0) -#define RRSR_2M BIT(1) -#define RRSR_5_5M BIT(2) -#define RRSR_11M BIT(3) -#define RRSR_6M BIT(4) -#define RRSR_9M BIT(5) -#define RRSR_12M BIT(6) -#define RRSR_18M BIT(7) -#define RRSR_24M BIT(8) -#define RRSR_36M BIT(9) -#define RRSR_48M BIT(10) -#define RRSR_54M BIT(11) -#define RRSR_MCS0 BIT(12) -#define RRSR_MCS1 BIT(13) -#define RRSR_MCS2 BIT(14) -#define RRSR_MCS3 BIT(15) -#define RRSR_MCS4 BIT(16) -#define RRSR_MCS5 BIT(17) -#define RRSR_MCS6 BIT(18) -#define RRSR_MCS7 BIT(19) +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) #define BRSR_ACKSHORTPMB BIT(23) -#define RATR_1M 0x00000001 -#define RATR_2M 0x00000002 -#define RATR_55M 0x00000004 -#define RATR_11M 0x00000008 -#define RATR_6M 0x00000010 -#define RATR_9M 0x00000020 -#define RATR_12M 0x00000040 -#define RATR_18M 0x00000080 -#define RATR_24M 0x00000100 -#define RATR_36M 0x00000200 -#define RATR_48M 0x00000400 -#define RATR_54M 0x00000800 -#define RATR_MCS0 0x00001000 -#define RATR_MCS1 0x00002000 -#define RATR_MCS2 0x00004000 -#define RATR_MCS3 0x00008000 -#define RATR_MCS4 0x00010000 -#define RATR_MCS5 0x00020000 -#define RATR_MCS6 0x00040000 -#define RATR_MCS7 0x00080000 -#define RATR_MCS8 0x00100000 -#define RATR_MCS9 0x00200000 -#define RATR_MCS10 0x00400000 -#define RATR_MCS11 0x00800000 -#define RATR_MCS12 0x01000000 -#define RATR_MCS13 0x02000000 -#define RATR_MCS14 0x04000000 -#define RATR_MCS15 0x08000000 - -#define RATE_1M BIT(0) -#define RATE_2M BIT(1) -#define RATE_5_5M BIT(2) -#define RATE_11M BIT(3) -#define RATE_6M BIT(4) -#define RATE_9M BIT(5) -#define RATE_12M BIT(6) -#define RATE_18M BIT(7) -#define RATE_24M BIT(8) -#define RATE_36M BIT(9) -#define RATE_48M BIT(10) -#define RATE_54M BIT(11) -#define RATE_MCS0 BIT(12) -#define RATE_MCS1 BIT(13) -#define RATE_MCS2 BIT(14) -#define RATE_MCS3 BIT(15) -#define RATE_MCS4 BIT(16) -#define RATE_MCS5 BIT(17) -#define RATE_MCS6 BIT(18) -#define RATE_MCS7 BIT(19) -#define RATE_MCS8 BIT(20) -#define RATE_MCS9 BIT(21) -#define RATE_MCS10 BIT(22) -#define RATE_MCS11 BIT(23) -#define RATE_MCS12 BIT(24) -#define RATE_MCS13 BIT(25) -#define RATE_MCS14 BIT(26) -#define RATE_MCS15 BIT(27) +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 
0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) +#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) #define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) -#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \ +#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ RATR_24M | RATR_36M | RATR_48M | RATR_54M) -#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \ - RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \ +#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\ + RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\ RATR_MCS6 | RATR_MCS7) -#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \ - RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \ +#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\ + RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\ RATR_MCS14 | RATR_MCS15) #define BW_OPMODE_20MHZ BIT(2) #define BW_OPMODE_5G BIT(1) #define BW_OPMODE_11J BIT(0) -#define CAM_VALID BIT(15) +#define CAM_VALID BIT(15) #define CAM_NOTVALID 0x0000 -#define CAM_USEDK BIT(5) +#define CAM_USEDK BIT(5) -#define CAM_NONE 0x0 -#define CAM_WEP40 0x01 -#define CAM_TKIP 0x02 -#define CAM_AES 0x04 -#define CAM_WEP104 0x05 +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 #define TOTAL_CAM_ENTRY 32 #define HALF_CAM_ENTRY 16 -#define CAM_WRITE BIT(16) -#define CAM_READ 0x00000000 +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 #define CAM_POLLINIG BIT(31) -#define SCR_USEDK 0x01 +#define SCR_USEDK 0x01 #define SCR_TXSEC_ENABLE 0x02 #define SCR_RXSEC_ENABLE 0x04 -#define WOW_PMEN BIT(0) -#define WOW_WOMEN BIT(1) -#define WOW_MAGIC BIT(2) -#define WOW_UWF BIT(3) +#define WOW_PMEN BIT(0) +#define WOW_WOMEN BIT(1) +#define WOW_MAGIC BIT(2) +#define WOW_UWF BIT(3) /********************************************* * 8188 IMR/ISR bits **********************************************/ -#define IMR_DISABLED 0x0 +#define IMR_DISABLED 0x0 /* IMR DW0(0x0060-0063) Bit 0-31 */ -#define IMR_TXCCK BIT(30) /* TXRPT interrupt when CCX bit of - * the packet is set - */ -#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */ -#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires, - * this bit is set to 1 - */ -#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires, - * this bit is set to 1 - */ -#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */ -#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */ -#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle ind int */ -#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA 
Interrupt 0 */ -#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */ -#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is - * true, this bit is set to 1) - */ -#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Int Extension for Win7 */ -#define IMR_ATIMEND BIT(12) /* CTWidnow End or ATIM Window End */ -#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is - * true, this bit is set to 1) - */ -#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status, - * Write 1 clear - */ -#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status, - * Write 1 clear - */ -#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status, - * Write 1 clear - */ -#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */ -#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */ -#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */ -#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */ -#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */ -#define IMR_VODOK BIT(2) /* AC_VO DMA OK */ -#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */ -#define IMR_ROK BIT(0) /* Receive DMA OK */ +/* TXRPT interrupt when CCX bit of the packet is set */ +#define IMR_TXCCK BIT(30) +/* Power Save Time Out Interrupt */ +#define IMR_PSTIMEOUT BIT(29) +/* When GTIMER4 expires, this bit is set to 1 */ +#define IMR_GTINT4 BIT(28) +/* When GTIMER3 expires, this bit is set to 1 */ +#define IMR_GTINT3 BIT(27) +/* Transmit Beacon0 Error */ +#define IMR_TBDER BIT(26) +/* Transmit Beacon0 OK */ +#define IMR_TBDOK BIT(25) +/* TSF Timer BIT32 toggle indication interrupt */ +#define IMR_TSF_BIT32_TOGGLE BIT(24) +/* Beacon DMA Interrupt 0 */ +#define IMR_BCNDMAINT0 BIT(20) +/* Beacon Queue DMA OK0 */ +#define IMR_BCNDOK0 BIT(16) +/* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */ +#define IMR_HSISR_IND_ON_INT BIT(15) +/* Beacon DMA Interrupt Extension for Win7 */ +#define IMR_BCNDMAINT_E BIT(14) +/* CTWidnow End or ATIM Window End */ +#define IMR_ATIMEND BIT(12) +/* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1)*/ +#define IMR_HISR1_IND_INT BIT(11) +/* CPU to Host Command INT Status, Write 1 clear */ +#define IMR_C2HCMD BIT(10) +/* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_CPWM2 BIT(9) +/* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_CPWM BIT(8) +/* High Queue DMA OK */ +#define IMR_HIGHDOK BIT(7) +/* Management Queue DMA OK */ +#define IMR_MGNTDOK BIT(6) +/* AC_BK DMA OK */ +#define IMR_BKDOK BIT(5) +/* AC_BE DMA OK */ +#define IMR_BEDOK BIT(4) +/* AC_VI DMA OK */ +#define IMR_VIDOK BIT(3) +/* AC_VO DMA OK */ +#define IMR_VODOK BIT(2) +/* Rx Descriptor Unavailable */ +#define IMR_RDU BIT(1) +/* Receive DMA OK */ +#define IMR_ROK BIT(0) /* IMR DW1(0x00B4-00B7) Bit 0-31 */ -#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */ -#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */ -#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */ -#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */ -#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */ -#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */ -#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */ -#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */ -#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */ -#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */ -#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */ -#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */ -#define IMR_BCNDOK2 BIT(15) /* Beacon Queue 
DMA OK Interrup 2 */ -#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */ -#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */ -#define IMR_TXERR BIT(11) /* Tx Err Flag Int Status, - * write 1 clear. - */ -#define IMR_RXERR BIT(10) /* Rx Err Flag INT Status, - * Write 1 clear - */ -#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */ -#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */ - +/* Beacon DMA Interrupt 7 */ +#define IMR_BCNDMAINT7 BIT(27) +/* Beacon DMA Interrupt 6 */ +#define IMR_BCNDMAINT6 BIT(26) +/* Beacon DMA Interrupt 5 */ +#define IMR_BCNDMAINT5 BIT(25) +/* Beacon DMA Interrupt 4 */ +#define IMR_BCNDMAINT4 BIT(24) +/* Beacon DMA Interrupt 3 */ +#define IMR_BCNDMAINT3 BIT(23) +/* Beacon DMA Interrupt 2 */ +#define IMR_BCNDMAINT2 BIT(22) +/* Beacon DMA Interrupt 1 */ +#define IMR_BCNDMAINT1 BIT(21) +/* Beacon Queue DMA OK Interrup 7 */ +#define IMR_BCNDOK7 BIT(20) +/* Beacon Queue DMA OK Interrup 6 */ +#define IMR_BCNDOK6 BIT(19) +/* Beacon Queue DMA OK Interrup 5 */ +#define IMR_BCNDOK5 BIT(18) +/* Beacon Queue DMA OK Interrup 4 */ +#define IMR_BCNDOK4 BIT(17) +/* Beacon Queue DMA OK Interrup 3 */ +#define IMR_BCNDOK3 BIT(16) +/* Beacon Queue DMA OK Interrup 2 */ +#define IMR_BCNDOK2 BIT(15) +/* Beacon Queue DMA OK Interrup 1 */ +#define IMR_BCNDOK1 BIT(14) +/* ATIM Window End Extension for Win7 */ +#define IMR_ATIMEND_E BIT(13) +/* Tx Error Flag Interrupt Status, write 1 clear. */ +#define IMR_TXERR BIT(11) +/* Rx Error Flag INT Status, Write 1 clear */ +#define IMR_RXERR BIT(10) +/* Transmit FIFO Overflow */ +#define IMR_TXFOVW BIT(9) +/* Receive FIFO Overflow */ +#define IMR_RXFOVW BIT(8) #define HWSET_MAX_SIZE 512 #define EFUSE_MAX_SECTION 64 -#define EFUSE_REAL_CONTENT_LEN 256 -#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header, - * dummy 7 bytes frome CP - * test and reserved 1byte. 
- */ - -#define EEPROM_DEFAULT_TSSI 0x0 -#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 -#define EEPROM_DEFAULT_CRYSTALCAP 0x5 -#define EEPROM_DEFAULT_BOARDTYPE 0x02 -#define EEPROM_DEFAULT_TXPOWER 0x1010 -#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 +#define EFUSE_REAL_CONTENT_LEN 256 +/* PG data exclude header, dummy 7 bytes frome CP test and reserved 1byte.*/ +#define EFUSE_OOB_PROTECT_BYTES 18 + +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 #define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 -#define EEPROM_DEFAULT_THERMALMETER 0x18 +#define EEPROM_DEFAULT_THERMALMETER 0x18 #define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 #define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 -#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 -#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 -#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 #define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 #define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 #define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 -#define RF_OPTION1 0x79 -#define RF_OPTION2 0x7A -#define RF_OPTION3 0x7B -#define RF_OPTION4 0x7C +#define RF_OPTION1 0x79 +#define RF_OPTION2 0x7A +#define RF_OPTION3 0x7B +#define RF_OPTION4 0x7C -#define EEPROM_DEFAULT_PID 0x1234 -#define EEPROM_DEFAULT_VID 0x5678 -#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB #define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD -#define EEPROM_DEFAULT_VERSION 0 - -#define EEPROM_CHANNEL_PLAN_FCC 0x0 -#define EEPROM_CHANNEL_PLAN_IC 0x1 -#define EEPROM_CHANNEL_PLAN_ETSI 0x2 -#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 -#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 -#define EEPROM_CHANNEL_PLAN_MKK 0x5 -#define EEPROM_CHANNEL_PLAN_MKK1 0x6 -#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 -#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 #define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 #define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA -#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_NCC 0xB #define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 -#define EEPROM_CID_DEFAULT 0x0 -#define EEPROM_CID_TOSHIBA 0x4 -#define EEPROM_CID_CCX 0x10 -#define EEPROM_CID_QMI 0x0D -#define EEPROM_CID_WHQL 0xFE +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE -#define RTL8188E_EEPROM_ID 0x8129 +#define RTL8188E_EEPROM_ID 0x8129 -#define EEPROM_HPON 0x02 -#define EEPROM_CLK 0x06 -#define EEPROM_TESTR 0x08 +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define EEPROM_TESTR 0x08 #define EEPROM_TXPOWERCCK 0x10 -#define EEPROM_TXPOWERHT40_1S 0x16 -#define EEPROM_TXPOWERHT20DIFF 0x1B -#define EEPROM_TXPOWER_OFDMDIFF 0x1B +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT20DIFF 0x1B +#define EEPROM_TXPOWER_OFDMDIFF 0x1B -#define EEPROM_TX_PWR_INX 0x10 +#define EEPROM_TX_PWR_INX 0x10 -#define EEPROM_CHANNELPLAN 0xB8 -#define EEPROM_XTAL_88E 0xB9 
-#define EEPROM_THERMAL_METER_88E 0xBA -#define EEPROM_IQK_LCK_88E 0xBB +#define EEPROM_CHANNELPLAN 0xB8 +#define EEPROM_XTAL_88E 0xB9 +#define EEPROM_THERMAL_METER_88E 0xBA +#define EEPROM_IQK_LCK_88E 0xBB -#define EEPROM_RF_BOARD_OPTION_88E 0xC1 +#define EEPROM_RF_BOARD_OPTION_88E 0xC1 #define EEPROM_RF_FEATURE_OPTION_88E 0xC2 -#define EEPROM_RF_BT_SETTING_88E 0xC3 -#define EEPROM_VERSION 0xC4 -#define EEPROM_CUSTOMER_ID 0xC5 -#define EEPROM_RF_ANTENNA_OPT_88E 0xC9 - -#define EEPROM_MAC_ADDR 0xD0 -#define EEPROM_VID 0xD6 -#define EEPROM_DID 0xD8 -#define EEPROM_SVID 0xDA -#define EEPROM_SMID 0xDC - -#define STOPBECON BIT(6) -#define STOPHIGHT BIT(5) -#define STOPMGT BIT(4) -#define STOPVO BIT(3) -#define STOPVI BIT(2) -#define STOPBE BIT(1) -#define STOPBK BIT(0) - -#define RCR_APPFCS BIT(31) -#define RCR_APP_MIC BIT(30) -#define RCR_APP_ICV BIT(29) +#define EEPROM_RF_BT_SETTING_88E 0xC3 +#define EEPROM_VERSION 0xC4 +#define EEPROM_CUSTOMER_ID 0xC5 +#define EEPROM_RF_ANTENNA_OPT_88E 0xC9 + +#define EEPROM_MAC_ADDR 0xD0 +#define EEPROM_VID 0xD6 +#define EEPROM_DID 0xD8 +#define EEPROM_SVID 0xDA +#define EEPROM_SMID 0xDC + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) #define RCR_APP_PHYST_RXFF BIT(28) #define RCR_APP_BA_SSN BIT(27) -#define RCR_ENMBID BIT(24) -#define RCR_LSIGEN BIT(23) -#define RCR_MFBEN BIT(22) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) #define RCR_HTC_LOC_CTRL BIT(14) -#define RCR_AMF BIT(13) -#define RCR_ACF BIT(12) -#define RCR_ADF BIT(11) -#define RCR_AICV BIT(9) -#define RCR_ACRC32 BIT(8) +#define RCR_AMF BIT(13) +#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) #define RCR_CBSSID_BCN BIT(7) #define RCR_CBSSID_DATA BIT(6) -#define RCR_CBSSID RCR_CBSSID_DATA -#define RCR_APWRMGT BIT(5) -#define RCR_ADD3 BIT(4) -#define RCR_AB BIT(3) -#define RCR_AM BIT(2) -#define RCR_APM BIT(1) -#define RCR_AAP BIT(0) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) #define RCR_MXDMA_OFFSET 8 #define RCR_FIFO_OFFSET 13 -#define RSV_CTRL 0x001C -#define RD_CTRL 0x0524 +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 #define REG_USB_INFO 0xFE17 -#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_SPECIAL_OPTION 0xFE55 #define REG_USB_DMA_AGG_TO 0xFE5B #define REG_USB_AGG_TO 0xFE5C #define REG_USB_AGG_TH 0xFE5D -#define REG_USB_VID 0xFE60 -#define REG_USB_PID 0xFE62 +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 #define REG_USB_OPTIONAL 0xFE64 #define REG_USB_CHIRP_K 0xFE65 -#define REG_USB_PHY 0xFE66 +#define REG_USB_PHY 0xFE66 #define REG_USB_MAC_ADDR 0xFE70 #define REG_USB_HRPWM 0xFE58 #define REG_USB_HCPWM 0xFE57 -#define SW18_FPWM BIT(3) +#define SW18_FPWM BIT(3) -#define ISO_MD2PP BIT(0) -#define ISO_UA2USB BIT(1) -#define ISO_UD2CORE BIT(2) -#define ISO_PA2PCIE BIT(3) -#define ISO_PD2CORE BIT(4) -#define ISO_IP2MAC BIT(5) -#define ISO_DIOP BIT(6) -#define ISO_DIOE BIT(7) -#define ISO_EB2CORE BIT(8) -#define ISO_DIOR BIT(9) +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) 
+#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) -#define PWC_EV25V BIT(14) -#define PWC_EV12V BIT(15) +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) -#define FEN_BBRSTB BIT(0) +#define FEN_BBRSTB BIT(0) #define FEN_BB_GLB_RSTN BIT(1) -#define FEN_USBA BIT(2) -#define FEN_UPLL BIT(3) -#define FEN_USBD BIT(4) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) #define FEN_DIO_PCIE BIT(5) -#define FEN_PCIEA BIT(6) -#define FEN_PPLL BIT(7) -#define FEN_PCIED BIT(8) -#define FEN_DIOE BIT(9) -#define FEN_CPUEN BIT(10) -#define FEN_DCORE BIT(11) -#define FEN_ELDR BIT(12) -#define FEN_DIO_RF BIT(13) -#define FEN_HWPDN BIT(14) -#define FEN_MREGEN BIT(15) - -#define PFM_LDALL BIT(0) -#define PFM_ALDN BIT(1) -#define PFM_LDKP BIT(2) -#define PFM_WOWL BIT(3) -#define ENPDN BIT(4) -#define PDN_PL BIT(5) -#define APFM_ONMAC BIT(8) -#define APFM_OFF BIT(9) -#define APFM_RSM BIT(10) -#define AFSM_HSUS BIT(11) -#define AFSM_PCIE BIT(12) -#define APDM_MAC BIT(13) -#define APDM_HOST BIT(14) -#define APDM_HPDN BIT(15) -#define RDY_MACON BIT(16) -#define SUS_HOST BIT(17) -#define ROP_ALD BIT(20) -#define ROP_PWR BIT(21) -#define ROP_SPS BIT(22) -#define SOP_MRST BIT(25) -#define SOP_FUSE BIT(26) -#define SOP_ABG BIT(27) -#define SOP_AMB BIT(28) -#define SOP_RCK BIT(29) -#define SOP_A8M BIT(30) -#define XOP_BTCK BIT(31) - -#define ANAD16V_EN BIT(0) -#define ANA8M BIT(1) -#define MACSLP BIT(4) +#define FEN_PCIEA BIT(6) +#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define ENPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) #define LOADER_CLK_EN BIT(5) #define _80M_SSC_DIS BIT(7) #define _80M_SSC_EN_HO BIT(8) #define PHY_SSC_RSTB BIT(9) -#define SEC_CLK_EN BIT(10) -#define MAC_CLK_EN BIT(11) -#define SYS_CLK_EN BIT(12) -#define RING_CLK_EN BIT(13) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) #define BOOT_FROM_EEPROM BIT(4) -#define EEPROM_EN BIT(5) +#define EEPROM_EN BIT(5) -#define AFE_BGEN BIT(0) -#define AFE_MBEN BIT(1) -#define MAC_ID_EN BIT(7) +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) -#define WLOCK_ALL BIT(0) -#define WLOCK_00 BIT(1) -#define WLOCK_04 BIT(2) -#define WLOCK_08 BIT(3) -#define WLOCK_40 BIT(4) +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) #define R_DIS_PRST_0 BIT(5) #define R_DIS_PRST_1 BIT(6) -#define LOCK_ALL_EN BIT(7) +#define LOCK_ALL_EN BIT(7) -#define RF_EN BIT(0) -#define RF_RSTB BIT(1) -#define RF_SDMRSTB BIT(2) +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) -#define LDA15_EN BIT(0) -#define 
LDA15_STBY BIT(1) -#define LDA15_OBUF BIT(2) +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) +#define LDA15_OBUF BIT(2) #define LDA15_REG_VOS BIT(3) #define _LDA15_VOADJ(x) (((x) & 0x7) << 4) -#define LDV12_EN BIT(0) -#define LDV12_SDBY BIT(1) -#define LPLDO_HSM BIT(2) +#define LDV12_EN BIT(0) +#define LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) #define LPLDO_LSM_DIS BIT(3) #define _LDV12_VADJ(x) (((x) & 0xF) << 4) -#define XTAL_EN BIT(0) -#define XTAL_BSEL BIT(1) +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) #define _XTAL_BOSC(x) (((x) & 0x3) << 2) #define _XTAL_CADJ(x) (((x) & 0xF) << 4) #define XTAL_GATE_USB BIT(8) @@ -871,145 +886,145 @@ #define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) #define _XTAL_GPIO(x) (((x) & 0x7) << 23) -#define CKDLY_AFE BIT(26) -#define CKDLY_USB BIT(27) -#define CKDLY_DIG BIT(28) -#define CKDLY_BT BIT(29) +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) -#define APLL_EN BIT(0) -#define APLL_320_EN BIT(1) +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) #define APLL_FREF_SEL BIT(2) #define APLL_EDGE_SEL BIT(3) -#define APLL_WDOGB BIT(4) -#define APLL_LPFEN BIT(5) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) #define APLL_REF_CLK_13MHZ 0x1 -#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_19_2MHZ 0x2 #define APLL_REF_CLK_20MHZ 0x3 #define APLL_REF_CLK_25MHZ 0x4 #define APLL_REF_CLK_26MHZ 0x5 -#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_38_4MHZ 0x6 #define APLL_REF_CLK_40MHZ 0x7 -#define APLL_320EN BIT(14) -#define APLL_80EN BIT(15) -#define APLL_1MEN BIT(24) - -#define ALD_EN BIT(18) -#define EF_PD BIT(19) -#define EF_FLAG BIT(31) - -#define EF_TRPT BIT(7) -#define LDOE25_EN BIT(31) - -#define RSM_EN BIT(0) -#define TIMER_EN BIT(4) - -#define TRSW0EN BIT(2) -#define TRSW1EN BIT(3) -#define EROM_EN BIT(4) -#define ENBT BIT(5) -#define ENUART BIT(8) -#define UART_910 BIT(9) -#define ENPMAC BIT(10) -#define SIC_SWRST BIT(11) -#define ENSIC BIT(12) -#define SIC_23 BIT(13) -#define ENHDP BIT(14) -#define SIC_LBK BIT(15) - -#define LED0PL BIT(4) -#define LED1PL BIT(12) -#define LED0DIS BIT(7) - -#define MCUFWDL_EN BIT(0) -#define MCUFWDL_RDY BIT(1) +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define TIMER_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define ENSIC BIT(12) +#define SIC_23 BIT(13) +#define ENHDP BIT(14) +#define SIC_LBK BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) #define FWDL_CHKSUM_RPT BIT(2) -#define MACINI_RDY BIT(3) -#define BBINI_RDY BIT(4) -#define RFINI_RDY BIT(5) -#define WINTINI_RDY BIT(6) -#define CPRST BIT(23) - -#define XCLK_VLD BIT(0) -#define ACLK_VLD BIT(1) -#define UCLK_VLD BIT(2) -#define PCLK_VLD BIT(3) -#define PCIRSTB BIT(4) -#define V15_VLD BIT(5) -#define TRP_B15V_EN BIT(7) -#define SIC_IDLE BIT(8) -#define BD_MAC2 BIT(9) -#define BD_MAC1 BIT(10) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define 
V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) #define IC_MACPHY_MODE BIT(11) -#define VENDOR_ID BIT(19) +#define VENDOR_ID BIT(19) #define PAD_HWPD_IDN BIT(22) -#define TRP_VAUX_EN BIT(23) -#define TRP_BT_EN BIT(24) -#define BD_PKG_SEL BIT(25) -#define BD_HCI_SEL BIT(26) -#define TYPE_ID BIT(27) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) #define CHIP_VER_RTL_MASK 0xF000 #define CHIP_VER_RTL_SHIFT 12 -#define REG_LBMODE (REG_CR + 3) +#define REG_LBMODE (REG_CR + 3) #define HCI_TXDMA_EN BIT(0) #define HCI_RXDMA_EN BIT(1) -#define TXDMA_EN BIT(2) -#define RXDMA_EN BIT(3) -#define PROTOCOL_EN BIT(4) -#define SCHEDULE_EN BIT(5) -#define MACTXEN BIT(6) -#define MACRXEN BIT(7) -#define ENSWBCN BIT(8) -#define ENSEC BIT(9) - -#define _NETTYPE(x) (((x) & 0x3) << 16) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) #define MASK_NETTYPE 0x30000 -#define NT_NO_LINK 0x0 +#define NT_NO_LINK 0x0 #define NT_LINK_AD_HOC 0x1 -#define NT_LINK_AP 0x2 -#define NT_AS_AP 0x3 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 -#define _LBMODE(x) (((x) & 0xF) << 24) -#define MASK_LBMODE 0xF000000 +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 #define LOOPBACK_NORMAL 0x0 -#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_IMMEDIATELY 0xB #define LOOPBACK_MAC_DELAY 0x3 #define LOOPBACK_PHY 0x1 #define LOOPBACK_DMA 0x7 #define GET_RX_PAGE_SIZE(value) ((value) & 0xF) #define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) -#define _PSRX_MASK 0xF -#define _PSTX_MASK 0xF0 -#define _PSRX(x) (x) -#define _PSTX(x) ((x) << 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 +#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) -#define PBP_64 0x0 -#define PBP_128 0x1 -#define PBP_256 0x2 -#define PBP_512 0x3 -#define PBP_1024 0x4 +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 +#define PBP_1024 0x4 #define RXDMA_ARBBW_EN BIT(0) -#define RXSHFT_EN BIT(1) +#define RXSHFT_EN BIT(1) #define RXDMA_AGG_EN BIT(2) -#define QS_VO_QUEUE BIT(8) -#define QS_VI_QUEUE BIT(9) -#define QS_BE_QUEUE BIT(10) -#define QS_BK_QUEUE BIT(11) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) #define QS_MANAGER_QUEUE BIT(12) #define QS_HIGH_QUEUE BIT(13) -#define HQSEL_VOQ BIT(0) -#define HQSEL_VIQ BIT(1) -#define HQSEL_BEQ BIT(2) -#define HQSEL_BKQ BIT(3) -#define HQSEL_MGTQ BIT(4) -#define HQSEL_HIQ BIT(5) +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) #define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) #define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) @@ -1018,9 +1033,9 @@ #define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6) #define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4) -#define QUEUE_LOW 1 +#define QUEUE_LOW 1 #define QUEUE_NORMAL 2 -#define QUEUE_HIGH 3 +#define QUEUE_HIGH 3 #define _LLT_NO_ACTIVE 0x0 #define _LLT_WRITE_ACCESS 0x1 @@ -1028,25 +1043,25 @@ #define _LLT_INIT_DATA(x) ((x) & 0xFF) #define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) -#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP(x) (((x) & 0x3) << 30) #define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) #define BB_WRITE_READ_MASK (BIT(31) | 
BIT(30)) -#define BB_WRITE_EN BIT(30) -#define BB_READ_EN BIT(31) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) -#define _HPQ(x) ((x) & 0xFF) -#define _LPQ(x) (((x) & 0xFF) << 8) -#define _PUBQ(x) (((x) & 0xFF) << 16) -#define _NPQ(x) ((x) & 0xFF) +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) -#define HPQ_PUBLIC_DIS BIT(24) -#define LPQ_PUBLIC_DIS BIT(25) -#define LD_RQPN BIT(31) +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) -#define BCN_VALID BIT(16) -#define BCN_HEAD(x) (((x) & 0xFF) << 8) -#define BCN_HEAD_MASK 0xFF00 +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 #define BLK_DESC_NUM_SHIFT 4 #define BLK_DESC_NUM_MASK 0xF @@ -1066,9 +1081,9 @@ #define _RRSR_RSC(x) (((x) & 0x3) << 21) #define RRSR_RSC_RESERVED 0x0 -#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 -#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 -#define RRSR_RSC_DUPLICATE_MODE 0x3 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 #define USE_SHORT_G1 BIT(20) @@ -1081,8 +1096,8 @@ #define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) #define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) -#define RETRY_LIMIT_SHORT_SHIFT 8 -#define RETRY_LIMIT_LONG_SHIFT 0 +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 #define _DARF_RC1(x) ((x) & 0x1F) #define _DARF_RC2(x) (((x) & 0x1F) << 8) @@ -1102,20 +1117,20 @@ #define _RARF_RC7(x) (((x) & 0x1F) << 16) #define _RARF_RC8(x) (((x) & 0x1F) << 24) -#define AC_PARAM_TXOP_LIMIT_OFFSET 16 -#define AC_PARAM_ECW_MAX_OFFSET 12 -#define AC_PARAM_ECW_MIN_OFFSET 8 -#define AC_PARAM_AIFS_OFFSET 0 +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 -#define _AIFS(x) (x) +#define _AIFS(x) (x) #define _ECW_MAX_MIN(x) ((x) << 8) #define _TXOP_LIMIT(x) ((x) << 16) -#define _BCNIFS(x) ((x) & 0xFF) -#define _BCNECW(x) ((((x) & 0xF)) << 8) +#define _BCNIFS(x) ((x) & 0xFF) +#define _BCNECW(x) ((((x) & 0xF)) << 8) -#define _LRL(x) ((x) & 0x3F) -#define _SRL(x) (((x) & 0x3F) << 8) +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) #define _SIFS_CCK_CTX(x) ((x) & 0xFF) #define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8); @@ -1123,102 +1138,102 @@ #define _SIFS_OFDM_CTX(x) ((x) & 0xFF) #define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8); -#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) #define DIS_EDCA_CNT_DWN BIT(11) -#define EN_MBSSID BIT(1) +#define EN_MBSSID BIT(1) #define EN_TXBCN_RPT BIT(2) #define EN_BCN_FUNCTION BIT(3) -#define TSFTR_RST BIT(0) -#define TSFTR1_RST BIT(1) +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) -#define STOP_BCNQ BIT(6) +#define STOP_BCNQ BIT(6) -#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) -#define DIS_TSF_UDT0_TEST_CHIP BIT(5) +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) -#define ACMHW_HWEN BIT(0) -#define ACMHW_BEQEN BIT(1) -#define ACMHW_VIQEN BIT(2) -#define ACMHW_VOQEN BIT(3) +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) #define ACMHW_BEQSTATUS BIT(4) #define ACMHW_VIQSTATUS BIT(5) #define ACMHW_VOQSTATUS BIT(6) -#define APSDOFF BIT(6) +#define APSDOFF BIT(6) #define APSDOFF_STATUS BIT(7) -#define BW_20MHZ BIT(2) +#define BW_20MHZ BIT(2) #define RATE_BITMAP_ALL 0xFFFFF -#define 
RATE_RRSR_CCK_ONLY_1M 0xFFFF1 +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 -#define TSFRST BIT(0) -#define DIS_GCLK BIT(1) -#define PAD_SEL BIT(2) -#define PWR_ST BIT(6) +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) #define PWRBIT_OW_EN BIT(7) -#define ACRC BIT(8) -#define CFENDFORM BIT(9) -#define ICV BIT(10) - -#define AAP BIT(0) -#define APM BIT(1) -#define AM BIT(2) -#define AB BIT(3) -#define ADD3 BIT(4) -#define APWRMGT BIT(5) -#define CBSSID BIT(6) -#define CBSSID_DATA BIT(6) -#define CBSSID_BCN BIT(7) -#define ACRC32 BIT(8) -#define AICV BIT(9) -#define ADF BIT(11) -#define ACF BIT(12) -#define AMF BIT(13) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) #define HTC_LOC_CTRL BIT(14) -#define UC_DATA_EN BIT(16) -#define BM_DATA_EN BIT(17) -#define MFBEN BIT(22) -#define LSIGEN BIT(23) -#define ENMBID BIT(24) -#define APP_BASSN BIT(27) -#define APP_PHYSTS BIT(28) -#define APP_ICV BIT(29) -#define APP_MIC BIT(30) -#define APP_FCS BIT(31) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define ENMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) #define _MIN_SPACE(x) ((x) & 0x7) -#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) -#define RXERR_TYPE_OFDM_PPDU 0 -#define RXERR_TYPE_OFDM_FALSE_ALARM 1 -#define RXERR_TYPE_OFDM_MPDU_OK 2 -#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 #define RXERR_TYPE_CCK_PPDU 4 -#define RXERR_TYPE_CCK_FALSE_ALARM 5 -#define RXERR_TYPE_CCK_MPDU_OK 6 -#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 #define RXERR_TYPE_HT_PPDU 8 -#define RXERR_TYPE_HT_FALSE_ALARM 9 -#define RXERR_TYPE_HT_MPDU_TOTAL 10 -#define RXERR_TYPE_HT_MPDU_OK 11 -#define RXERR_TYPE_HT_MPDU_FAIL 12 -#define RXERR_TYPE_RX_FULL_DROP 15 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 #define RXERR_COUNTER_MASK 0xFFFFF #define RXERR_RPT_RST BIT(27) -#define _RXERR_RPT_SEL(type) ((type) << 28) +#define _RXERR_RPT_SEL(type) ((type) << 28) -#define SCR_TXUSEDK BIT(0) -#define SCR_RXUSEDK BIT(1) +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) #define SCR_TXENCENABLE BIT(2) #define SCR_RXDECENABLE BIT(3) -#define SCR_SKBYA2 BIT(4) -#define SCR_NOSKMC BIT(5) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) #define SCR_TXBCUSEDK BIT(6) #define SCR_RXBCUSEDK BIT(7) @@ -1226,32 +1241,32 @@ #define USB_IS_FULL_SPEED 1 #define USB_SPEED_MASK BIT(5) -#define USB_NORMAL_SIE_EP_MASK 0xF -#define USB_NORMAL_SIE_EP_SHIFT 4 +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 #define USB_TEST_EP_MASK 0x30 #define USB_TEST_EP_SHIFT 4 -#define USB_AGG_EN BIT(3) +#define USB_AGG_EN BIT(3) #define MAC_ADDR_LEN 6 -#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ 
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ -#define POLLING_LLT_THRESHOLD 20 +#define POLLING_LLT_THRESHOLD 20 #define POLLING_READY_TIMEOUT_COUNT 3000 #define MAX_MSS_DENSITY_2T 0x13 #define MAX_MSS_DENSITY_1T 0x0A -#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) +#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) #define EPROM_CMD_CONFIG 0x3 #define EPROM_CMD_LOAD 1 #define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE -#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) -#define RPMAC_RESET 0x100 +#define RPMAC_RESET 0x100 #define RPMAC_TXSTART 0x104 #define RPMAC_TXLEGACYSIG 0x108 #define RPMAC_TXHTSIG1 0x10c @@ -1267,12 +1282,12 @@ #define RPMAC_TXMACHEADER5 0x134 #define RPMAC_TXDADATYPE 0x138 #define RPMAC_TXRANDOMSEED 0x13c -#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPPREAMBLE 0x140 #define RPMAC_CCKPLCPHEADER 0x144 #define RPMAC_CCKCRC16 0x148 #define RPMAC_OFDMRXCRC32OK 0x170 -#define RPMAC_OFDMRXCRC32Er 0x174 -#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 #define RPMAC_OFDMRXCRC8ER 0x17c #define RPMAC_CCKCRXRC16ER 0x180 #define RPMAC_CCKCRXRC32ER 0x184 @@ -1289,45 +1304,45 @@ #define RFPGA0_RFTIMING1 0x810 #define RFPGA0_RFTIMING2 0x814 -#define RFPGA0_XA_HSSIPARAMETER1 0x820 -#define RFPGA0_XA_HSSIPARAMETER2 0x824 -#define RFPGA0_XB_HSSIPARAMETER1 0x828 -#define RFPGA0_XB_HSSIPARAMETER2 0x82c +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c -#define RFPGA0_XA_LSSIPARAMETER 0x840 -#define RFPGA0_XB_LSSIPARAMETER 0x844 +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 -#define RFPGA0_RFWAKEUPPARAMETER 0x850 -#define RFPGA0_RFSLEEPUPPARAMETER 0x854 +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 -#define RFPGA0_XAB_SWITCHCONTROL 0x858 -#define RFPGA0_XCD_SWITCHCONTROL 0x85c +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c -#define RFPGA0_XA_RFINTERFACEOE 0x860 -#define RFPGA0_XB_RFINTERFACEOE 0x864 +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 -#define RFPGA0_XAB_RFINTERFACESW 0x870 -#define RFPGA0_XCD_RFINTERFACESW 0x874 +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 -#define rFPGA0_XAB_RFPARAMETER 0x878 -#define rFPGA0_XCD_RFPARAMETER 0x87c +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c -#define RFPGA0_ANALOGPARAMETER1 0x880 -#define RFPGA0_ANALOGPARAMETER2 0x884 -#define RFPGA0_ANALOGPARAMETER3 0x888 -#define RFPGA0_ANALOGPARAMETER4 0x88c +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c -#define RFPGA0_XA_LSSIREADBACK 0x8a0 -#define RFPGA0_XB_LSSIREADBACK 0x8a4 -#define RFPGA0_XC_LSSIREADBACK 0x8a8 -#define RFPGA0_XD_LSSIREADBACK 0x8ac +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +#define RFPGA0_XD_LSSIREADBACK 0x8ac #define RFPGA0_PSDREPORT 0x8b4 -#define TRANSCEIVEA_HSPI_READBACK 0x8b8 -#define TRANSCEIVEB_HSPI_READBACK 0x8bc -#define REG_SC_CNT 0x8c4 -#define RFPGA0_XAB_RFINTERFACERB 0x8e0 -#define RFPGA0_XCD_RFINTERFACERB 0x8e4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +#define REG_SC_CNT 0x8c4 +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define 
RFPGA0_XCD_RFINTERFACERB 0x8e4 #define RFPGA1_RFMOD 0x900 @@ -1338,12 +1353,12 @@ #define RCCK0_SYSTEM 0xa00 #define RCCK0_AFESETTING 0xa04 -#define RCCK0_CCA 0xa08 +#define RCCK0_CCA 0xa08 #define RCCK0_RXAGC1 0xa0c #define RCCK0_RXAGC2 0xa10 -#define RCCK0_RXHP 0xa14 +#define RCCK0_RXHP 0xa14 #define RCCK0_DSPPARAMETER1 0xa18 #define RCCK0_DSPPARAMETER2 0xa1c @@ -1351,75 +1366,74 @@ #define RCCK0_TXFILTER1 0xa20 #define RCCK0_TXFILTER2 0xa24 #define RCCK0_DEBUGPORT 0xa28 -#define RCCK0_FALSEALARMREPORT 0xa2c -#define RCCK0_TRSSIREPORT 0xa50 -#define RCCK0_RXREPORT 0xa54 -#define RCCK0_FACOUNTERLOWER 0xa5c -#define RCCK0_FACOUNTERUPPER 0xa58 -#define RCCK0_CCA_CNT 0xa60 - +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 +#define RCCK0_CCA_CNT 0xa60 /* PageB(0xB00) */ -#define RPDP_ANTA 0xb00 +#define RPDP_ANTA 0xb00 #define RPDP_ANTA_4 0xb04 #define RPDP_ANTA_8 0xb08 #define RPDP_ANTA_C 0xb0c -#define RPDP_ANTA_10 0xb10 -#define RPDP_ANTA_14 0xb14 -#define RPDP_ANTA_18 0xb18 -#define RPDP_ANTA_1C 0xb1c -#define RPDP_ANTA_20 0xb20 -#define RPDP_ANTA_24 0xb24 +#define RPDP_ANTA_10 0xb10 +#define RPDP_ANTA_14 0xb14 +#define RPDP_ANTA_18 0xb18 +#define RPDP_ANTA_1C 0xb1c +#define RPDP_ANTA_20 0xb20 +#define RPDP_ANTA_24 0xb24 #define RCONFIG_PMPD_ANTA 0xb28 -#define RCONFIG_RAM64X16 0xb2c +#define RCONFIG_RAM64x16 0xb2c -#define RBNDA 0xb30 -#define RHSSIPAR 0xb34 +#define RBNDA 0xb30 +#define RHSSIPAR 0xb34 -#define RCONFIG_ANTA 0xb68 -#define RCONFIG_ANTB 0xb6c +#define RCONFIG_ANTA 0xb68 +#define RCONFIG_ANTB 0xb6c -#define RPDP_ANTB 0xb70 -#define RPDP_ANTB_4 0xb74 -#define RPDP_ANTB_8 0xb78 -#define RPDP_ANTB_C 0xb7c -#define RPDP_ANTB_10 0xb80 -#define RPDP_ANTB_14 0xb84 -#define RPDP_ANTB_18 0xb88 -#define RPDP_ANTB_1C 0xb8c -#define RPDP_ANTB_20 0xb90 -#define RPDP_ANTB_24 0xb94 +#define RPDP_ANTB 0xb70 +#define RPDP_ANTB_4 0xb74 +#define RPDP_ANTB_8 0xb78 +#define RPDP_ANTB_C 0xb7c +#define RPDP_ANTB_10 0xb80 +#define RPDP_ANTB_14 0xb84 +#define RPDP_ANTB_18 0xb88 +#define RPDP_ANTB_1C 0xb8c +#define RPDP_ANTB_20 0xb90 +#define RPDP_ANTB_24 0xb94 #define RCONFIG_PMPD_ANTB 0xb98 -#define RBNDB 0xba0 +#define RBNDB 0xba0 -#define RAPK 0xbd8 -#define rPm_Rx0_AntA 0xbdc -#define rPm_Rx1_AntA 0xbe0 -#define rPm_Rx2_AntA 0xbe4 -#define rPm_Rx3_AntA 0xbe8 -#define rPm_Rx0_AntB 0xbec -#define rPm_Rx1_AntB 0xbf0 -#define rPm_Rx2_AntB 0xbf4 -#define rPm_Rx3_AntB 0xbf8 +#define RAPK 0xbd8 +#define RPM_RX0_ANTA 0xbdc +#define RPM_RX1_ANTA 0xbe0 +#define RPM_RX2_ANTA 0xbe4 +#define RPM_RX3_ANTA 0xbe8 +#define RPM_RX0_ANTB 0xbec +#define RPM_RX1_ANTB 0xbf0 +#define RPM_RX2_ANTB 0xbf4 +#define RPM_RX3_ANTB 0xbf8 /*Page C*/ -#define ROFDM0_LSTF 0xc00 +#define ROFDM0_LSTF 0xc00 -#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRXPATHENABLE 0xc04 #define ROFDM0_TRMUXPAR 0xc08 -#define ROFDM0_TRSWISOLATION 0xc0c +#define ROFDM0_TRSWISOLATION 0xc0c #define ROFDM0_XARXAFE 0xc10 -#define ROFDM0_XARXIQIMBAL 0xc14 -#define ROFDM0_XBRXAFE 0xc18 -#define ROFDM0_XBRXIQIMBAL 0xc1c -#define ROFDM0_XCRXAFE 0xc20 -#define ROFDM0_XCRXIQIMBAL 0xc24 -#define ROFDM0_XDRXAFE 0xc28 -#define ROFDM0_XDRXIQIMBAL 0xc2c +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c #define ROFDM0_RXDETECTOR1 0xc30 #define 
ROFDM0_RXDETECTOR2 0xc34 @@ -1428,8 +1442,8 @@ #define ROFDM0_RXDSP 0xc40 #define ROFDM0_CFOANDDAGC 0xc44 -#define ROFDM0_CCADROPTHRES 0xc48 -#define ROFDM0_ECCATHRES 0xc4c +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c #define ROFDM0_XAAGCCORE1 0xc50 #define ROFDM0_XAAGCCORE2 0xc54 @@ -1440,18 +1454,18 @@ #define ROFDM0_XDAGCCORE1 0xc68 #define ROFDM0_XDAGCCORE2 0xc6c -#define ROFDM0_AGCPARAMETER1 0xc70 -#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 #define ROFDM0_AGCRSSITABLE 0xc78 #define ROFDM0_HTSTFAGC 0xc7c -#define ROFDM0_XATXIQIMBAL 0xc80 +#define ROFDM0_XATXIQIMBALANCE 0xc80 #define ROFDM0_XATXAFE 0xc84 -#define ROFDM0_XBTXIQIMBAL 0xc88 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 #define ROFDM0_XBTXAFE 0xc8c -#define ROFDM0_XCTXIQIMBAL 0xc90 -#define ROFDM0_XCTXAFE 0xc94 -#define ROFDM0_XDTXIQIMBAL 0xc98 +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 #define ROFDM0_XDTXAFE 0xc9c #define ROFDM0_RXIQEXTANTA 0xca0 @@ -1462,25 +1476,24 @@ #define ROFDM0_TXCOEFF5 0xcb4 #define ROFDM0_TXCOEFF6 0xcb8 -#define ROFDM0_RXHPPARAMETER 0xce0 -#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 #define ROFDM0_FRAMESYNC 0xcf0 #define ROFDM0_DFSREPORT 0xcf4 +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 -#define ROFDM1_LSTF 0xd00 -#define ROFDM1_TRXPATHENABLE 0xd04 - -#define ROFDM1_CF0 0xd08 -#define ROFDM1_CSI1 0xd10 -#define ROFDM1_SBD 0xd14 -#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 #define ROFDM1_CFOTRACKING 0xd2c #define ROFDM1_TRXMESAURE1 0xd34 #define ROFDM1_INTFDET 0xd3c -#define ROFDM1_PSEUDONOISESTATEAB 0xd50 -#define ROFDM1_PSEUDONOISESTATECD 0xd54 -#define ROFDM1_RXPSEUDONOISEWGT 0xd58 +#define ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 #define ROFDM_PHYCOUNTER1 0xda0 #define ROFDM_PHYCOUNTER2 0xda4 @@ -1492,84 +1505,84 @@ #define ROFDM_LONGCFOCD 0xdb8 #define ROFDM_TAILCF0AB 0xdbc #define ROFDM_TAILCF0CD 0xdc0 -#define ROFDM_PWMEASURE1 0xdc4 -#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 #define ROFDM_BWREPORT 0xdcc #define ROFDM_AGCREPORT 0xdd0 -#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXSNR 0xdd4 #define ROFDM_RXEVMCSI 0xdd8 #define ROFDM_SIGREPORT 0xddc #define RTXAGC_A_RATE18_06 0xe00 #define RTXAGC_A_RATE54_24 0xe04 #define RTXAGC_A_CCK1_MCS32 0xe08 -#define RTXAGC_A_MCS03_MCS00 0xe10 -#define RTXAGC_A_MCS07_MCS04 0xe14 -#define RTXAGC_A_MCS11_MCS08 0xe18 -#define RTXAGC_A_MCS15_MCS12 0xe1c +#define RTXAGC_A_MCS03_MCS00 0xe10 +#define RTXAGC_A_MCS07_MCS04 0xe14 +#define RTXAGC_A_MCS11_MCS08 0xe18 +#define RTXAGC_A_MCS15_MCS12 0xe1c #define RTXAGC_B_RATE18_06 0x830 #define RTXAGC_B_RATE54_24 0x834 -#define RTXAGC_B_CCK1_55_MCS32 0x838 -#define RTXAGC_B_MCS03_MCS00 0x83c -#define RTXAGC_B_MCS07_MCS04 0x848 -#define RTXAGC_B_MCS11_MCS08 0x84c -#define RTXAGC_B_MCS15_MCS12 0x868 -#define RTXAGC_B_CCK11_A_CCK2_11 0x86c - -#define RFPGA0_IQK 0xe28 +#define RTXAGC_B_CCK1_55_MCS32 0x838 +#define RTXAGC_B_MCS03_MCS00 0x83c +#define RTXAGC_B_MCS07_MCS04 0x848 +#define RTXAGC_B_MCS11_MCS08 0x84c +#define RTXAGC_B_MCS15_MCS12 0x868 +#define RTXAGC_B_CCK11_A_CCK2_11 0x86c + +#define RFPGA0_IQK 0xe28 #define RTX_IQK_TONE_A 0xe30 #define RRX_IQK_TONE_A 0xe34 -#define 
RTX_IQK_PI_A 0xe38 -#define RRX_IQK_PI_A 0xe3c +#define RTX_IQK_PI_A 0xe38 +#define RRX_IQK_PI_A 0xe3c -#define RTX_IQK 0xe40 -#define RRX_IQK 0xe44 -#define RIQK_AGC_PTS 0xe48 -#define RIQK_AGC_RSP 0xe4c +#define RTX_IQK 0xe40 +#define RRX_IQK 0xe44 +#define RIQK_AGC_PTS 0xe48 +#define RIQK_AGC_RSP 0xe4c #define RTX_IQK_TONE_B 0xe50 #define RRX_IQK_TONE_B 0xe54 -#define RTX_IQK_PI_B 0xe58 -#define RRX_IQK_PI_B 0xe5c +#define RTX_IQK_PI_B 0xe58 +#define RRX_IQK_PI_B 0xe5c #define RIQK_AGC_CONT 0xe60 -#define RBLUE_TOOTH 0xe6c -#define RRX_WAIT_CCA 0xe70 -#define RTX_CCK_RFON 0xe74 +#define RBLUE_TOOTH 0xe6c +#define RRX_WAIT_CCA 0xe70 +#define RTX_CCK_RFON 0xe74 #define RTX_CCK_BBON 0xe78 #define RTX_OFDM_RFON 0xe7c #define RTX_OFDM_BBON 0xe80 -#define RTX_TO_RX 0xe84 -#define RTX_TO_TX 0xe88 -#define RRX_CCK 0xe8c +#define RTX_TO_RX 0xe84 +#define RTX_TO_TX 0xe88 +#define RRX_CCK 0xe8c -#define RTX_POWER_BEFORE_IQK_A 0xe94 +#define RTX_POWER_BEFORE_IQK_A 0xe94 #define RTX_POWER_AFTER_IQK_A 0xe9c -#define RRX_POWER_BEFORE_IQK_A 0xea0 +#define RRX_POWER_BEFORE_IQK_A 0xea0 #define RRX_POWER_BEFORE_IQK_A_2 0xea4 #define RRX_POWER_AFTER_IQK_A 0xea8 -#define RRX_POWER_AFTER_IQK_A_2 0xeac +#define RRX_POWER_AFTER_IQK_A_2 0xeac -#define RTX_POWER_BEFORE_IQK_B 0xeb4 +#define RTX_POWER_BEFORE_IQK_B 0xeb4 #define RTX_POWER_AFTER_IQK_B 0xebc -#define RRX_POWER_BEFORE_IQK_B 0xec0 +#define RRX_POWER_BEFORE_IQK_B 0xec0 #define RRX_POWER_BEFORE_IQK_B_2 0xec4 #define RRX_POWER_AFTER_IQK_B 0xec8 -#define RRX_POWER_AFTER_IQK_B_2 0xecc +#define RRX_POWER_AFTER_IQK_B_2 0xecc -#define RRX_OFDM 0xed0 +#define RRX_OFDM 0xed0 #define RRX_WAIT_RIFS 0xed4 -#define RRX_TO_RX 0xed8 -#define RSTANDBY 0xedc -#define RSLEEP 0xee0 +#define RRX_TO_RX 0xed8 +#define RSTANDBY 0xedc +#define RSLEEP 0xee0 #define RPMPD_ANAEN 0xeec #define RZEBRA1_HSSIENABLE 0x0 #define RZEBRA1_TRXENABLE1 0x1 #define RZEBRA1_TRXENABLE2 0x2 -#define RZEBRA1_AGC 0x4 +#define RZEBRA1_AGC 0x4 #define RZEBRA1_CHARGEPUMP 0x5 #define RZEBRA1_CHANNEL 0x7 @@ -1578,666 +1591,681 @@ #define RZEBRA1_RXLPF 0xb #define RZEBRA1_RXHPFCORNER 0xc -#define RGLOBALCTRL 0 +#define RGLOBALCTRL 0 #define RRTL8256_TXLPF 19 #define RRTL8256_RXLPF 11 #define RRTL8258_TXLPF 0x11 #define RRTL8258_RXLPF 0x13 #define RRTL8258_RSSILPF 0xa -#define RF_AC 0x00 +#define RF_AC 0x00 -#define RF_IQADJ_G1 0x01 -#define RF_IQADJ_G2 0x02 -#define RF_POW_TRSW 0x05 +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 -#define RF_GAIN_RX 0x06 -#define RF_GAIN_TX 0x07 +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 -#define RF_TXM_IDAC 0x08 -#define RF_BS_IQGEN 0x0F +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F -#define RF_MODE1 0x10 -#define RF_MODE2 0x11 +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 #define RF_RX_AGC_HP 0x12 -#define RF_TX_AGC 0x13 -#define RF_BIAS 0x14 -#define RF_IPA 0x15 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 #define RF_POW_ABILITY 0x17 -#define RF_MODE_AG 0x18 -#define RRFCHANNEL 0x18 -#define RF_CHNLBW 0x18 -#define RF_TOP 0x19 - -#define RF_RX_G1 0x1A -#define RF_RX_G2 0x1B - -#define RF_RX_BB2 0x1C -#define RF_RX_BB1 0x1D - -#define RF_RCK1 0x1E -#define RF_RCK2 0x1F - -#define RF_TX_G1 0x20 -#define RF_TX_G2 0x21 -#define RF_TX_G3 0x22 - -#define RF_TX_BB1 0x23 -#define RF_T_METER 0x42 - -#define RF_SYN_G1 0x25 -#define RF_SYN_G2 0x26 -#define RF_SYN_G3 0x27 -#define RF_SYN_G4 0x28 -#define RF_SYN_G5 0x29 -#define RF_SYN_G6 0x2A -#define RF_SYN_G7 0x2B -#define RF_SYN_G8 0x2C - -#define RF_RCK_OS 
0x30 -#define RF_TXPA_G1 0x31 -#define RF_TXPA_G2 0x32 -#define RF_TXPA_G3 0x33 - -#define RF_TX_BIAS_A 0x35 -#define RF_TX_BIAS_D 0x36 -#define RF_LOBF_9 0x38 -#define RF_RXRF_A3 0x3C -#define RF_TRSW 0x3F - -#define RF_TXRF_A2 0x41 -#define RF_TXPA_G4 0x46 -#define RF_TXPA_A4 0x4B - -#define RF_WE_LUT 0xEF - -#define BBBRESETB 0x100 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 #define BGLOBALRESETB 0x200 #define BOFDMTXSTART 0x4 -#define BCCKTXSTART 0x8 -#define BCRC32DEBUG 0x100 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 #define BPMACLOOPBACK 0x10 -#define BTXLSIG 0xffffff -#define BOFDMTXRATE 0xf +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf #define BOFDMTXRESERVED 0x10 #define BOFDMTXLENGTH 0x1ffe0 #define BOFDMTXPARITY 0x20000 -#define BTXHTSIG1 0xffffff +#define BTXHTSIG1 0xffffff #define BTXHTMCSRATE 0x7f -#define BTXHTBW 0x80 -#define BTXHTLENGTH 0xffff00 -#define BTXHTSIG2 0xffffff +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff #define BTXHTSMOOTHING 0x1 #define BTXHTSOUNDING 0x2 #define BTXHTRESERVED 0x4 #define BTXHTAGGREATION 0x8 -#define BTXHTSTBC 0x30 +#define BTXHTSTBC 0x30 #define BTXHTADVANCECODING 0x40 #define BTXHTSHORTGI 0x80 #define BTXHTNUMBERHT_LTF 0x300 -#define BTXHTCRC8 0x3fc00 +#define BTXHTCRC8 0x3fc00 #define BCOUNTERRESET 0x10000 #define BNUMOFOFDMTX 0xffff -#define BNUMOFCCKTX 0xffff0000 +#define BNUMOFCCKTX 0xffff0000 #define BTXIDLEINTERVAL 0xffff #define BOFDMSERVICE 0xffff0000 #define BTXMACHEADER 0xffffffff -#define BTXDATAINIT 0xff -#define BTXHTMODE 0x100 -#define BTXDATATYPE 0x30000 +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 #define BTXRANDOMSEED 0xffffffff #define BCCKTXPREAMBLE 0x1 -#define BCCKTXSFD 0xffff0000 -#define BCCKTXSIG 0xff +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff #define BCCKTXSERVICE 0xff00 #define BCCKLENGTHEXT 0x8000 #define BCCKTXLENGHT 0xffff0000 -#define BCCKTXCRC16 0xffff +#define BCCKTXCRC16 0xffff #define BCCKTXSTATUS 0x1 #define BOFDMTXSTATUS 0x2 #define IS_BB_REG_OFFSET_92S(_offset) \ ((_offset >= 0x800) && (_offset <= 0xfff)) -#define BRFMOD 0x1 -#define BJAPANMODE 0x2 -#define BCCKTXSC 0x30 -#define BCCKEN 0x1000000 -#define BOFDMEN 0x2000000 - -#define BOFDMRXADCPHASE 0x10000 -#define BOFDMTXDACPHASE 0x40000 -#define BXATXAGC 0x3f - -#define BXBTXAGC 0xf00 -#define BXCTXAGC 0xf000 -#define BXDTXAGC 0xf0000 - -#define BPASTART 0xf0000000 -#define BTRSTART 0x00f00000 -#define BRFSTART 0x0000f000 -#define BBBSTART 0x000000f0 -#define BBBCCKSTART 0x0000000f -#define BPAEND 0xf -#define BTREND 0x0f000000 -#define BRFEND 0x000f0000 -#define BCCAMASK 0x000000f0 -#define BR2RCCAMASK 
0x00000f00 -#define BHSSI_R2TDELAY 0xf8000000 -#define BHSSI_T2RDELAY 0xf80000 -#define BCONTXHSSI 0x400 -#define BIGFROMCCK 0x200 -#define BAGCADDRESS 0x3f -#define BRXHPTX 0x7000 -#define BRXHP2RX 0x38000 -#define BRXHPCCKINI 0xc0000 -#define BAGCTXCODE 0xc00000 -#define BAGCRXCODE 0x300000 - -#define B3WIREDATALENGTH 0x800 -#define B3WIREADDREAALENGTH 0x400 - -#define B3WIRERFPOWERDOWN 0x1 -#define B5GPAPEPOLARITY 0x40000000 -#define B2GPAPEPOLARITY 0x80000000 -#define BRFSW_TXDEFAULTANT 0x3 -#define BRFSW_TXOPTIONANT 0x30 -#define BRFSW_RXDEFAULTANT 0x300 -#define BRFSW_RXOPTIONANT 0x3000 -#define BRFSI_3WIREDATA 0x1 -#define BRFSI_3WIRECLOCK 0x2 -#define BRFSI_3WIRELOAD 0x4 -#define BRFSI_3WIRERW 0x8 -#define BRFSI_3WIRE 0xf - -#define BRFSI_RFENV 0x10 - -#define BRFSI_TRSW 0x20 -#define BRFSI_TRSWB 0x40 -#define BRFSI_ANTSW 0x100 -#define BRFSI_ANTSWB 0x200 -#define BRFSI_PAPE 0x400 -#define BRFSI_PAPE5G 0x800 -#define BBANDSELECT 0x1 -#define BHTSIG2_GI 0x80 -#define BHTSIG2_SMOOTHING 0x01 -#define BHTSIG2_SOUNDING 0x02 -#define BHTSIG2_AGGREATON 0x08 -#define BHTSIG2_STBC 0x30 -#define BHTSIG2_ADVCODING 0x40 -#define BHTSIG2_NUMOFHTLTF 0x300 -#define BHTSIG2_CRC8 0x3fc -#define BHTSIG1_MCS 0x7f -#define BHTSIG1_BANDWIDTH 0x80 -#define BHTSIG1_HTLENGTH 0xffff -#define BLSIG_RATE 0xf -#define BLSIG_RESERVED 0x10 -#define BLSIG_LENGTH 0x1fffe -#define BLSIG_PARITY 0x20 -#define BCCKRXPHASE 0x4 - -#define BLSSIREADADDRESS 0x7f800000 -#define BLSSIREADEDGE 0x80000000 - -#define BLSSIREADBACKDATA 0xfffff - -#define BLSSIREADOKFLAG 0x1000 -#define BCCKSAMPLERATE 0x8 -#define BREGULATOR0STANDBY 0x1 -#define BREGULATORPLLSTANDBY 0x2 -#define BREGULATOR1STANDBY 0x4 -#define BPLLPOWERUP 0x8 -#define BDPLLPOWERUP 0x10 -#define BDA10POWERUP 0x20 -#define BAD7POWERUP 0x200 -#define BDA6POWERUP 0x2000 -#define BXTALPOWERUP 0x4000 -#define B40MDCLKPOWERUP 0x8000 -#define BDA6DEBUGMODE 0x20000 -#define BDA6SWING 0x380000 - -#define BADCLKPHASE 0x4000000 -#define B80MCLKDELAY 0x18000000 -#define BAFEWATCHDOGENABLE 0x20000000 - -#define BXTALCAP01 0xc0000000 -#define BXTALCAP23 0x3 -#define BXTALCAP92X 0x0f000000 -#define BXTALCAP 0x0f000000 - -#define BINTDIFCLKENABLE 0x400 -#define BEXTSIGCLKENABLE 0x800 -#define BBANDGAP_MBIAS_POWERUP 0x10000 -#define BAD11SH_GAIN 0xc0000 -#define BAD11NPUT_RANGE 0x700000 -#define BAD110P_CURRENT 0x3800000 -#define BLPATH_LOOPBACK 0x4000000 -#define BQPATH_LOOPBACK 0x8000000 -#define BAFE_LOOPBACK 0x10000000 -#define BDA10_SWING 0x7e0 -#define BDA10_REVERSE 0x800 -#define BDA_CLK_SOURCE 0x1000 -#define BDA7INPUT_RANGE 0x6000 -#define BDA7_GAIN 0x38000 -#define BDA7OUTPUT_CM_MODE 0x40000 -#define BDA7INPUT_CM_MODE 0x380000 -#define BDA7CURRENT 0xc00000 -#define BREGULATOR_ADJUST 0x7000000 -#define BAD11POWERUP_ATTX 0x1 -#define BDA10PS_ATTX 0x10 -#define BAD11POWERUP_ATRX 0x100 -#define BDA10PS_ATRX 0x1000 -#define BCCKRX_AGC_FORMAT 0x200 -#define BPSDFFT_SAMPLE_POINT 0xc000 -#define BPSD_AVERAGE_NUM 0x3000 -#define BIQPATH_CONTROL 0xc00 -#define BPSD_FREQ 0x3ff -#define BPSD_ANTENNA_PATH 0x30 -#define BPSD_IQ_SWITCH 0x40 -#define BPSD_RX_TRIGGER 0x400000 -#define BPSD_TX_TRIGGERCW 0x80000000 -#define BPSD_SINE_TONE_SCALE 0x7f000000 -#define BPSD_REPORT 0xffff - -#define BOFDM_TXSC 0x30000000 -#define BCCK_TXON 0x1 -#define BOFDM_TXON 0x2 -#define BDEBUG_PAGE 0xfff -#define BDEBUG_ITEM 0xff -#define BANTL 0x10 -#define BANT_NONHT 0x100 -#define BANT_HT1 0x1000 -#define BANT_HT2 0x10000 -#define BANT_HT1S1 0x100000 -#define BANT_NONHTS1 0x1000000 - -#define 
BCCK_BBMODE 0x3 -#define BCCK_TXPOWERSAVING 0x80 -#define BCCK_RXPOWERSAVING 0x40 - -#define BCCK_SIDEBAND 0x10 - -#define BCCK_SCRAMBLE 0x8 -#define BCCK_ANTDIVERSITY 0x8000 -#define BCCK_CARRIER_RECOVERY 0x4000 -#define BCCK_TXRATE 0x3000 -#define BCCK_DCCANCEL 0x0800 -#define BCCK_ISICANCEL 0x0400 -#define BCCK_MATCH_FILTER 0x0200 -#define BCCK_EQUALIZER 0x0100 -#define BCCK_PREAMBLE_DETECT 0x800000 -#define BCCK_FAST_FALSECCA 0x400000 -#define BCCK_CH_ESTSTART 0x300000 -#define BCCK_CCA_COUNT 0x080000 -#define BCCK_CS_LIM 0x070000 -#define BCCK_BIST_MODE 0x80000000 -#define BCCK_CCAMASK 0x40000000 -#define BCCK_TX_DAC_PHASE 0x4 -#define BCCK_RX_ADC_PHASE 0x20000000 -#define BCCKR_CP_MODE 0x0100 -#define BCCK_TXDC_OFFSET 0xf0 -#define BCCK_RXDC_OFFSET 0xf -#define BCCK_CCA_MODE 0xc000 -#define BCCK_FALSECS_LIM 0x3f00 -#define BCCK_CS_RATIO 0xc00000 -#define BCCK_CORGBIT_SEL 0x300000 -#define BCCK_PD_LIM 0x0f0000 -#define BCCK_NEWCCA 0x80000000 -#define BCCK_RXHP_OF_IG 0x8000 -#define BCCK_RXIG 0x7f00 -#define BCCK_LNA_POLARITY 0x800000 -#define BCCK_RX1ST_BAIN 0x7f0000 -#define BCCK_RF_EXTEND 0x20000000 -#define BCCK_RXAGC_SATLEVEL 0x1f000000 -#define BCCK_RXAGC_SATCOUNT 0xe0 -#define BCCKRXRFSETTLE 0x1f -#define BCCK_FIXED_RXAGC 0x8000 -#define BCCK_ANTENNA_POLARITY 0x2000 -#define BCCK_TXFILTER_TYPE 0x0c00 -#define BCCK_RXAGC_REPORTTYPE 0x0300 -#define BCCK_RXDAGC_EN 0x80000000 -#define BCCK_RXDAGC_PERIOD 0x20000000 -#define BCCK_RXDAGC_SATLEVEL 0x1f000000 -#define BCCK_TIMING_RECOVERY 0x800000 -#define BCCK_TXC0 0x3f0000 -#define BCCK_TXC1 0x3f000000 -#define BCCK_TXC2 0x3f -#define BCCK_TXC3 0x3f00 -#define BCCK_TXC4 0x3f0000 -#define BCCK_TXC5 0x3f000000 -#define BCCK_TXC6 0x3f -#define BCCK_TXC7 0x3f00 -#define BCCK_DEBUGPORT 0xff0000 -#define BCCK_DAC_DEBUG 0x0f000000 -#define BCCK_FALSEALARM_ENABLE 0x8000 -#define BCCK_FALSEALARM_READ 0x4000 -#define BCCK_TRSSI 0x7f -#define BCCK_RXAGC_REPORT 0xfe -#define BCCK_RXREPORT_ANTSEL 0x80000000 -#define BCCK_RXREPORT_MFOFF 0x40000000 -#define BCCK_RXREPORT_SQLOSS 0x20000000 -#define BCCK_RXREPORT_PKTLOSS 0x10000000 -#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 -#define BCCK_RXREPORT_RATEERROR 0x04000000 -#define BCCK_RXREPORT_RXRATE 0x03000000 -#define BCCK_RXFA_COUNTER_LOWER 0xff -#define BCCK_RXFA_COUNTER_UPPER 0xff000000 -#define BCCK_RXHPAGC_START 0xe000 -#define BCCK_RXHPAGC_FINAL 0x1c00 -#define BCCK_RXFALSEALARM_ENABLE 0x8000 -#define BCCK_FACOUNTER_FREEZE 0x4000 -#define BCCK_TXPATH_SEL 0x10000000 -#define BCCK_DEFAULT_RXPATH 0xc000000 -#define BCCK_OPTION_RXPATH 0x3000000 - -#define BNUM_OFSTF 0x3 -#define BSHIFT_L 0xc0 -#define BGI_TH 0xc -#define BRXPATH_A 0x1 -#define BRXPATH_B 0x2 -#define BRXPATH_C 0x4 -#define BRXPATH_D 0x8 -#define BTXPATH_A 0x1 -#define BTXPATH_B 0x2 -#define BTXPATH_C 0x4 -#define BTXPATH_D 0x8 -#define BTRSSI_FREQ 0x200 -#define BADC_BACKOFF 0x3000 -#define BDFIR_BACKOFF 0xc000 -#define BTRSSI_LATCH_PHASE 0x10000 -#define BRX_LDC_OFFSET 0xff -#define BRX_QDC_OFFSET 0xff00 -#define BRX_DFIR_MODE 0x1800000 -#define BRX_DCNF_TYPE 0xe000000 -#define BRXIQIMB_A 0x3ff -#define BRXIQIMB_B 0xfc00 -#define BRXIQIMB_C 0x3f0000 -#define BRXIQIMB_D 0xffc00000 -#define BDC_DC_NOTCH 0x60000 -#define BRXNB_NOTCH 0x1f000000 -#define BPD_TH 0xf -#define BPD_TH_OPT2 0xc000 -#define BPWED_TH 0x700 -#define BIFMF_WIN_L 0x800 -#define BPD_OPTION 0x1000 -#define BMF_WIN_L 0xe000 -#define BBW_SEARCH_L 0x30000 -#define BWIN_ENH_L 0xc0000 -#define BBW_TH 0x700000 -#define BED_TH2 0x3800000 -#define BBW_OPTION 0x4000000 
-#define BRADIO_TH 0x18000000 -#define BWINDOW_L 0xe0000000 -#define BSBD_OPTION 0x1 -#define BFRAME_TH 0x1c -#define BFS_OPTION 0x60 -#define BDC_SLOPE_CHECK 0x80 -#define BFGUARD_COUNTER_DC_L 0xe00 -#define BFRAME_WEIGHT_SHORT 0x7000 -#define BSUB_TUNE 0xe00000 -#define BFRAME_DC_LENGTH 0xe000000 -#define BSBD_START_OFFSET 0x30000000 -#define BFRAME_TH_2 0x7 -#define BFRAME_GI2_TH 0x38 -#define BGI2_SYNC_EN 0x40 -#define BSARCH_SHORT_EARLY 0x300 -#define BSARCH_SHORT_LATE 0xc00 -#define BSARCH_GI2_LATE 0x70000 -#define BCFOANTSUM 0x1 -#define BCFOACC 0x2 -#define BCFOSTARTOFFSET 0xc -#define BCFOLOOPBACK 0x70 -#define BCFOSUMWEIGHT 0x80 -#define BDAGCENABLE 0x10000 -#define BTXIQIMB_A 0x3ff -#define BTXIQIMB_B 0xfc00 -#define BTXIQIMB_C 0x3f0000 -#define BTXIQIMB_D 0xffc00000 -#define BTXIDCOFFSET 0xff -#define BTXIQDCOFFSET 0xff00 -#define BTXDFIRMODE 0x10000 -#define BTXPESUDO_NOISEON 0x4000000 -#define BTXPESUDO_NOISE_A 0xff -#define BTXPESUDO_NOISE_B 0xff00 -#define BTXPESUDO_NOISE_C 0xff0000 -#define BTXPESUDO_NOISE_D 0xff000000 -#define BCCA_DROPOPTION 0x20000 -#define BCCA_DROPTHRES 0xfff00000 -#define BEDCCA_H 0xf -#define BEDCCA_L 0xf0 -#define BLAMBDA_ED 0x300 -#define BRX_INITIALGAIN 0x7f -#define BRX_ANTDIV_EN 0x80 -#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 -#define BRX_HIGHPOWER_FLOW 0x8000 -#define BRX_AGC_FREEZE_THRES 0xc0000 -#define BRX_FREEZESTEP_AGC1 0x300000 -#define BRX_FREEZESTEP_AGC2 0xc00000 -#define BRX_FREEZESTEP_AGC3 0x3000000 -#define BRX_FREEZESTEP_AGC0 0xc000000 -#define BRXRSSI_CMP_EN 0x10000000 -#define BRXQUICK_AGCEN 0x20000000 -#define BRXAGC_FREEZE_THRES_MODE 0x40000000 -#define BRX_OVERFLOW_CHECKTYPE 0x80000000 -#define BRX_AGCSHIFT 0x7f -#define BTRSW_TRI_ONLY 0x80 -#define BPOWER_THRES 0x300 -#define BRXAGC_EN 0x1 -#define BRXAGC_TOGETHER_EN 0x2 -#define BRXAGC_MIN 0x4 -#define BRXHP_INI 0x7 -#define BRXHP_TRLNA 0x70 -#define BRXHP_RSSI 0x700 -#define BRXHP_BBP1 0x7000 -#define BRXHP_BBP2 0x70000 -#define BRXHP_BBP3 0x700000 -#define BRSSI_H 0x7f0000 -#define BRSSI_GEN 0x7f000000 -#define BRXSETTLE_TRSW 0x7 -#define BRXSETTLE_LNA 0x38 -#define BRXSETTLE_RSSI 0x1c0 -#define BRXSETTLE_BBP 0xe00 -#define BRXSETTLE_RXHP 0x7000 -#define BRXSETTLE_ANTSW_RSSI 0x38000 -#define BRXSETTLE_ANTSW 0xc0000 -#define BRXPROCESS_TIME_DAGC 0x300000 -#define BRXSETTLE_HSSI 0x400000 -#define BRXPROCESS_TIME_BBPPW 0x800000 -#define BRXANTENNA_POWER_SHIFT 0x3000000 -#define BRSSI_TABLE_SELECT 0xc000000 -#define BRXHP_FINAL 0x7000000 -#define BRXHPSETTLE_BBP 0x7 -#define BRXHTSETTLE_HSSI 0x8 -#define BRXHTSETTLE_RXHP 0x70 -#define BRXHTSETTLE_BBPPW 0x80 -#define BRXHTSETTLE_IDLE 0x300 -#define BRXHTSETTLE_RESERVED 0x1c00 -#define BRXHT_RXHP_EN 0x8000 -#define BRXAGC_FREEZE_THRES 0x30000 -#define BRXAGC_TOGETHEREN 0x40000 -#define BRXHTAGC_MIN 0x80000 -#define BRXHTAGC_EN 0x100000 -#define BRXHTDAGC_EN 0x200000 -#define BRXHT_RXHP_BBP 0x1c00000 -#define BRXHT_RXHP_FINAL 0xe0000000 -#define BRXPW_RADIO_TH 0x3 -#define BRXPW_RADIO_EN 0x4 -#define BRXMF_HOLD 0x3800 -#define BRXPD_DELAY_TH1 0x38 -#define BRXPD_DELAY_TH2 0x1c0 -#define BRXPD_DC_COUNT_MAX 0x600 -#define BRXPD_DELAY_TH 0x8000 -#define BRXPROCESS_DELAY 0xf0000 -#define BRXSEARCHRANGE_GI2_EARLY 0x700000 -#define BRXFRAME_FUARD_COUNTER_L 0x3800000 -#define BRXSGI_GUARD_L 0xc000000 -#define BRXSGI_SEARCH_L 0x30000000 -#define BRXSGI_TH 0xc0000000 -#define BDFSCNT0 0xff -#define BDFSCNT1 0xff00 -#define BDFSFLAG 0xf0000 -#define BMF_WEIGHT_SUM 0x300000 -#define BMINIDX_TH 0x7f000000 -#define BDAFORMAT 0x40000 -#define 
BTXCH_EMU_ENABLE 0x01000000 -#define BTRSW_ISOLATION_A 0x7f -#define BTRSW_ISOLATION_B 0x7f00 -#define BTRSW_ISOLATION_C 0x7f0000 -#define BTRSW_ISOLATION_D 0x7f000000 -#define BEXT_LNA_GAIN 0x7c00 - -#define BSTBC_EN 0x4 -#define BANTENNA_MAPPING 0x10 -#define BNSS 0x20 -#define BCFO_ANTSUM_ID 0x200 -#define BPHY_COUNTER_RESET 0x8000000 -#define BCFO_REPORT_GET 0x4000000 -#define BOFDM_CONTINUE_TX 0x10000000 -#define BOFDM_SINGLE_CARRIER 0x20000000 -#define BOFDM_SINGLE_TONE 0x40000000 -#define BHT_DETECT 0x100 -#define BCFOEN 0x10000 -#define BCFOVALUE 0xfff00000 -#define BSIGTONE_RE 0x3f -#define BSIGTONE_IM 0x7f00 -#define BCOUNTER_CCA 0xffff -#define BCOUNTER_PARITYFAIL 0xffff0000 -#define BCOUNTER_RATEILLEGAL 0xffff -#define BCOUNTER_CRC8FAIL 0xffff0000 -#define BCOUNTER_MCSNOSUPPORT 0xffff -#define BCOUNTER_FASTSYNC 0xffff -#define BSHORTCFO 0xfff -#define BSHORTCFOT_LENGTH 12 -#define BSHORTCFOF_LENGTH 11 -#define BLONGCFO 0x7ff -#define BLONGCFOT_LENGTH 11 -#define BLONGCFOF_LENGTH 11 -#define BTAILCFO 0x1fff -#define BTAILCFOT_LENGTH 13 -#define BTAILCFOF_LENGTH 12 -#define BNOISE_EN_PWDB 0xffff -#define BCC_POWER_DB 0xffff0000 -#define BMOISE_PWDB 0xffff -#define BPOWERMEAST_LENGTH 10 -#define BPOWERMEASF_LENGTH 3 -#define BRX_HT_BW 0x1 -#define BRXSC 0x6 -#define BRX_HT 0x8 -#define BNB_INTF_DET_ON 0x1 -#define BINTF_WIN_LEN_CFG 0x30 -#define BNB_INTF_TH_CFG 0x1c0 -#define BRFGAIN 0x3f -#define BTABLESEL 0x40 -#define BTRSW 0x80 -#define BRXSNR_A 0xff -#define BRXSNR_B 0xff00 -#define BRXSNR_C 0xff0000 -#define BRXSNR_D 0xff000000 -#define BSNR_EVMT_LENGTH 8 -#define BSNR_EVMF_LENGTH 1 -#define BCSI1ST 0xff -#define BCSI2ND 0xff00 -#define BRXEVM1ST 0xff0000 -#define BRXEVM2ND 0xff000000 -#define BSIGEVM 0xff -#define BPWDB 0xff00 -#define BSGIEN 0x10000 - -#define BSFACTOR_QMA1 0xf -#define BSFACTOR_QMA2 0xf0 -#define BSFACTOR_QMA3 0xf00 -#define BSFACTOR_QMA4 0xf000 -#define BSFACTOR_QMA5 0xf0000 -#define BSFACTOR_QMA6 0xf0000 -#define BSFACTOR_QMA7 0xf00000 -#define BSFACTOR_QMA8 0xf000000 -#define BSFACTOR_QMA9 0xf0000000 -#define BCSI_SCHEME 0x100000 - -#define BNOISE_LVL_TOP_SET 0x3 -#define BCHSMOOTH 0x4 -#define BCHSMOOTH_CFG1 0x38 -#define BCHSMOOTH_CFG2 0x1c0 -#define BCHSMOOTH_CFG3 0xe00 -#define BCHSMOOTH_CFG4 0x7000 -#define BMRCMODE 0x800000 -#define BTHEVMCFG 0x7000000 - -#define BLOOP_FIT_TYPE 0x1 -#define BUPD_CFO 0x40 -#define BUPD_CFO_OFFDATA 0x80 -#define BADV_UPD_CFO 0x100 -#define BADV_TIME_CTRL 0x800 -#define BUPD_CLKO 0x1000 -#define BFC 0x6000 -#define BTRACKING_MODE 0x8000 -#define BPHCMP_ENABLE 0x10000 -#define BUPD_CLKO_LTF 0x20000 -#define BCOM_CH_CFO 0x40000 -#define BCSI_ESTI_MODE 0x80000 -#define BADV_UPD_EQZ 0x100000 -#define BUCHCFG 0x7000000 -#define BUPDEQZ 0x8000000 - -#define BRX_PESUDO_NOISE_ON 0x20000000 -#define BRX_PESUDO_NOISE_A 0xff -#define BRX_PESUDO_NOISE_B 0xff00 -#define BRX_PESUDO_NOISE_C 0xff0000 -#define BRX_PESUDO_NOISE_D 0xff000000 -#define BRX_PESUDO_NOISESTATE_A 0xffff -#define BRX_PESUDO_NOISESTATE_B 0xffff0000 -#define BRX_PESUDO_NOISESTATE_C 0xffff -#define BRX_PESUDO_NOISESTATE_D 0xffff0000 - -#define BZEBRA1_HSSIENABLE 0x8 -#define BZEBRA1_TRXCONTROL 0xc00 -#define BZEBRA1_TRXGAINSETTING 0x07f -#define BZEBRA1_RXCOUNTER 0xc00 -#define BZEBRA1_TXCHANGEPUMP 0x38 -#define BZEBRA1_RXCHANGEPUMP 0x7 -#define BZEBRA1_CHANNEL_NUM 0xf80 -#define BZEBRA1_TXLPFBW 0x400 -#define BZEBRA1_RXLPFBW 0x600 - -#define BRTL8256REG_MODE_CTRL1 0x100 -#define BRTL8256REG_MODE_CTRL0 0x40 -#define BRTL8256REG_TXLPFBW 0x18 -#define 
BRTL8256REG_RXLPFBW 0x600 - -#define BRTL8258_TXLPFBW 0xc -#define BRTL8258_RXLPFBW 0xc00 -#define BRTL8258_RSSILPFBW 0xc0 - -#define BBYTE0 0x1 -#define BBYTE1 0x2 -#define BBYTE2 0x4 -#define BBYTE3 0x8 -#define BWORD0 0x3 -#define BWORD1 0xc -#define BWORD 0xf - -#define BENABLE 0x1 -#define BDISABLE 0x0 - -#define LEFT_ANTENNA 0x0 -#define RIGHT_ANTENNA 0x1 - -#define TCHECK_TXSTATUS 500 -#define TUPDATE_RXCOUNTER 100 - -#define REG_UN_USED_REGISTER 0x01bf +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +#define BCCKEN 0x1000000 +#define BOFDMEN 0x2000000 + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 +#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 + +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 
0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 +#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define BCCKRXRFSETTLE 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 
0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define 
BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define 
BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define TUPDATE_RXCOUNTER 100 + +#define REG_UN_used_register 0x01bf /* WOL bit information */ #define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0) #define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1) #define HAL92C_WOL_DISASSOC_EVENT BIT(2) #define HAL92C_WOL_DEAUTH_EVENT BIT(3) -#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4) +#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4) #define WOL_REASON_PTK_UPDATE BIT(0) #define WOL_REASON_GTK_UPDATE BIT(1) -#define WOL_REASON_DISASSOC BIT(2) -#define WOL_REASON_DEAUTH BIT(3) +#define WOL_REASON_DISASSOC BIT(2) +#define WOL_REASON_DEAUTH BIT(3) #define WOL_REASON_FW_DISCONNECT BIT(4) - #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c index 4faafdbab9c6e007537c9f511a67f97973241883..40893cef7dfe3a9df44483db9d3e60cb42f8ef54 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -34,6 +30,8 @@ #include "rf.h" #include "dm.h" +static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -60,7 +58,7 @@ void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) } void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, - u8 *plevel) + u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -82,32 +80,36 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, if (turbo_scanoff) { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - tx_agc[idx1] = plevel[idx1] | - (plevel[idx1] << 8) | - (plevel[idx1] << 16) | - (plevel[idx1] << 24); + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); } } } else { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - tx_agc[idx1] = plevel[idx1] | (plevel[idx1] << 8) | - (plevel[idx1] << 16) | - (plevel[idx1] << 24); + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { - tmpval = (rtlphy->mcs_offset[0][6]) + - (rtlphy->mcs_offset[0][7] << 8); + tmpval = + (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << + 8); tx_agc[RF90_PATH_A] += tmpval; - tmpval = (rtlphy->mcs_offset[0][14]) + - (rtlphy->mcs_offset[0][15] << 24); + tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << + 24); tx_agc[RF90_PATH_B] += tmpval; } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - ptr = (u8 *)(&(tx_agc[idx1])); + ptr = (u8 *)(&tx_agc[idx1]); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; @@ -127,10 +129,12 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, - RTXAGC_A_CCK1_MCS32); + RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; + /*tmpval = tmpval & 0xff00ffff;*/ + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, @@ -153,148 +157,180 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, } static void rtl88e_phy_get_power_base(struct ieee80211_hw *hw, - u8 *pwrlvlofdm, u8 *pwrlvlbw20, - u8 *pwrlvlbw40, u8 channel, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, u8 channel, u32 *ofdmbase, u32 *mcsbase) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 base0, base1; + u32 powerbase0, powerbase1; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { - base0 = pwrlvlofdm[i]; + powerbase0 = ppowerlevel_ofdm[i]; - base0 = (base0 << 24) | (base0 << 16) | - (base0 << 8) | base0; - *(ofdmbase + i) = base0; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "[OFDM power base index rf(%c) = 0x%x]\n", - ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)); + " [OFDM power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 
'A' : 'B'), *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) - powerlevel[i] = pwrlvlbw20[i]; + powerlevel[i] = ppowerlevel_bw20[i]; else - powerlevel[i] = pwrlvlbw40[i]; - base1 = powerlevel[i]; - base1 = (base1 << 24) | - (base1 << 16) | (base1 << 8) | base1; + powerlevel[i] = ppowerlevel_bw40[i]; - *(mcsbase + i) = base1; + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | + (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; + + *(mcsbase + i) = powerbase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "[MCS power base index rf(%c) = 0x%x]\n", - ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); + " [MCS power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); } } -static void get_txpwr_by_reg(struct ieee80211_hw *hw, u8 chan, u8 index, - u32 *base0, u32 *base1, u32 *outval) +static void _rtl88e_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerbase0, + u32 *powerbase1, + u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 i, chg = 0, pwr_lim[4], pwr_diff = 0, cust_pwr_dif; - u32 writeval, cust_lim, rf, tmp; - u8 ch = chan - 1; - u8 j; + u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff; + u32 writeval, customer_limit, rf; for (rf = 0; rf < 2; rf++) { - j = index + (rf ? 8 : 0); - tmp = ((index < 2) ? base0[rf] : base1[rf]); switch (rtlefuse->eeprom_regulatory) { case 0: - chg = 0; + chnlgroup = 0; - writeval = rtlphy->mcs_offset[chg][j] + tmp; + writeval = + rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, " - "writeval(%c) = 0x%x\n", + "RTK better performance, writeval(%c) = 0x%x\n", ((rf == 0) ? 'A' : 'B'), writeval); break; case 1: if (rtlphy->pwrgroup_cnt == 1) { - chg = 0; + chnlgroup = 0; } else { - chg = chan / 3; - if (chan == 14) - chg = 5; + if (channel < 3) + chnlgroup = 0; + else if (channel < 6) + chnlgroup = 1; + else if (channel < 9) + chnlgroup = 2; + else if (channel < 12) + chnlgroup = 3; + else if (channel < 14) + chnlgroup = 4; + else if (channel == 14) + chnlgroup = 5; } - writeval = rtlphy->mcs_offset[chg][j] + tmp; + + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + ((index < 2) ? + powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", ((rf == 0) ? 'A' : 'B'), writeval); + break; case 2: - writeval = ((index < 2) ? base0[rf] : base1[rf]); + writeval = + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory, writeval(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + ((rf == 0) ? 'A' : 'B'), writeval); break; case 3: - chg = 0; + chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHz rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht40[rf][ch]); + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40[rf][channel - + 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht20[rf][ch]); + ((rf == 0) ? 
'A' : 'B'), + rtlefuse->pwrgroup_ht20[rf][channel - + 1]); } if (index < 2) - pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][ch]; + pwr_diff = + rtlefuse->txpwr_legacyhtdiff[rf][channel-1]; else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) - pwr_diff = rtlefuse->txpwr_ht20diff[rf][ch]; + pwr_diff = + rtlefuse->txpwr_ht20diff[rf][channel-1]; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) - cust_pwr_dif = rtlefuse->pwrgroup_ht40[rf][ch]; + customer_pwr_diff = + rtlefuse->pwrgroup_ht40[rf][channel-1]; else - cust_pwr_dif = rtlefuse->pwrgroup_ht20[rf][ch]; + customer_pwr_diff = + rtlefuse->pwrgroup_ht20[rf][channel-1]; - if (pwr_diff > cust_pwr_dif) + if (pwr_diff > customer_pwr_diff) pwr_diff = 0; else - pwr_diff = cust_pwr_dif - pwr_diff; + pwr_diff = customer_pwr_diff - pwr_diff; for (i = 0; i < 4; i++) { - pwr_lim[i] = (u8)((rtlphy->mcs_offset[chg][j] & - (0x7f << (i * 8))) >> (i * 8)); - - if (pwr_lim[i] > pwr_diff) - pwr_lim[i] = pwr_diff; + pwr_diff_limit[i] = + (u8)((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + + (rf ? 8 : 0)] & (0x7f << + (i * 8))) >> (i * 8)); + + if (pwr_diff_limit[i] > pwr_diff) + pwr_diff_limit[i] = pwr_diff; } - cust_lim = (pwr_lim[3] << 24) | (pwr_lim[2] << 16) | - (pwr_lim[1] << 8) | (pwr_lim[0]); + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), cust_lim); + ((rf == 0) ? 'A' : 'B'), customer_limit); - writeval = cust_lim + tmp; + writeval = customer_limit + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer, writeval rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + "Customer, writeval rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; default: - chg = 0; - writeval = rtlphy->mcs_offset[chg][j] + tmp; + chnlgroup = 0; + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeval " - "rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + "RTK better performance, writeval rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); break; } @@ -302,12 +338,13 @@ static void get_txpwr_by_reg(struct ieee80211_hw *hw, u8 chan, u8 index, writeval = writeval - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) - writeval -= 0x0c0c0c0c; - *(outval + rf) = writeval; + writeval = writeval - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeval; } } -static void write_ofdm_pwr(struct ieee80211_hw *hw, u8 index, u32 *pvalue) +static void _rtl88e_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *value) { struct rtl_priv *rtlpriv = rtl_priv(hw); u16 regoffset_a[6] = { @@ -325,16 +362,16 @@ static void write_ofdm_pwr(struct ieee80211_hw *hw, u8 index, u32 *pvalue) u16 regoffset; for (rf = 0; rf < 2; rf++) { - writeval = pvalue[rf]; + writeval = value[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8) ((writeval & (0x7f << - (i * 8))) >> (i * 8)); + pwr_val[i] = (u8)((writeval & (0x7f << + (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | - (pwr_val[1] << 8) | pwr_val[0]; + (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; @@ -348,24 +385,27 @@ static void write_ofdm_pwr(struct ieee80211_hw *hw, u8 index, u32 *pvalue) } void rtl88e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, - u8 *pwrlvlofdm, - u8 *pwrlvlbw20, - u8 *pwrlvlbw40, u8 chan) + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, u8 channel) { - u32 writeval[2], base0[2], base1[2]; + u32 writeval[2], powerbase0[2], powerbase1[2]; u8 index; u8 direction; u32 pwrtrac_value; - rtl88e_phy_get_power_base(hw, pwrlvlofdm, pwrlvlbw20, - pwrlvlbw40, chan, &base0[0], - &base1[0]); + rtl88e_phy_get_power_base(hw, ppowerlevel_ofdm, + ppowerlevel_bw20, ppowerlevel_bw40, + channel, &powerbase0[0], &powerbase1[0]); rtl88e_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); for (index = 0; index < 6; index++) { - get_txpwr_by_reg(hw, chan, index, &base0[0], &base1[0], - &writeval[0]); + _rtl88e_get_txpower_writeval_by_regulatory(hw, + channel, index, + &powerbase0[0], + &powerbase1[0], + &writeval[0]); if (direction == 1) { writeval[0] += pwrtrac_value; writeval[1] += pwrtrac_value; @@ -373,15 +413,28 @@ void rtl88e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, writeval[0] -= pwrtrac_value; writeval[1] -= pwrtrac_value; } - write_ofdm_pwr(hw, index, &writeval[0]); + _rtl88e_write_ofdm_power_reg(hw, index, &writeval[0]); } } -static bool rf6052_conf_para(struct ieee80211_hw *hw) +bool rtl88e_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 u4val = 0; + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl88e_phy_rf6052_config_parafile(hw); +} + +static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; @@ -392,12 +445,12 @@ static bool rf6052_conf_para(struct ieee80211_hw *hw) switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: - u4val = rtl_get_bbreg(hw, pphyreg->rfintfs, + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: - u4val = rtl_get_bbreg(hw, pphyreg->rfintfs, + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } @@ -418,11 +471,11 @@ static 
bool rf6052_conf_para(struct ieee80211_hw *hw) switch (rfpath) { case RF90_PATH_A: rtstatus = rtl88e_phy_config_rf_with_headerfile(hw, - (enum radio_path)rfpath); + (enum radio_path)rfpath); break; case RF90_PATH_B: rtstatus = rtl88e_phy_config_rf_with_headerfile(hw, - (enum radio_path)rfpath); + (enum radio_path)rfpath); break; case RF90_PATH_C: break; @@ -433,12 +486,13 @@ static bool rf6052_conf_para(struct ieee80211_hw *hw) switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: - rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4val); + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV, u4_regvalue); break; case RF90_PATH_B: case RF90_PATH_D: - rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, - u4val); + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16, u4_regvalue); break; } @@ -447,21 +501,9 @@ static bool rf6052_conf_para(struct ieee80211_hw *hw) "Radio[%d] Fail!!", rfpath); return false; } + } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); return rtstatus; } - -bool rtl88e_phy_rf6052_config(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - if (rtlphy->rf_type == RF_1T1R) - rtlphy->num_total_rfpath = 1; - else - rtlphy->num_total_rfpath = 2; - - return rf6052_conf_para(hw); -} diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h index a39a2a3dbcc9f440fa8fee25484110550f1d4f8e..5c1472d88fd4496f123d43675f506c137a2dd92b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -40,7 +36,8 @@ void rtl88e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, void rtl88e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, - u8 *ppowerlevel_bw40, u8 channel); + u8 *ppowerlevel_bw40, + u8 channel); bool rtl88e_phy_rf6052_config(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c index 631b6907c17dced301c5d9406f9a02e254d415f4..11344121c55e072f4a1e5788d24202971e565cc4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -30,7 +26,6 @@ #include "../wifi.h" #include "../core.h" #include "../pci.h" -#include "../base.h" #include "reg.h" #include "def.h" #include "phy.h" @@ -122,7 +117,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) 0); rtlpci->irq_mask[0] = - (u32) (IMR_PSTIMEOUT | + (u32)(IMR_PSTIMEOUT | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | @@ -143,6 +138,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + if (rtlpriv->cfg->mod_params->disable_watchdog) + pr_info("watchdog disabled\n"); if (!rtlpriv->psc.inactiveps) pr_info("rtl8188ee: Power Save off (module option)\n"); if (!rtlpriv->psc.fwctrl_lps) @@ -162,7 +159,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ - rtlpriv->rtlhal.pfirmware = vmalloc(0x8000); + rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); @@ -199,7 +196,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) init_timer(&rtlpriv->works.fast_antenna_training_timer); setup_timer(&rtlpriv->works.fast_antenna_training_timer, rtl88e_dm_fast_antenna_training_callback, - (unsigned long)hw); + (unsigned long)hw); return err; } @@ -218,6 +215,12 @@ void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw) del_timer_sync(&rtlpriv->works.fast_antenna_training_timer); } +/* get bt coexist status */ +bool rtl88e_get_btc_status(void) +{ + return false; +} + static struct rtl_hal_ops rtl8188ee_hal_ops = { .init_sw_vars = rtl88e_init_sw_vars, .deinit_sw_vars = rtl88e_deinit_sw_vars, @@ -246,11 +249,12 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = { .set_bw_mode = rtl88e_phy_set_bw_mode, .switch_channel = rtl88e_phy_sw_chnl, .dm_watchdog = rtl88e_dm_watchdog, - .scan_operation_backup = rtl_phy_scan_operation_backup, + .scan_operation_backup = rtl88e_phy_scan_operation_backup, .set_rf_power_state = rtl88e_phy_set_rf_power_state, .led_control = rtl88ee_led_control, .set_desc = rtl88ee_set_desc, .get_desc = rtl88ee_get_desc, + .is_tx_desc_closed = rtl88ee_is_tx_desc_closed, .tx_polling = rtl88ee_tx_polling, .enable_hw_sec = rtl88ee_enable_hw_security_config, .set_key = rtl88ee_set_key, @@ -259,14 +263,17 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = { .set_bbreg = rtl88e_phy_set_bb_reg, .get_rfreg = rtl88e_phy_query_rf_reg, .set_rfreg = rtl88e_phy_set_rf_reg, + .get_btc_status = rtl88e_get_btc_status, + .rx_command_packet = rtl88ee_rx_command_packet, + }; static struct rtl_mod_params rtl88ee_mod_params = { .sw_crypto = false, - .inactiveps = true, + .inactiveps = false, .swctrl_lps = false, - .fwctrl_lps = true, - .msi_support = false, + .fwctrl_lps = false, + .msi_support = true, .debug = DBG_EMERG, }; @@ -274,6 +281,7 @@ static struct rtl_hal_cfg rtl88ee_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl88e_pci", + .fw_name = "rtlwifi/rtl8188efw.bin", .ops = &rtl8188ee_hal_ops, .mod_params = &rtl88ee_mod_params, @@ -285,6 +293,9 @@ static struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, + .maps[MAC_HSISR] = REG_HSISR, .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, @@ -345,6 +356,7 @@ static struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_IMR_VIDOK] = IMR_VIDOK, 
.maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IMR_HSISR_IND] = IMR_HSISR_IND_ON_INT, .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, @@ -364,7 +376,7 @@ static struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static const struct pci_device_id rtl88ee_pci_ids[] = { +static struct pci_device_id rtl88ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)}, {}, }; @@ -384,12 +396,15 @@ module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444); +module_param_named(disable_watchdog, rtl88ee_mod_params.disable_watchdog, + bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); -MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h index 85e02b3bdff8120eba0c13817b65075301898dd9..22398c3753a6b5fc6f91b50687bcfc1b2a12ed7e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -32,5 +28,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw); void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw); +bool rtl88e_get_btc_status(void); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/rtlwifi/rtl8188ee/table.c index fad373f97b2cc1bfc2087987e2da14d60940b352..68bcb7fe6a652665700ac1a523386ddf31d5ae4a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
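The PCI ID table and the rtlwifi_pm_ops defined in this file are handed to the generic rtlwifi PCI glue; the probe/remove callback names below are assumed from the common core and are not part of this patch, so treat this as an orientation sketch only:

	MODULE_DEVICE_TABLE(pci, rtl88ee_pci_ids);

	static struct pci_driver rtl88ee_driver = {
		.name      = KBUILD_MODNAME,
		.id_table  = rtl88ee_pci_ids,
		.probe     = rtl_pci_probe,		/* assumed shared probe */
		.remove    = rtl_pci_disconnect,	/* assumed shared remove */
		.driver.pm = &rtlwifi_pm_ops,		/* SIMPLE_DEV_PM_OPS above */
	};

	module_pci_driver(rtl88ee_driver);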
* @@ -30,7 +26,6 @@ *****************************************************************************/ #include "table.h" - u32 RTL8188EEPHY_REG_1TARRAY[] = { 0x800, 0x80040000, 0x804, 0x00000003, @@ -640,4 +635,5 @@ u32 RTL8188EEAGCTAB_1TARRAY[] = { 0xC78, 0x407D0001, 0xC78, 0x407E0001, 0xC78, 0x407F0001, + }; diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/rtlwifi/rtl8188ee/table.h index c1218e8351291773aa3de00ad4339bbf2ebe0566..403c4ddd236f95c549a42fc82c73171d1b116e8f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/table.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -35,13 +31,13 @@ #include #define RTL8188EEPHY_REG_1TARRAYLEN 382 extern u32 RTL8188EEPHY_REG_1TARRAY[]; -#define RTL8188EEPHY_REG_ARRAY_PGLEN 264 +#define RTL8188EEPHY_REG_ARRAY_PGLEN 264 extern u32 RTL8188EEPHY_REG_ARRAY_PG[]; -#define RTL8188EE_RADIOA_1TARRAYLEN 190 +#define RTL8188EE_RADIOA_1TARRAYLEN 190 extern u32 RTL8188EE_RADIOA_1TARRAY[]; -#define RTL8188EEMAC_1T_ARRAYLEN 180 +#define RTL8188EEMAC_1T_ARRAYLEN 180 extern u32 RTL8188EEMAC_1T_ARRAY[]; -#define RTL8188EEAGCTAB_1TARRAYLEN 256 +#define RTL8188EEAGCTAB_1TARRAYLEN 256 extern u32 RTL8188EEAGCTAB_1TARRAY[]; #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c index 5b4c225396f244cea599bade9755951daa7d15ec..df549c96adef9bec920ef1165be1ced6ea127504 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
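The table.c/table.h arrays being realigned here are flat {register, value} pairs; the PHY init code walks them two entries at a time and writes each pair. A hedged sketch of that consumer (the MASKDWORD mask and the settle delay are assumptions from the usual rtlwifi pattern, not part of this hunk):

	u32 i;

	for (i = 0; i < RTL8188EEPHY_REG_1TARRAYLEN; i += 2) {
		/* even index: baseband register address, odd index: value */
		rtl_set_bbreg(hw, RTL8188EEPHY_REG_1TARRAY[i],
			      MASKDWORD, RTL8188EEPHY_REG_1TARRAY[i + 1]);
		udelay(1);	/* brief settle time between writes (assumed) */
	}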
* @@ -37,6 +33,7 @@ #include "trx.h" #include "led.h" #include "dm.h" +#include "phy.h" static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) { @@ -50,6 +47,164 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) return skb->priority; } +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl88ee_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate) +{ + int rate_idx; + + if (!isht) { + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC92C_RATE6M: + rate_idx = 0; + break; + case DESC92C_RATE9M: + rate_idx = 1; + break; + case DESC92C_RATE12M: + rate_idx = 2; + break; + case DESC92C_RATE18M: + rate_idx = 3; + break; + case DESC92C_RATE24M: + rate_idx = 4; + break; + case DESC92C_RATE36M: + rate_idx = 5; + break; + case DESC92C_RATE48M: + rate_idx = 6; + break; + case DESC92C_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch (desc_rate) { + case DESC92C_RATEMCS0: + rate_idx = 0; + break; + case DESC92C_RATEMCS1: + rate_idx = 1; + break; + case DESC92C_RATEMCS2: + rate_idx = 2; + break; + case DESC92C_RATEMCS3: + rate_idx = 3; + break; + case DESC92C_RATEMCS4: + rate_idx = 4; + break; + case DESC92C_RATEMCS5: + rate_idx = 5; + break; + case DESC92C_RATEMCS6: + rate_idx = 6; + break; + case DESC92C_RATEMCS7: + rate_idx = 7; + break; + case DESC92C_RATEMCS8: + rate_idx = 8; + break; + case DESC92C_RATEMCS9: + rate_idx = 9; + break; + case DESC92C_RATEMCS10: + rate_idx = 10; + break; + case DESC92C_RATEMCS11: + rate_idx = 11; + break; + case DESC92C_RATEMCS12: + rate_idx = 12; + break; + case DESC92C_RATEMCS13: + rate_idx = 13; + break; + case DESC92C_RATEMCS14: + rate_idx = 14; + break; + case DESC92C_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstatus, u8 *pdesc, struct rx_fwinfo_88e *p_drvinfo, @@ -59,7 +214,8 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); struct phy_sts_cck_8192s_t *cck_buf; - struct phy_status_rpt *phystrpt = (struct phy_status_rpt *)p_drvinfo; + struct phy_status_rpt *phystrpt = 
+ (struct phy_status_rpt *)p_drvinfo; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); char rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, pwdb_all; @@ -72,11 +228,11 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, pstatus->packet_matchbssid = bpacket_match_bssid; pstatus->packet_toself = bpacket_toself; pstatus->packet_beacon = packet_beacon; - pstatus->rx_mimo_sig_qual[0] = -1; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; if (is_cck) { - u8 cck_hipwr; + u8 cck_highpwr; u8 cck_agc_rpt; /* CCK Driver info Structure is not the same as OFDM packet. */ cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; @@ -87,53 +243,58 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, * hardware (for rate adaptive) */ if (ppsc->rfpwr_state == ERFON) - cck_hipwr = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, + cck_highpwr = + (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9)); else - cck_hipwr = false; + cck_highpwr = false; lan_idx = ((cck_agc_rpt & 0xE0) >> 5); vga_idx = (cck_agc_rpt & 0x1f); switch (lan_idx) { case 7: if (vga_idx <= 27) - rx_pwr_all = -100 + 2 * (27 - vga_idx); + /*VGA_idx = 27~2*/ + rx_pwr_all = -100 + 2*(27-vga_idx); else rx_pwr_all = -100; break; case 6: - rx_pwr_all = -48 + 2 * (2 - vga_idx); /*VGA_idx = 2~0*/ + /*VGA_idx = 2~0*/ + rx_pwr_all = -48 + 2*(2-vga_idx); break; case 5: - rx_pwr_all = -42 + 2 * (7 - vga_idx); /*VGA_idx = 7~5*/ + /*VGA_idx = 7~5*/ + rx_pwr_all = -42 + 2*(7-vga_idx); break; case 4: - rx_pwr_all = -36 + 2 * (7 - vga_idx); /*VGA_idx = 7~4*/ + /*VGA_idx = 7~4*/ + rx_pwr_all = -36 + 2*(7-vga_idx); break; case 3: - rx_pwr_all = -24 + 2 * (7 - vga_idx); /*VGA_idx = 7~0*/ + /*VGA_idx = 7~0*/ + rx_pwr_all = -24 + 2*(7-vga_idx); break; case 2: - if (cck_hipwr) - rx_pwr_all = -12 + 2 * (5 - vga_idx); + if (cck_highpwr) + /*VGA_idx = 5~0*/ + rx_pwr_all = -12 + 2*(5-vga_idx); else - rx_pwr_all = -6 + 2 * (5 - vga_idx); + rx_pwr_all = -6 + 2*(5-vga_idx); break; case 1: - rx_pwr_all = 8 - 2 * vga_idx; + rx_pwr_all = 8-2*vga_idx; break; case 0: - rx_pwr_all = 14 - 2 * vga_idx; + rx_pwr_all = 14-2*vga_idx; break; default: break; } rx_pwr_all += 6; pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); - /* CCK gain is smaller than OFDM/MCS gain, - * so we add gain diff by experiences, - * the val is 6 - */ + /* CCK gain is smaller than OFDM/MCS gain, */ + /* so we add gain diff by experiences, the val is 6 */ pwdb_all += 6; if (pwdb_all > 100) pwdb_all = 100; @@ -148,10 +309,10 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, pwdb_all -= 8; else if (pwdb_all > 4 && pwdb_all <= 14) pwdb_all -= 4; - if (cck_hipwr == false) { + if (!cck_highpwr) { if (pwdb_all >= 80) - pwdb_all = ((pwdb_all - 80)<<1) + - ((pwdb_all - 80)>>1) + 80; + pwdb_all = ((pwdb_all-80)<<1) + + ((pwdb_all-80)>>1) + 80; else if ((pwdb_all <= 78) && (pwdb_all >= 20)) pwdb_all += 3; if (pwdb_all > 100) @@ -165,9 +326,9 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, if (bpacket_match_bssid) { u8 sq; - if (pstatus->rx_pwdb_all > 40) { + if (pstatus->rx_pwdb_all > 40) sq = 100; - } else { + else { sq = cck_buf->sq_rpt; if (sq > 64) sq = 0; @@ -178,8 +339,8 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, } pstatus->signalquality = sq; - pstatus->rx_mimo_sig_qual[0] = sq; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; } } else { rtlpriv->dm.rfpath_rxenable[0] = @@ -191,18 +352,20 
@@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, if (rtlpriv->dm.rfpath_rxenable[i]) rf_rx_num++; - rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)-110; + rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & + 0x3f) * 2) - 110; /* Translate DBM to percentage. */ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); total_rssi += rssi; /* Get Rx snr value in DB */ - rtlpriv->stats.rx_snr_db[i] = p_drvinfo->rxsnr[i] / 2; + rtlpriv->stats.rx_snr_db[i] = + (long)(p_drvinfo->rxsnr[i] / 2); /* Record Signal Strength for next packet */ if (bpacket_match_bssid) - pstatus->rx_mimo_signalstrength[i] = (u8) rssi; + pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } /* (2)PWDB, Average PWDB cacluated by @@ -227,11 +390,13 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, if (bpacket_match_bssid) { /* Fill value in RFD, Get the first - * spatial stream only + * spatial stream onlyi */ if (i == 0) - pstatus->signalquality = evm & 0xff; - pstatus->rx_mimo_sig_qual[i] = evm & 0xff; + pstatus->signalquality = + (u8)(evm & 0xff); + pstatus->rx_mimo_signalquality[i] = + (u8)(evm & 0xff); } } } @@ -241,10 +406,10 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, */ if (is_cck) pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, - pwdb_all)); + pwdb_all)); else if (rf_rx_num != 0) pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, - total_rssi /= rf_rx_num)); + total_rssi /= rf_rx_num)); /*HW antenna diversity*/ rtldm->fat_table.antsel_rx_keep_0 = phystrpt->ant_sel; rtldm->fat_table.antsel_rx_keep_1 = phystrpt->ant_sel_b; @@ -256,34 +421,39 @@ static void _rtl88ee_smart_antenna(struct ieee80211_hw *hw, { struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 ant_mux; - struct fast_ant_training *pfat = &(rtldm->fat_table); + u8 antsel_tr_mux; + struct fast_ant_training *pfat_table = &rtldm->fat_table; if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) { - if (pfat->fat_state == FAT_TRAINING_STATE) { + if (pfat_table->fat_state == FAT_TRAINING_STATE) { if (pstatus->packet_toself) { - ant_mux = (pfat->antsel_rx_keep_2 << 2) | - (pfat->antsel_rx_keep_1 << 1) | - pfat->antsel_rx_keep_0; - pfat->ant_sum[ant_mux] += pstatus->rx_pwdb_all; - pfat->ant_cnt[ant_mux]++; + antsel_tr_mux = + (pfat_table->antsel_rx_keep_2 << 2) | + (pfat_table->antsel_rx_keep_1 << 1) | + pfat_table->antsel_rx_keep_0; + pfat_table->ant_sum[antsel_tr_mux] += + pstatus->rx_pwdb_all; + pfat_table->ant_cnt[antsel_tr_mux]++; } } } else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || - (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) { + (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) { if (pstatus->packet_toself || pstatus->packet_matchbssid) { - ant_mux = (pfat->antsel_rx_keep_2 << 2) | - (pfat->antsel_rx_keep_1 << 1) | - pfat->antsel_rx_keep_0; - rtl88e_dm_ant_sel_statistics(hw, ant_mux, 0, + antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) | + (pfat_table->antsel_rx_keep_1 << 1) | + pfat_table->antsel_rx_keep_0; + rtl88e_dm_ant_sel_statistics(hw, antsel_tr_mux, 0, pstatus->rx_pwdb_all); } + } } static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, - struct sk_buff *skb, struct rtl_stats *pstatus, - u8 *pdesc, struct rx_fwinfo_88e *p_drvinfo) + struct sk_buff *skb, + struct rtl_stats *pstatus, + u8 *pdesc, + struct rx_fwinfo_88e *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -292,42 +462,42 @@ static void 
_rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; u8 *psaddr; __le16 fc; - u16 type, ufc; - bool match_bssid, packet_toself, packet_beacon = false, addr; + bool packet_matchbssid, packet_toself, packet_beacon; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; fc = hdr->frame_control; - ufc = le16_to_cpu(fc); - type = WLAN_FC_GET_TYPE(fc); praddr = hdr->addr1; psaddr = ieee80211_get_SA(hdr); memcpy(pstatus->psaddr, psaddr, ETH_ALEN); - addr = ether_addr_equal(mac->bssid, - (ufc & IEEE80211_FCTL_TODS) ? hdr->addr1 : - (ufc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : - hdr->addr3); - match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) && - (!pstatus->crc) && (!pstatus->icv)) && addr; + packet_matchbssid = ((!ieee80211_is_ctl(fc)) && + (ether_addr_equal(mac->bssid, ieee80211_has_tods(fc) ? + hdr->addr1 : ieee80211_has_fromds(fc) ? + hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && + (!pstatus->crc) && (!pstatus->icv)); - addr = ether_addr_equal(praddr, rtlefuse->dev_addr); - packet_toself = match_bssid && addr; + packet_toself = packet_matchbssid && + (ether_addr_equal(praddr, rtlefuse->dev_addr)); - if (ieee80211_is_beacon(fc)) + if (ieee80211_is_beacon(hdr->frame_control)) packet_beacon = true; + else + packet_beacon = false; _rtl88ee_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, - match_bssid, packet_toself, packet_beacon); + packet_matchbssid, packet_toself, + packet_beacon); _rtl88ee_smart_antenna(hw, pstatus); rtl_process_phyinfo(hw, tmp_buf, pstatus); } -static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) +static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + u8 *virtualaddress) { u32 dwtmp = 0; - memset(virtualaddress, 0, 8); SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); @@ -335,7 +505,7 @@ static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) dwtmp = ptcb_desc->empkt_len[0]; } else { dwtmp = ptcb_desc->empkt_len[0]; - dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[1]; } SET_EARLYMODE_LEN0(virtualaddress, dwtmp); @@ -344,7 +514,7 @@ static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) dwtmp = ptcb_desc->empkt_len[2]; } else { dwtmp = ptcb_desc->empkt_len[2]; - dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[3]; } SET_EARLYMODE_LEN1(virtualaddress, dwtmp); @@ -352,7 +522,7 @@ static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) dwtmp = ptcb_desc->empkt_len[4]; } else { dwtmp = ptcb_desc->empkt_len[4]; - dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[5]; } SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); @@ -361,7 +531,7 @@ static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) dwtmp = ptcb_desc->empkt_len[6]; } else { dwtmp = ptcb_desc->empkt_len[6]; - dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[7]; } SET_EARLYMODE_LEN3(virtualaddress, dwtmp); @@ -369,7 +539,7 @@ static void insert_em(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress) dwtmp = ptcb_desc->empkt_len[8]; } else { dwtmp = ptcb_desc->empkt_len[8]; - dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ((dwtmp%4) ? 
(4-dwtmp%4) : 0)+4; dwtmp += ptcb_desc->empkt_len[9]; } SET_EARLYMODE_LEN4(virtualaddress, dwtmp); @@ -387,21 +557,21 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, u32 phystatus = GET_RX_DESC_PHYST(pdesc); status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc); if (status->packet_report_type == TX_REPORT2) - status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc); + status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc); else - status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * - RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); - status->icv = (u16) GET_RX_DESC_ICV(pdesc); - status->crc = (u16) GET_RX_DESC_CRC32(pdesc); + status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->icv = (u16)GET_RX_DESC_ICV(pdesc); + status->crc = (u16)GET_RX_DESC_CRC32(pdesc); status->hwerror = (status->crc | status->icv); status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8) GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); + status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); + status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) && - (GET_RX_DESC_FAGGR(pdesc) == 1)); + status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && + (GET_RX_DESC_FAGGR(pdesc) == 1)); if (status->packet_report_type == NORMAL_RX) status->timestamp_low = GET_RX_DESC_TSFL(pdesc); status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); @@ -420,11 +590,14 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, status->wake_match = 0; if (status->wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, - "Get Wakeup Packet!! WakeMatch =%d\n", - status->wake_match); + "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", + status->wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; + hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + + status->rx_bufshift); + if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -445,18 +618,11 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, * to decrypt it */ if (status->decrypted) { - hdr = (struct ieee80211_hdr *)(skb->data + - status->rx_drvinfo_size + status->rx_bufshift); - - if (!hdr) { - /* During testing, hdr was NULL */ - return false; - } - if ((_ieee80211_is_robust_mgmt_frame(hdr)) && + if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) - rx_status->flag &= ~RX_FLAG_DECRYPTED; - else rx_status->flag |= RX_FLAG_DECRYPTED; + else + rx_status->flag &= ~RX_FLAG_DECRYPTED; } /* rate_idx: index of data rate into band's @@ -464,19 +630,18 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, * are use (RX_FLAG_HT) * Notice: this is diff with windows define */ - rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht, - status->rate, false); + rx_status->rate_idx = _rtl88ee_rate_mapping(hw, + status->is_ht, status->rate); rx_status->mactime = status->timestamp_low; if (phystatus == true) { p_drvinfo = (struct rx_fwinfo_88e *)(skb->data + status->rx_bufshift); - _rtl88ee_translate_rx_signal_stuff(hw, skb, status, pdesc, + _rtl88ee_translate_rx_signal_stuff(hw, + skb, status, pdesc, p_drvinfo); } - - /*rx_status->qual = status->signal; */ rx_status->signal = status->recvsignalpower + 10; if (status->packet_report_type == TX_REPORT2) { status->macid_valid_entry[0] = @@ -489,15 +654,17 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, struct sk_buff *skb, + u8 *txbd, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) + { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u8 *pdesc = pdesc_tx; + u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; unsigned int buf_len = 0; @@ -547,8 +714,9 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, if (ptcb_desc->empkt_num) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "Insert 8 byte.pTcb->EMPktNum:%d\n", - ptcb_desc->empkt_num); - insert_em(ptcb_desc, (u8 *)(skb->data)); + ptcb_desc->empkt_num); + _rtl88ee_insert_emcontent(ptcb_desc, + (u8 *)(skb->data)); } } else { SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); @@ -560,6 +728,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; else short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; + SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); if (info->flags & IEEE80211_TX_CTL_AMPDU) { @@ -568,7 +737,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, } SET_TX_DESC_SEQ(pdesc, seq_number); SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && - !ptcb_desc->cts_enable) ? 1 : 0)); + !ptcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 
1 : 0)); @@ -581,17 +750,17 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ? 1 : 0))); - if (ptcb_desc->btx_enable_sw_calc_duration) + if (ptcb_desc->tx_enable_sw_calc_duration) SET_TX_DESC_NAV_USE_HDR(pdesc, 1); if (bw_40) { - if (ptcb_desc->packet_bw) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { SET_TX_DESC_DATA_BW(pdesc, 1); SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); } else { SET_TX_DESC_DATA_BW(pdesc, 0); SET_TX_DESC_TX_SUB_CARRIER(pdesc, - mac->cur_40_prime_sc); + mac->cur_40_prime_sc); } } else { SET_TX_DESC_DATA_BW(pdesc, 0); @@ -599,13 +768,14 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, } SET_TX_DESC_LINIP(pdesc, 0); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len); + SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); } if (info->control.hw_key) { struct ieee80211_key_conf *keyconf; + keyconf = info->control.hw_key; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_WEP40: @@ -619,6 +789,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, default: SET_TX_DESC_SEC_TYPE(pdesc, 0x0); break; + } } @@ -629,6 +800,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, 1 : 0); SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/ /* Set TxRate and RTSRate in TxDesc */ /* This prevent Tx initial rate of new-coming packets */ /* from being overwritten by retried packet rate.*/ @@ -639,7 +811,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "Enable RDG function.\n"); + "Enable RDG function.\n"); SET_TX_DESC_RDG_ENABLE(pdesc, 1); SET_TX_DESC_HTC(pdesc, 1); } @@ -648,7 +820,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); - SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len); + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len); SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); if (rtlpriv->dm.useramask) { SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); @@ -664,8 +836,9 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || - is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { SET_TX_DESC_BMC(pdesc, 1); + } rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id); RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); @@ -733,8 +906,8 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, pdesc, TX_DESC_SIZE); } -void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val) +void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val) { if (istx == true) { switch (desc_name) { @@ -745,7 +918,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break; default: - RT_ASSERT(false, "ERR txdesc :%d not processed\n", + RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break; } @@ -764,7 +937,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, SET_RX_DESC_EOR(pdesc, 1); break; default: - RT_ASSERT(false, "ERR rxdesc :%d not processed\n", + RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break; } @@ -784,7 +957,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break; default: - RT_ASSERT(false, "ERR txdesc :%d not processed\n", + RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break; } @@ -796,8 +969,11 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(pdesc); + break; default: - RT_ASSERT(false, "ERR rxdesc :%d not processed\n", + RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break; } @@ -805,6 +981,22 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) return ret; } +bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl88ee_get_desc(entry, true, HW_DESC_OWN); + + /*beacon packet will only use the first + *descriptor defautly,and the own may not + *be cleared by the hardware + */ + if (own) + return false; + return true; +} + void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -815,3 +1007,10 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } + +u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb) +{ + return 0; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h index 8c2609412d2cc26d92b7efcccf85ef0281def89d..eab5ae0eb46c4b65cecdc192c2e82da54e74944d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
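The new rtl88ee_is_tx_desc_closed() hook added above reads the descriptor OWN bit: while OWN is set the DMA engine still owns the descriptor and the frame is not finished (beacon descriptors being the noted exception, since hardware may never clear OWN for them). A hedged sketch of how the shared TX-reclaim path would consult it before recycling a slot; the surrounding loop is assumed from the common rtlwifi PCI code, not taken from this patch:

	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];

	while (skb_queue_len(&ring->queue)) {
		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, hw_queue,
							  ring->idx))
			break;	/* hardware still owns this descriptor */

		/* descriptor closed: unmap the DMA buffer, return the skb
		 * to mac80211, then advance ring->idx and reuse the slot */
		ring->idx = (ring->idx + 1) % ring->entries;
	}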
* @@ -30,59 +26,59 @@ #ifndef __RTL92CE_TRX_H__ #define __RTL92CE_TRX_H__ -#define TX_DESC_SIZE 64 +#define TX_DESC_SIZE 64 #define TX_DESC_AGGR_SUBFRAME_SIZE 32 -#define RX_DESC_SIZE 32 +#define RX_DESC_SIZE 32 #define RX_DRV_INFO_SIZE_UNIT 8 #define TX_DESC_NEXT_DESC_OFFSET 40 #define USB_HWDESC_HEADER_LEN 32 -#define CRCLENGTH 4 +#define CRCLENGTH 4 #define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ +#define SET_TX_DESC_BMC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ +#define SET_TX_DESC_HTC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) #define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) #define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ +#define SET_TX_DESC_LINIP(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ +#define SET_TX_DESC_GF(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ +#define SET_TX_DESC_OWN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ +#define GET_TX_DESC_OFFSET(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ +#define GET_TX_DESC_BMC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ +#define GET_TX_DESC_HTC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ +#define GET_TX_DESC_LAST_SEG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_TX_DESC_FIRST_SEG(__pdesc) \ +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ +#define GET_TX_DESC_LINIP(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ +#define GET_TX_DESC_NO_ACM(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ +#define GET_TX_DESC_GF(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ +#define GET_TX_DESC_OWN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 31, 1) -#define SET_TX_DESC_MACID(__pdesc, __val) \ +#define SET_TX_DESC_MACID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 6, __val) #define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) @@ -90,11 +86,11 @@ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) #define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ +#define SET_TX_DESC_PIFS(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) #define SET_TX_DESC_RATE_ID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) #define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) @@ -102,10 +98,10 @@ 
SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) #define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 26, 5, __val) -#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \ +#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) -#define GET_TX_DESC_MACID(__pdesc) \ +#define GET_TX_DESC_MACID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) #define GET_TX_DESC_AGG_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) @@ -119,7 +115,7 @@ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) #define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_TX_DESC_PIFS(__pdesc) \ +#define GET_TX_DESC_PIFS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) #define GET_TX_DESC_RATE_ID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) @@ -205,7 +201,6 @@ #define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val) - #define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) #define GET_TX_DESC_TAIL_PAGE(__pdesc) \ @@ -213,7 +208,6 @@ #define GET_TX_DESC_SEQ(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) - #define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) #define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ @@ -386,7 +380,6 @@ #define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) - #define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) #define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ @@ -549,8 +542,10 @@ do { \ rxmcs == DESC92C_RATE5_5M ||\ rxmcs == DESC92C_RATE11M) +#define IS_LITTLE_ENDIAN 1 + struct phy_rx_agc_info_t { - #ifdef __LITTLE_ENDIAN + #if IS_LITTLE_ENDIAN u8 gain:7, trsw:1; #else u8 trsw:1, gain:7; @@ -562,7 +557,7 @@ struct phy_status_rpt { u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; u8 cck_rpt_b_ofdm_cfosho_b; - u8 rsvd_1; + u8 rsvd_1;/* ch_corr_msb; */ u8 noise_power_db_msb; u8 path_cfotail[2]; u8 pcts_mask[2]; @@ -574,7 +569,7 @@ struct phy_status_rpt { u8 stream_target_csi[2]; u8 sig_evm; u8 rsvd_3; -#ifdef __LITTLE_ENDIAN +#if IS_LITTLE_ENDIAN u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ u8 sgi_en:1; u8 rxsc:2; @@ -777,19 +772,25 @@ struct rx_desc_88e { void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, struct sk_buff *skb, + u8 *txbd, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb); -void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val); +void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val); u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, + bool firstseg, bool lastseg, struct sk_buff *skb); +u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 
eb78fd8607f7e456baf79066cb5e00a7c00e5d88..f6cb5aedfdd1ec30fd7046aede5547386723a32d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -1771,7 +1771,7 @@ static void rtl92c_check_bt_change(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tmp1byte = 0; - if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version) && + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) && rtlpcipriv->bt_coexist.bt_coexistence) tmp1byte |= BIT(5); if (rtlpcipriv->bt_coexist.bt_cur_state) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 04a41628ceedd374ed12f86740ffe1f71d543f71..a00861b26ece7ae6b2c6168ca22bdaed42c71033 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../core.h" #include "../rtl8192ce/reg.h" #include "../rtl8192ce/def.h" #include "fw_common.h" @@ -71,66 +68,31 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) } } -static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer, - u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20; - u8 *bufferPtr = (u8 *) buffer; - u32 i, offset, blockCount, remainSize; - - blockCount = size / blockSize; - remainSize = size % blockSize; - - for (i = 0; i < blockCount; i++) { - offset = i * blockSize; - rtlpriv->io.writeN_sync(rtlpriv, - (FW_8192C_START_ADDRESS + offset), - (void *)(bufferPtr + offset), - blockSize); - } - - if (remainSize) { - offset = blockCount * blockSize; - rtlpriv->io.writeN_sync(rtlpriv, - (FW_8192C_START_ADDRESS + offset), - (void *)(bufferPtr + offset), - remainSize); - } -} - static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blockSize = sizeof(u32); - u8 *bufferPtr = (u8 *) buffer; - u32 *pu4BytePtr = (u32 *) buffer; - u32 i, offset, blockCount, remainSize; - u32 data; - - if (rtlpriv->io.writeN_sync) { - rtl_block_fw_writeN(hw, buffer, size); - return; - } - blockCount = size / blockSize; - remainSize = size % blockSize; - if (remainSize) { - /* the last word is < 4 bytes - pad it with zeros */ - for (i = 0; i < 4 - remainSize; i++) - *(bufferPtr + size + i) = 0; - blockCount++; - } + u32 blocksize = sizeof(u32); + u8 *bufferptr = (u8 *)buffer; + u32 *pu4byteptr = (u32 *)buffer; + u32 i, offset, blockcount, remainsize; - for (i = 0; i < blockCount; i++) { - offset = i * blockSize; - /* for big-endian platforms, the firmware data need to be byte - * swapped as it was read as a byte string and will be written - * as 32-bit dwords and byte swapped when written - */ - data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i)); + blockcount = size / blocksize; + remainsize = size % blocksize; + + for (i = 0; i < blockcount; i++) { + offset = i * blocksize; rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - data); + *(pu4byteptr 
+ i)); + } + + if (remainsize) { + offset = blockcount * blocksize; + bufferptr += offset; + for (i = 0; i < remainsize; i++) { + rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + + offset + i), *(bufferptr + i)); + } } } @@ -168,19 +130,20 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 *bufferPtr = buffer; + bool is_version_b; + u8 *bufferptr = (u8 *)buffer; - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size); - - if (IS_CHIP_VER_B(version)) { - u32 pageNums, remainSize; + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); + is_version_b = IS_NORMAL_CHIP(version); + if (is_version_b) { + u32 pageNums, remainsize; u32 page, offset; - if (IS_HARDWARE_TYPE_8192CE(rtlhal)) - _rtl92c_fill_dummy(bufferPtr, &size); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) + _rtl92c_fill_dummy(bufferptr, &size); pageNums = size / FW_8192C_PAGE_SIZE; - remainSize = size % FW_8192C_PAGE_SIZE; + remainsize = size % FW_8192C_PAGE_SIZE; if (pageNums > 4) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -189,15 +152,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, for (page = 0; page < pageNums; page++) { offset = page * FW_8192C_PAGE_SIZE; - _rtl92c_fw_page_write(hw, page, (bufferPtr + offset), + _rtl92c_fw_page_write(hw, page, (bufferptr + offset), FW_8192C_PAGE_SIZE); } - if (remainSize) { + if (remainsize) { offset = pageNums * FW_8192C_PAGE_SIZE; page = pageNums; - _rtl92c_fw_page_write(hw, page, (bufferPtr + offset), - remainSize); + _rtl92c_fw_page_write(hw, page, (bufferptr + offset), + remainsize); } } else { _rtl92c_fw_block_write(hw, buffer, size); @@ -207,6 +170,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = -EIO; u32 counter = 0; u32 value32; @@ -217,12 +181,13 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x\n", value32); - return -EIO; + "chksum report faill ! REG_MCUFWDL:0x%08x .\n", + value32); + goto exit; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32); + "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; @@ -235,9 +200,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); if (value32 & WINTINI_RDY) { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x\n", - value32); - return 0; + "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", + value32); + err = 0; + goto exit; } mdelay(FW_8192C_POLLING_DELAY); @@ -245,8 +211,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", value32); - return -EIO; + "Polling FW ready fail!! 
REG_MCUFWDL:0x%08x .\n", value32); + +exit: + return err; } int rtl92c_download_fw(struct ieee80211_hw *hw) @@ -256,21 +224,21 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) struct rtl92c_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; + int err; enum version_8192c version = rtlhal->version; - if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) + if (!rtlhal->pfirmware) return 1; pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; - pfwdata = rtlhal->pfirmware; + pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x),Size(%d)\n", - le16_to_cpu(pfwheader->version), - le16_to_cpu(pfwheader->signature), - (uint)sizeof(struct rtl92c_firmware_header)); + pfwheader->version, pfwheader->signature, + (int)sizeof(struct rtl92c_firmware_header)); pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); fwsize = fwsize - sizeof(struct rtl92c_firmware_header); @@ -280,7 +248,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) _rtl92c_write_fw(hw, version, pfwdata, fwsize); _rtl92c_enable_fw_download(hw, false); - if (_rtl92c_fw_free_to_go(hw)) { + err = _rtl92c_fw_free_to_go(hw); + if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Firmware is not ready to run!\n"); } else { @@ -307,7 +276,7 @@ static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) } static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *p_cmdbuffer) + u8 element_id, u32 cmd_len, u8 *cmdbuffer) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -315,7 +284,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, u16 box_reg = 0, box_extreg = 0; u8 u1b_tmp; bool isfw_read = false; - bool bwrite_success = false; + u8 buf_index = 0; + bool bwrite_sucess = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; @@ -329,16 +299,15 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); if (rtlhal->h2c_setinprogress) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "H2C set in progress! Wait to set..element_id(%d)\n", + "H2C set in progress! Wait to set..element_id(%d).\n", element_id); - while (rtlhal->h2c_setinprogress) { spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); h2c_waitcounter++; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Wait 100 us (%d times)...\n", - h2c_waitcounter); + h2c_waitcounter); udelay(100); if (h2c_waitcounter > 1000) @@ -354,7 +323,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } } - while (!bwrite_success) { + while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -381,14 +350,13 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, box_extreg = REG_HMEBOX_EXT_3; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum); while (!isfw_read) { - wait_h2c_limmit--; if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, @@ -408,7 +376,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Write H2C register BOX[%d] fail!!!!! Fw do not read\n", + "Write H2C register BOX[%d] fail!!!!! 
Fw do not read.\n", boxnum); break; } @@ -418,13 +386,13 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, boxcontent[0] = element_id; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Write element_id box_reg(%4x) = %2x\n", - box_reg, element_id); + box_reg, element_id); switch (cmd_len) { case 1: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 1); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 1); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -433,8 +401,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, break; case 2: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 2); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -443,8 +411,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, break; case 3: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 3); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 3); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -453,10 +421,10 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, break; case 4: boxcontent[0] |= (BIT(7)); - memcpy((u8 *) (boxextcontent), - p_cmdbuffer, 2); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer + 2, 2); + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index + 2, 2); for (idx = 0; idx < 2; idx++) { rtl_write_byte(rtlpriv, box_extreg + idx, @@ -470,10 +438,10 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, break; case 5: boxcontent[0] |= (BIT(7)); - memcpy((u8 *) (boxextcontent), - p_cmdbuffer, 2); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer + 2, 3); + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index + 2, 3); for (idx = 0; idx < 2; idx++) { rtl_write_byte(rtlpriv, box_extreg + idx, @@ -486,12 +454,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } - bwrite_success = true; + bwrite_sucess = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) @@ -499,7 +467,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "pHalData->last_hmeboxnum = %d\n", - rtlhal->last_hmeboxnum); + rtlhal->last_hmeboxnum); } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); @@ -510,12 +478,19 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *p_cmdbuffer) + u8 element_id, u32 cmd_len, u8 *cmdbuffer) { + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 tmp_cmdbuf[2]; + if (!rtlhal->fw_ready) { + RT_ASSERT(false, + "return H2C cmd because of Fw download fail!!!\n"); + return; + } + memset(tmp_cmdbuf, 0, 8); - memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len); + memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); _rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); return; @@ -534,7 +509,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) { - RT_ASSERT(false, "8051 reset fail\n"); + RT_ASSERT(false, "8051 reset fail.\n"); break; } udelay(50); @@ -546,56 +521,24 @@ EXPORT_SYMBOL(rtl92c_firmware_selfreset); 
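The H2C box code above packs commands of up to three bytes straight into one HMEBOX register (element id in byte 0, BIT(7) clear) and, for four- or five-byte commands, sets BIT(7) and spills the first two payload bytes into the matching HMEBOX_EXT register. Callers never touch the boxes themselves; they build a small parameter buffer and pass it to rtl92c_fill_h2c_cmd(), as the power-mode helper just below does. A condensed usage sketch built from that helper:

	u8 pwrmode[3] = { 0 };

	SET_H2CCMD_PWRMODE_PARM_MODE(pwrmode, mode);
	SET_H2CCMD_PWRMODE_PARM_SMART_PS(pwrmode, 1);
	SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(pwrmode,
					      ppsc->reg_max_lps_awakeintvl);

	/* three bytes fit in one HMEBOX, no extension register needed;
	 * rtl92c_fill_h2c_cmd() now bails out early if fw_ready is false */
	rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, pwrmode);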
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 u1_h2c_set_pwrmode[3] = {0}; + u8 u1_h2c_set_pwrmode[3] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, - (rtlpriv->mac80211.p2p) ? - ppsc->smart_ps : 1); + (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1); SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode", + "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", u1_h2c_set_pwrmode, 3); rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); - } EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd); -static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - unsigned long flags; - struct sk_buff *pskb = NULL; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - kfree_skb(pskb); - - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; -} - #define BEACON_PG 0 /*->1*/ #define PSPOLL_PG 2 #define NULL_PG 3 @@ -713,7 +656,7 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; -void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) +void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -721,13 +664,13 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) u32 totalpacketlen; bool rtstatus; - u8 u1RsvdPageLoc[3] = {0}; - bool dlok = false; + u8 u1rsvdpageloc[3] = { 0 }; + bool b_dlok = false; u8 *beacon; - u8 *pspoll; + u8 *p_pspoll; u8 *nullfunc; - u8 *probersp; + u8 *p_probersp; /*--------------------------------------------------------- (1) beacon ---------------------------------------------------------*/ @@ -738,12 +681,12 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) /*------------------------------------------------------- (2) ps-poll --------------------------------------------------------*/ - pspoll = &reserved_page_packet[PSPOLL_PG * 128]; - SET_80211_PS_POLL_AID(pspoll, (mac->assoc_id | 0xc000)); - SET_80211_PS_POLL_BSSID(pspoll, mac->bssid); - SET_80211_PS_POLL_TA(pspoll, mac->mac_addr); + p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data @@ -753,57 +696,54 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - 
SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ - probersp = &reserved_page_packet[PROBERSP_PG * 128]; - SET_80211_HDR_ADDRESS1(probersp, mac->bssid); - SET_80211_HDR_ADDRESS2(probersp, mac->mac_addr); - SET_80211_HDR_ADDRESS3(probersp, mac->bssid); + p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, - "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", + "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", - u1RsvdPageLoc, 3); + "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); - if (!skb) - return; - kmemleak_not_leak(skb); - - memcpy((u8 *) skb_put(skb, totalpacketlen), + memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - rtstatus = _rtl92c_cmd_send_packet(hw, skb); + rtstatus = rtl_cmd_send_packet(hw, skb); if (rtstatus) - dlok = true; + b_dlok = true; - if (dlok) { + if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, - "Set RSVD page location to Fw\n"); + "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE", u1RsvdPageLoc, 3); + "H2C_RSVDPAGE:\n", + u1rsvdpageloc, 3); rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + sizeof(u1rsvdpageloc), u1rsvdpageloc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "Set RSVD page location to Fw FAIL!!!!!!\n"); + "Set RSVD page location to Fw FAIL!!!!!!.\n"); } EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { - u8 u1_joinbssrpt_parm[1] = {0}; + u8 u1_joinbssrpt_parm[1] = { 0 }; SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); @@ -813,11 +753,51 @@ EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd); static void rtl92c_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) { - u8 u1_ctwindow_period[1] = {ctwindow}; + u8 u1_ctwindow_period[1] = { ctwindow}; rtl92c_fill_h2c_cmd(hw, H2C_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); } +/* refactored routine */ +static void set_noa_data(struct rtl_priv *rtlpriv, + struct rtl_p2p_ps_info *p2pinfo, + struct p2p_ps_offload_t *p2p_ps_offload) +{ + int i; + u32 start_time, tsf_low; + + /* hw only support 2 set of NoA */ + for (i = 0 ; i < p2pinfo->noa_num ; i++) { + /* To control the reg setting for which NOA*/ + rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); + if (i == 0) + p2p_ps_offload->noa0_en = 1; + else + p2p_ps_offload->noa1_en = 1; + + /* config P2P NoA Descriptor Register */ + rtl_write_dword(rtlpriv, 0x5E0, + p2pinfo->noa_duration[i]); + rtl_write_dword(rtlpriv, 0x5E4, + p2pinfo->noa_interval[i]); + + /*Get Current TSF value */ + tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + start_time = p2pinfo->noa_start_time[i]; + if (p2pinfo->noa_count_type[i] != 1) { + while (start_time <= (tsf_low+(50*1024))) { + start_time += 
p2pinfo->noa_interval[i]; + if (p2pinfo->noa_count_type[i] != 255) + p2pinfo->noa_count_type[i]--; + } + } + rtl_write_dword(rtlpriv, 0x5E8, start_time); + rtl_write_dword(rtlpriv, 0x5EC, + p2pinfo->noa_count_type[i]); + } +} + void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -825,83 +805,58 @@ void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info); struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload; - u8 i; u16 ctwindow; - u32 start_time, tsf_low; switch (p2p_ps_state) { case P2P_PS_DISABLE: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); - memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t)); - break; + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "P2P_PS_DISABLE\n"); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); + break; case P2P_PS_ENABLE: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); - /* update CTWindow value. */ - if (p2pinfo->ctwindow > 0) { - p2p_ps_offload->ctwindow_en = 1; - ctwindow = p2pinfo->ctwindow; - rtl92c_set_p2p_ctw_period_cmd(hw, ctwindow); - } - /* hw only support 2 set of NoA */ - for (i = 0; i < p2pinfo->noa_num; i++) { - /* To control the register setting for which NOA*/ - rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); - if (i == 0) - p2p_ps_offload->noa0_en = 1; - else - p2p_ps_offload->noa1_en = 1; - - /* config P2P NoA Descriptor Register */ - rtl_write_dword(rtlpriv, 0x5E0, - p2pinfo->noa_duration[i]); - rtl_write_dword(rtlpriv, 0x5E4, - p2pinfo->noa_interval[i]); - - /*Get Current TSF value */ - tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); - - start_time = p2pinfo->noa_start_time[i]; - if (p2pinfo->noa_count_type[i] != 1) { - while (start_time <= (tsf_low+(50*1024))) { - start_time += p2pinfo->noa_interval[i]; - if (p2pinfo->noa_count_type[i] != 255) - p2pinfo->noa_count_type[i]--; - } + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "P2P_PS_ENABLE\n"); + /* update CTWindow value. 
*/ + if (p2pinfo->ctwindow > 0) { + p2p_ps_offload->ctwindow_en = 1; + ctwindow = p2pinfo->ctwindow; + rtl92c_set_p2p_ctw_period_cmd(hw, ctwindow); } - rtl_write_dword(rtlpriv, 0x5E8, start_time); - rtl_write_dword(rtlpriv, 0x5EC, - p2pinfo->noa_count_type[i]); - } + /* call refactored routine */ + set_noa_data(rtlpriv, p2pinfo, p2p_ps_offload); - if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { - /* rst p2p circuit */ - rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); + if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { + /* rst p2p circuit */ + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, + BIT(4)); - p2p_ps_offload->offload_en = 1; + p2p_ps_offload->offload_en = 1; - if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { - p2p_ps_offload->role = 1; - p2p_ps_offload->allstasleep = 0; - } else { - p2p_ps_offload->role = 0; - } + if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { + p2p_ps_offload->role = 1; + p2p_ps_offload->allstasleep = 0; + } else { + p2p_ps_offload->role = 0; + } - p2p_ps_offload->discovery = 0; - } - break; + p2p_ps_offload->discovery = 0; + } + break; case P2P_PS_SCAN: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n"); - p2p_ps_offload->discovery = 1; - break; + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n"); + p2p_ps_offload->discovery = 1; + break; case P2P_PS_SCAN_DONE: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n"); - p2p_ps_offload->discovery = 0; - p2pinfo->p2p_ps_state = P2P_PS_ENABLE; - break; + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "P2P_PS_SCAN_DONE\n"); + p2p_ps_offload->discovery = 0; + p2pinfo->p2p_ps_state = P2P_PS_ENABLE; + break; default: - break; + break; } rtl92c_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); + } EXPORT_SYMBOL_GPL(rtl92c_set_p2p_ps_offload_cmd); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index 15b2055e6212c4401347dbc873f9f49628fe2d3f..a815bd6273da0645cf86f6f77050f220875ec6ff 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -36,11 +36,38 @@ #define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 #define FW_8192C_POLLING_TIMEOUT_COUNT 100 +#define NORMAL_CHIP BIT(4) #define IS_FW_HEADER_EXIST(_pfwhdr) \ ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\ (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0) +#define CUT_VERSION_MASK (BIT(6)|BIT(7)) +#define CHIP_VENDOR_UMC BIT(5) +#define CHIP_VENDOR_UMC_B_CUT BIT(6) /* Chip version for ECO */ +#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false) +#define RF_TYPE_MASK (BIT(0)|BIT(1)) +#define GET_CVID_RF_TYPE(version) \ + ((version) & RF_TYPE_MASK) +#define GET_CVID_CUT_VERSION(version) \ + ((version) & CUT_VERSION_MASK) +#define IS_NORMAL_CHIP(version) \ + ((version & NORMAL_CHIP) ? true : false) +#define IS_2T2R(version) \ + (((GET_CVID_RF_TYPE(version)) == \ + CHIP_92C_BITMASK) ? true : false) +#define IS_92C_SERIAL(version) \ + ((IS_2T2R(version)) ? true : false) +#define IS_CHIP_VENDOR_UMC(version) \ + ((version & CHIP_VENDOR_UMC) ? true : false) +#define IS_VENDOR_UMC_A_CUT(version) \ + ((IS_CHIP_VENDOR_UMC(version)) ? \ + ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) +#define IS_81XXC_VENDOR_UMC_B_CUT(version) \ + ((IS_CHIP_VENDOR_UMC(version)) ? \ + ((GET_CVID_CUT_VERSION(version) == \ + CHIP_VENDOR_UMC_B_CUT) ? 
true : false) : false) + struct rtl92c_firmware_header { __le16 signature; u8 category; @@ -60,19 +87,6 @@ struct rtl92c_firmware_header { __le32 rsvd5; }; -enum rtl8192c_h2c_cmd { - H2C_AP_OFFLOAD = 0, - H2C_SETPWRMODE = 1, - H2C_JOINBSSRPT = 2, - H2C_RSVDPAGE = 3, - H2C_RSSI_REPORT = 5, - H2C_RA_MASK = 6, - H2C_MACID_PS_MODE = 7, - H2C_P2P_PS_OFFLOAD = 8, - H2C_P2P_PS_CTW_CMD = 32, - MAX_H2CCMD -}; - #define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0)) #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c index 9e32ac8a4425f5dd9b9d3c8f4c57db66d684c1dd..77e61b19bf36a7f72b264d1d4715d1c5c83aa244 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c @@ -27,12 +27,13 @@ * *****************************************************************************/ -#include #include "../wifi.h" #include "../rtl8192ce/reg.h" #include "../rtl8192ce/def.h" #include "dm_common.h" +#include "fw_common.h" #include "phy_common.h" +#include u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { @@ -50,7 +51,6 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) bitmask, regaddr, originalvalue); return returnvalue; - } EXPORT_SYMBOL(rtl92c_phy_query_bb_reg); @@ -75,7 +75,6 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask, data); - } EXPORT_SYMBOL(rtl92c_phy_set_bb_reg); @@ -84,7 +83,6 @@ u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, { RT_ASSERT(false, "deprecated!\n"); return 0; - } EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read); @@ -129,10 +127,10 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, tmplong | BLSSIREADEDGE); mdelay(1); if (rfpath == RF90_PATH_A) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); else if (rfpath == RF90_PATH_B) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, BIT(8)); if (rfpi_enable) retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, @@ -141,7 +139,8 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, BLSSIREADBACKDATA); RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf_rb, retvalue); + rfpath, pphyreg->rf_rb, + retvalue); return retvalue; } EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read); @@ -165,7 +164,8 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf3wire_offset, data_and_addr); + rfpath, pphyreg->rf3wire_offset, + data_and_addr); } EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write); @@ -174,7 +174,7 @@ u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) u32 i; for (i = 0; i <= 31; i++) { - if ((bitmask >> i) & 0x1) + if (((bitmask >> i) & 0x1) == 1) break; } return i; @@ -210,11 +210,10 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n"); rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, 
BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); return false; } if (rtlphy->rf_type == RF_1T2R) { @@ -227,7 +226,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); return false; } rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, @@ -236,12 +235,12 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); return false; } - rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, - 0x200)); + rtlphy->cck_high_power = + (bool)(rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, 0x200)); return true; } + EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile); void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, @@ -250,51 +249,153 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - int index; - - if (regaddr == RTXAGC_A_RATE18_06) - index = 0; - else if (regaddr == RTXAGC_A_RATE54_24) - index = 1; - else if (regaddr == RTXAGC_A_CCK1_MCS32) - index = 6; - else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) - index = 7; - else if (regaddr == RTXAGC_A_MCS03_MCS00) - index = 2; - else if (regaddr == RTXAGC_A_MCS07_MCS04) - index = 3; - else if (regaddr == RTXAGC_A_MCS11_MCS08) - index = 4; - else if (regaddr == RTXAGC_A_MCS15_MCS12) - index = 5; - else if (regaddr == RTXAGC_B_RATE18_06) - index = 8; - else if (regaddr == RTXAGC_B_RATE54_24) - index = 9; - else if (regaddr == RTXAGC_B_CCK1_55_MCS32) - index = 14; - else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) - index = 15; - else if (regaddr == RTXAGC_B_MCS03_MCS00) - index = 10; - else if (regaddr == RTXAGC_B_MCS07_MCS04) - index = 11; - else if (regaddr == RTXAGC_B_MCS11_MCS08) - index = 12; - else if (regaddr == RTXAGC_B_MCS15_MCS12) - index = 13; - else - return; - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n", - rtlphy->pwrgroup_cnt, index, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]); + if (regaddr == RTXAGC_A_RATE18_06) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][0]); + } + if (regaddr == RTXAGC_A_RATE54_24) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][1]); + } + if (regaddr == RTXAGC_A_CCK1_MCS32) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][6]); + } + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + 
"MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][7]); + } + if (regaddr == RTXAGC_A_MCS03_MCS00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][2]); + } + if (regaddr == RTXAGC_A_MCS07_MCS04) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][3]); + } + if (regaddr == RTXAGC_A_MCS11_MCS08) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][4]); + } + if (regaddr == RTXAGC_A_MCS15_MCS12) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][5]); + } + if (regaddr == RTXAGC_B_RATE18_06) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][8]); + } + if (regaddr == RTXAGC_B_RATE54_24) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][9]); + } + if (regaddr == RTXAGC_B_CCK1_55_MCS32) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][14]); + } + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][15]); + } + if (regaddr == RTXAGC_B_MCS03_MCS00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][10]); + } + if (regaddr == RTXAGC_B_MCS07_MCS04) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][11]); + } + if (regaddr == RTXAGC_B_MCS11_MCS08) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][12]); + } + if (regaddr == RTXAGC_B_MCS15_MCS12) { + 
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][13]); - if (index == 13) rtlphy->pwrgroup_cnt++; + } } EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset); @@ -304,29 +405,29 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) struct rtl_phy *rtlphy = &(rtlpriv->phy); rtlphy->default_initialgain[0] = - (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[1] = - (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[2] = - (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[3] = - (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3]); + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); - rtlphy->framesync = (u8) rtl_get_bbreg(hw, + rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0); rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, MASKDWORD); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Default framesync (0x%x) = 0x%x\n", - ROFDM0_RXDETECTOR3, rtlphy->framesync); + ROFDM0_RXDETECTOR3, rtlphy->framesync); } void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) @@ -426,19 +527,17 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) long txpwr_dbm; txpwr_level = rtlphy->cur_cck_txpwridx; - txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_B, txpwr_level); + txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, + txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx + rtlefuse->legacy_ht_txpowerdiff; - if (_rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_G, + if (_rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm) txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx; - if (_rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_N_24G, + if (_rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) > txpwr_dbm) txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, @@ -480,21 +579,19 @@ static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw, rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; - } void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 cckpowerlevel[2], ofdmpowerlevel[2]; if (!rtlefuse->txpwr_fromeprom) return; _rtl92c_get_txpower_index(hw, channel, &cckpowerlevel[0], &ofdmpowerlevel[0]); - _rtl92c_ccxpower_index_check(hw, - channel, &cckpowerlevel[0], + _rtl92c_ccxpower_index_check(hw, channel, &cckpowerlevel[0], &ofdmpowerlevel[0]); rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); 
rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], @@ -509,11 +606,9 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 idx; u8 rf_path; - u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, - WIRELESS_MODE_B, + u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_idx(hw, WIRELESS_MODE_B, power_indbm); - u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, - WIRELESS_MODE_N_24G, + u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_idx(hw, WIRELESS_MODE_N_24G, power_indbm); if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0) ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff; @@ -521,7 +616,7 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) ofdmtxpwridx = 0; RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE, "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n", - power_indbm, ccktxpwridx, ofdmtxpwridx); + power_indbm, ccktxpwridx, ofdmtxpwridx); for (idx = 0; idx < 14; idx++) { for (rf_path = 0; rf_path < 2; rf_path++) { rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx; @@ -536,7 +631,7 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) } EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm); -u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, +u8 _rtl92c_phy_dbm_to_txpwr_idx(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, long power_indbm) { @@ -557,7 +652,7 @@ u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, } if ((power_indbm - offset) > 0) - txpwridx = (u8) ((power_indbm - offset) * 2); + txpwridx = (u8)((power_indbm - offset) * 2); else txpwridx = 0; @@ -566,7 +661,7 @@ u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, return txpwridx; } -EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx); +EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_idx); long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, @@ -607,7 +702,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "FALSE driver sleep or unload\n"); + "false driver sleep or unload\n"); rtlphy->set_bwmode_inprogress = false; rtlphy->current_chan_bw = tmp_bw; } @@ -640,7 +735,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw) } break; } while (true); - RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n"); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); } EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback); @@ -655,14 +750,14 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) if (rtlphy->set_bwmode_inprogress) return 0; RT_ASSERT((rtlphy->current_channel <= 14), - "WIRELESS_MODE_G but channel>14\n"); + "WIRELESS_MODE_G but channel>14"); rtlphy->sw_chnl_inprogress = true; rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl92c_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schedule workitem\n"); + "sw_chnl_inprogress false schdule workitem\n"); rtlphy->sw_chnl_inprogress = false; } else { RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, @@ -673,22 +768,22 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) } EXPORT_SYMBOL(rtl92c_phy_sw_chnl); -static void _rtl92c_phy_sw_rf_setting(struct ieee80211_hw *hw, u8 channel) +static void _rtl92c_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) 
{ - if (channel == 6 && rtlphy->current_chan_bw == - HT_CHANNEL_WIDTH_20) - rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, - 0x00255); - else{ - u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A, - RF_RX_G1, RFREG_OFFSET_MASK); + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { + if (channel == 6 && + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, + MASKDWORD, 0x00255); + } else { + u32 backuprf0x1A = + (u32)rtl_get_rfreg(hw, RF90_PATH_A, RF_RX_G1, + RFREG_OFFSET_MASK); rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, - backupRF0x1A); + backuprf0x1A); } } } @@ -701,7 +796,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, struct swchnlcmd *pcmd; if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL\n"); + RT_ASSERT(false, "cmdtable cannot be NULL.\n"); return false; } @@ -747,7 +842,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, rfdependcmdcnt = 0; RT_ASSERT((channel >= 1 && channel <= 14), - "invalid channel for Zebra: %d\n", channel); + "illegal channel for Zebra: %d\n", channel); _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, @@ -768,6 +863,10 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Invalid 'stage' = %d, Check it!\n", *stage); + return true; } if (currentcmd->cmdid == CMDID_END) { @@ -794,7 +893,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, break; case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, - (u8) currentcmd->para2); + (u8)currentcmd->para2); break; case CMDID_RF_WRITEREG: for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { @@ -806,12 +905,12 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, currentcmd->para1, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]); - _rtl92c_phy_sw_rf_setting(hw, channel); } + _rtl92c_phy_sw_rf_seting(hw, channel); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } @@ -900,7 +999,7 @@ static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw) } static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, - bool iqk_ok, long result[][8], + bool b_iqk_ok, long result[][8], u8 final_candidate, bool btxonly) { u32 oldval_0, x, tx0_a, reg; @@ -908,7 +1007,7 @@ static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, if (final_candidate == 0xFF) { return; - } else if (iqk_ok) { + } else if (b_iqk_ok) { oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) >> 22) & 0x3FF; x = result[final_candidate][0]; @@ -940,7 +1039,7 @@ static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, } static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, - bool iqk_ok, long result[][8], + bool b_iqk_ok, long result[][8], u8 final_candidate, bool btxonly) { u32 oldval_1, x, tx1_a, reg; @@ -948,7 +1047,7 @@ static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, if (final_candidate == 0xFF) { return; - } else if (iqk_ok) { + } else if (b_iqk_ok) { oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD) >> 22) & 0x3FF; x = result[final_candidate][4]; @@ -1017,7 +1116,7 @@ static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw, u32 i; for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - 
rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]); + rtl_write_byte(rtlpriv, macreg[i], (u8)macbackup[i]); rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); } @@ -1043,14 +1142,14 @@ static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw, u32 *macreg, u32 *macbackup) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; + u32 i = 0; - rtl_write_byte(rtlpriv, macreg[0], 0x3F); + rtl_write_byte(rtlpriv, macreg[i], 0x3F); for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) rtl_write_byte(rtlpriv, macreg[i], - (u8) (macbackup[i] & (~BIT(3)))); - rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); + (u8)(macbackup[i] & (~BIT(3)))); + rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & (~BIT(5)))); } static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw) @@ -1126,7 +1225,6 @@ static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw, } else { return false; } - } static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, @@ -1142,51 +1240,37 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec }; - u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { 0x522, 0x550, 0x551, 0x040 }; - - u32 iqk_bb_reg_92C[9] = { - 0xc04, 0xc08, 0x874, 0xb68, - 0xb6c, 0x870, 0x860, 0x864, - 0x800 - }; - const u32 retrycount = 2; + u32 bbvalue; if (t == 0) { - /* dummy read */ - rtl_get_bbreg(hw, 0x800, MASKDWORD); + bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); _rtl92c_phy_save_adda_registers(hw, adda_reg, rtlphy->adda_backup, 16); _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); - _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg_92C, - rtlphy->iqk_bb_backup, 9); } _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) { - rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); + rtlphy->rfpi_enable = + (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); } if (!rtlphy->rfpi_enable) _rtl92c_phy_pi_mode_switch(hw, true); - - rtl_set_bbreg(hw, 0x800, BIT(24), 0x0); - + if (t == 0) { + rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD); + rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD); + rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD); + } rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600); rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4); rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000); - - rtl_set_bbreg(hw, 0x870, BIT(10), 0x1); - rtl_set_bbreg(hw, 0x870, BIT(26), 0x1); - rtl_set_bbreg(hw, 0x860, BIT(10), 0x0); - rtl_set_bbreg(hw, 0x864, BIT(10), 0x0); - if (is2t) { rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000); @@ -1228,8 +1312,8 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, pathb_ok = _rtl92c_phy_path_b_iqk(hw); if (pathb_ok == 0x03) { result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, - MASKDWORD) & + 0xeb4, + MASKDWORD) & 0x3FF0000) >> 16; result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & @@ -1243,17 +1327,21 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, break; } else if (i == (retrycount - 1) && pathb_ok == 0x01) { result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, - MASKDWORD) & + 0xeb4, + MASKDWORD) & 0x3FF0000) >> 16; } result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & 0x3FF0000) >> 16; } } - + rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04); + rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874); + rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08); rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0); - + rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3); + 
if (is2t) + rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); if (t != 0) { if (!rtlphy->rfpi_enable) _rtl92c_phy_pi_mode_switch(hw, false); @@ -1261,379 +1349,12 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, rtlphy->adda_backup, 16); _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); - _rtl92c_phy_reload_adda_registers(hw, iqk_bb_reg_92C, - rtlphy->iqk_bb_backup, 9); - - rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3); - if (is2t) - rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); - - rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00); - rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00); } } static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta, bool is2t) { -#if 0 /* This routine is deliberately dummied out for later fixes */ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - - u32 reg_d[PATH_NUM]; - u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound; - - u32 bb_backup[APK_BB_REG_NUM]; - u32 bb_reg[APK_BB_REG_NUM] = { - 0x904, 0xc04, 0x800, 0xc08, 0x874 - }; - u32 bb_ap_mode[APK_BB_REG_NUM] = { - 0x00000020, 0x00a05430, 0x02040000, - 0x000800e4, 0x00204000 - }; - u32 bb_normal_ap_mode[APK_BB_REG_NUM] = { - 0x00000020, 0x00a05430, 0x02040000, - 0x000800e4, 0x22204000 - }; - - u32 afe_backup[APK_AFE_REG_NUM]; - u32 afe_reg[APK_AFE_REG_NUM] = { - 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, - 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, - 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, - 0xeec - }; - - u32 mac_backup[IQK_MAC_REG_NUM]; - u32 mac_reg[IQK_MAC_REG_NUM] = { - 0x522, 0x550, 0x551, 0x040 - }; - - u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { - {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c}, - {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e} - }; - - u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { - {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, - {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c} - }; - - u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { - {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d}, - {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050} - }; - - u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { - {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, - {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a} - }; - - u32 afe_on_off[PATH_NUM] = { - 0x04db25a4, 0x0b1b25a4 - }; - - const u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c }; - - u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 }; - - u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 }; - - u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 }; - - const char apk_delta_mapping[APK_BB_REG_NUM][13] = { - {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0} - }; - - const u32 apk_normal_setting_value_1[13] = { - 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28, - 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3, - 0x12680000, 0x00880000, 0x00880000 - }; - - const u32 apk_normal_setting_value_2[16] = { - 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3, - 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025, - 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008, - 0x00050006 - }; - - u32 apk_result[PATH_NUM][APK_BB_REG_NUM]; - - long bb_offset, delta_v, delta_offset; - - if (!is2t) - pathbound = 1; - - return; - - for (index = 0; index < PATH_NUM; index++) { 
- apk_offset[index] = apk_normal_offset[index]; - apk_value[index] = apk_normal_value[index]; - afe_on_off[index] = 0x6fdb25a4; - } - - for (index = 0; index < APK_BB_REG_NUM; index++) { - for (path = 0; path < pathbound; path++) { - apk_rf_init_value[path][index] = - apk_normal_rf_init_value[path][index]; - apk_rf_value_0[path][index] = - apk_normal_rf_value_0[path][index]; - } - bb_ap_mode[index] = bb_normal_ap_mode[index]; - - apkbound = 6; - } - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD); - } - - _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup); - - _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16); - - for (path = 0; path < pathbound; path++) { - if (path == RF90_PATH_A) { - offset = 0xb00; - for (index = 0; index < 11; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); - - offset = 0xb68; - for (; index < 13; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); - - offset = 0xb00; - for (index = 0; index < 16; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_2 - [index]); - - offset += 0x04; - } - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - } else if (path == RF90_PATH_B) { - offset = 0xb70; - for (index = 0; index < 10; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000); - rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); - - offset = 0xb68; - index = 11; - for (; index < 13; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); - - offset = 0xb60; - for (index = 0; index < 16; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_2 - [index]); - - offset += 0x04; - } - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - } - - reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path, - 0xd, MASKDWORD); - - for (index = 0; index < APK_AFE_REG_NUM; index++) - rtl_set_bbreg(hw, afe_reg[index], MASKDWORD, - afe_on_off[path]); - - if (path == RF90_PATH_A) { - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, - bb_ap_mode[index]); - } - } - - _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup); - - if (path == 0) { - rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000); - } else { - rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD, - 0x10000); - rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, - 0x1000f); - rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, - 0x20103); - } - - delta_offset = ((delta + 14) / 2); - if (delta_offset < 0) - delta_offset = 0; - else if (delta_offset > 12) - delta_offset = 12; - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index != 1) - continue; - - tmpreg = apk_rf_init_value[path][index]; - - if (!rtlefuse->apk_thermalmeterignore) { - bb_offset = (tmpreg & 0xF0000) >> 16; - - if (!(tmpreg & BIT(15))) - bb_offset = -bb_offset; - - delta_v = - apk_delta_mapping[index][delta_offset]; - - bb_offset += delta_v; - - if (bb_offset < 0) { - tmpreg = tmpreg & (~BIT(15)); - bb_offset = -bb_offset; - } else { - tmpreg = tmpreg | BIT(15); - } - - 
tmpreg = - (tmpreg & 0xFFF0FFFF) | (bb_offset << 16); - } - - rtl_set_rfreg(hw, (enum radio_path)path, 0xc, - MASKDWORD, 0x8992e); - rtl_set_rfreg(hw, (enum radio_path)path, 0x0, - MASKDWORD, apk_rf_value_0[path][index]); - rtl_set_rfreg(hw, (enum radio_path)path, 0xd, - MASKDWORD, tmpreg); - - i = 0; - do { - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000); - rtl_set_bbreg(hw, apk_offset[path], - MASKDWORD, apk_value[0]); - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset 0x%x " - "value 0x%x\n", - apk_offset[path], - rtl_get_bbreg(hw, apk_offset[path], - MASKDWORD))); - - mdelay(3); - - rtl_set_bbreg(hw, apk_offset[path], - MASKDWORD, apk_value[1]); - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset 0x%x " - "value 0x%x\n", - apk_offset[path], - rtl_get_bbreg(hw, apk_offset[path], - MASKDWORD))); - - mdelay(20); - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - - if (path == RF90_PATH_A) - tmpreg = rtl_get_bbreg(hw, 0xbd8, - 0x03E00000); - else - tmpreg = rtl_get_bbreg(hw, 0xbd8, - 0xF8000000); - - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset " - "0xbd8[25:21] %x\n", tmpreg)); - - i++; - - } while (tmpreg > apkbound && i < 4); - - apk_result[path][index] = tmpreg; - } - } - - _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup); - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]); - } - - _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16); - - for (path = 0; path < pathbound; path++) { - rtl_set_rfreg(hw, (enum radio_path)path, 0xd, - MASKDWORD, reg_d[path]); - - if (path == RF90_PATH_B) { - rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, - 0x1000f); - rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, - 0x20101); - } - - if (apk_result[path][1] > 6) - apk_result[path][1] = 6; - } - - for (path = 0; path < pathbound; path++) { - rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (apk_result[path][1] << 5) | - apk_result[path][1])); - - if (path == RF90_PATH_A) - rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (0x00 << 5) | 0x05)); - else - rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (0x02 << 5) | 0x05)); - - rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD, - ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | - 0x08)); - - } - rtlphy->b_apk_done = true; -#endif } static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, @@ -1657,15 +1378,13 @@ static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2); else rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1); - } - } #undef IQK_ADDA_REG_NUM #undef IQK_DELAY_TIME -void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -1673,10 +1392,10 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) long result[4][8]; u8 i, final_candidate; - bool patha_ok, pathb_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4, reg_tmp = 0; + bool b_patha_ok, b_pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, + reg_ecc, reg_tmp = 0; bool is12simular, is13simular, 
is23simular; - bool start_conttx = false, singletone = false; u32 iqk_bb_reg[10] = { ROFDM0_XARXIQIMBALANCE, ROFDM0_XBRXIQIMBALANCE, @@ -1690,14 +1409,12 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) ROFDM0_RXIQEXTANTA }; - if (recovery) { + if (b_recovery) { _rtl92c_phy_reload_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); return; } - if (start_conttx || singletone) - return; for (i = 0; i < 8; i++) { result[0][i] = 0; result[1][i] = 0; @@ -1705,8 +1422,8 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) result[3][i] = 0; } final_candidate = 0xff; - patha_ok = false; - pathb_ok = false; + b_patha_ok = false; + b_pathb_ok = false; is12simular = false; is23simular = false; is13simular = false; @@ -1752,29 +1469,34 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; + reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; } if (final_candidate != 0xff) { rtlphy->reg_e94 = reg_e94 = result[final_candidate][0]; rtlphy->reg_e9c = reg_e9c = result[final_candidate][1]; reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4]; rtlphy->reg_ebc = reg_ebc = result[final_candidate][5]; reg_ec4 = result[final_candidate][6]; - patha_ok = pathb_ok = true; + reg_ecc = result[final_candidate][7]; + b_patha_ok = true; + b_pathb_ok = true; } else { rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; } if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ - _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, final_candidate, (reg_ea4 == 0)); if (IS_92C_SERIAL(rtlhal->version)) { if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */ - _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok, + _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok, result, final_candidate, (reg_ec4 == 0)); @@ -1788,10 +1510,7 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool start_conttx = false, singletone = false; - if (start_conttx || singletone) - return; if (IS_92C_SERIAL(rtlhal->version)) rtlpriv->cfg->ops->phy_lc_calibrate(hw, true); else @@ -1833,22 +1552,22 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "-->IO Cmd(%#x), set_io_inprogress(%d)\n", - iotype, rtlphy->set_io_inprogress); + iotype, rtlphy->set_io_inprogress); do { switch (iotype) { case IO_CMD_RESUME_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "[IO CMD] Resume DM after scan\n"); + "[IO CMD] Resume DM after scan.\n"); postprocessing = true; break; - case IO_CMD_PAUSE_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "[IO CMD] Pause DM before scan\n"); + "[IO CMD] Pause DM before scan.\n"); postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } } while (false); @@ -1859,7 +1578,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) return false; } rtl92c_phy_set_io(hw); - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype); return true; } 
EXPORT_SYMBOL(rtl92c_phy_set_io_cmd); @@ -1868,30 +1587,30 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct dig_t dm_digtable = rtlpriv->dm_digtable; + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress); + rtlphy->current_io_type, rtlphy->set_io_inprogress); switch (rtlphy->current_io_type) { case IO_CMD_RESUME_DM_BY_SCAN: - dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; + dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; rtl92c_dm_write_dig(hw); rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue; - dm_digtable.cur_igvalue = 0x37; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; + dm_digtable->cur_igvalue = 0x17; rtl92c_dm_write_dig(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } rtlphy->set_io_inprogress = false; - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n", - rtlphy->current_io_type); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); } EXPORT_SYMBOL(rtl92c_phy_set_io); @@ -1931,7 +1650,7 @@ void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - "Switch RF timeout !!!\n"); + "Switch RF timeout !!!.\n"); return; } rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h index e79dabe9ba1de77c757e91b46c6ba48ca6e9f12f..64bc49f4dbc6c61e941b13e7e9c54cfa326dd290 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h @@ -226,7 +226,7 @@ u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, u8 txpwridx); -u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, +u8 _rtl92c_phy_dbm_to_txpwr_idx(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, long power_indbm); void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index fa24de43ce795d4588f002a93390ea8ffb5ee109..831df101d7b7a97866b55283c8e998e3c9152806 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -146,21 +146,6 @@ enum version_8192c { VERSION_UNKNOWN = 0x88, }; -#define CUT_VERSION_MASK (BIT(6)|BIT(7)) -#define CHIP_VENDOR_UMC BIT(5) -#define CHIP_VENDOR_UMC_B_CUT BIT(6) /* Chip version for ECO */ -#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \ - ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) -#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false) -#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false) -#define IS_CHIP_VENDOR_UMC(version) \ - ((version & CHIP_VENDOR_UMC) ? 
true : false) -#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) -#define IS_81xxC_VENDOR_UMC_B_CUT(version) \ - ((IS_CHIP_VENDOR_UMC(version)) ? \ - ((GET_CVID_CUT_VERSION(version) == CHIP_VENDOR_UMC_B_CUT) ? \ - true : false) : false) - enum rtl819x_loopback_e { RTL819X_NO_LOOPBACK = 0, RTL819X_MAC_LOOPBACK = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h index d4a3d032c7bf9bf51dbc9146036c04e0280bbb51..9c5311c299fdf0b144c6ab092f82bba321084cb9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h @@ -86,70 +86,6 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 -struct swat_t { - u8 failure_cnt; - u8 try_flag; - u8 stop_trying; - long pre_rssi; - long trying_threshold; - u8 cur_antenna; - u8 pre_antenna; -}; - -enum tag_dynamic_init_gain_operation_type_definition { - DIG_TYPE_THRESH_HIGH = 0, - DIG_TYPE_THRESH_LOW = 1, - DIG_TYPE_BACKOFF = 2, - DIG_TYPE_RX_GAIN_MIN = 3, - DIG_TYPE_RX_GAIN_MAX = 4, - DIG_TYPE_ENABLE = 5, - DIG_TYPE_DISABLE = 6, - DIG_OP_TYPE_MAX -}; - -enum tag_cck_packet_detection_threshold_type_definition { - CCK_PD_STAGE_LowRssi = 0, - CCK_PD_STAGE_HighRssi = 1, - CCK_FA_STAGE_Low = 2, - CCK_FA_STAGE_High = 3, - CCK_PD_STAGE_MAX = 4, -}; - -enum dm_1r_cca_e { - CCA_1R = 0, - CCA_2R = 1, - CCA_MAX = 2, -}; - -enum dm_rf_e { - RF_SAVE = 0, - RF_NORMAL = 1, - RF_MAX = 2, -}; - -enum dm_sw_ant_switch_e { - ANS_ANTENNA_B = 1, - ANS_ANTENNA_A = 2, - ANS_ANTENNA_MAX = 3, -}; - -enum dm_dig_ext_port_alg_e { - DIG_EXT_PORT_STAGE_0 = 0, - DIG_EXT_PORT_STAGE_1 = 1, - DIG_EXT_PORT_STAGE_2 = 2, - DIG_EXT_PORT_STAGE_3 = 3, - DIG_EXT_PORT_STAGE_MAX = 4, -}; - -enum dm_dig_connect_e { - DIG_STA_DISCONNECT = 0, - DIG_STA_CONNECT = 1, - DIG_STA_BEFORE_CONNECT = 2, - DIG_MULTISTA_DISCONNECT = 3, - DIG_MULTISTA_CONNECT = 4, - DIG_CONNECT_MAX -}; - void rtl92c_dm_init(struct ieee80211_hw *hw); void rtl92c_dm_watchdog(struct ieee80211_hw *hw); void rtl92c_dm_write_dig(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index df98a5e4729acef3065040d1f7c719eb829a3a83..8ec0f031f48a575035dbe59af4b2f236d4260c72 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -37,7 +37,9 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8192c/dm_common.h" #include "../rtl8192c/fw_common.h" +#include "../rtl8192c/phy_common.h" #include "dm.h" #include "led.h" #include "hw.h" @@ -53,7 +55,7 @@ static void _rtl92ce_set_bcn_ctrl_reg(struct ieee80211_hw *hw, rtlpci->reg_bcn_ctrl_val |= set_bits; rtlpci->reg_bcn_ctrl_val &= ~clear_bits; - rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); } static void _rtl92ce_stop_tx_beacon(struct ieee80211_hw *hw) @@ -985,7 +987,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) !IS_92C_SERIAL(rtlhal->version)) { rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255); rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00); - } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { + } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE); rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31); rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425); @@ -1330,7 +1332,7 @@ static void 
_rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790); rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080); rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80); - if (!IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + if (!IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); if (rtlpcipriv->bt_coexist.bt_coexistence) { u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL); @@ -1494,7 +1496,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); rtlefuse->txpwrlevel_cck[rf_path][i] = rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index]; @@ -1543,7 +1545,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (rf_path == RF90_PATH_A) { rtlefuse->pwrgroup_ht20[rf_path][i] = @@ -1573,7 +1575,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, } for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index]; @@ -1590,7 +1592,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3)) rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0; - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index]; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h index 5533070f266c4c0706b4d83c2477d4c8421f17eb..98a086822aaceb5172e1d040352ab2d3f4bce283 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h @@ -30,7 +30,7 @@ #ifndef __RTL92CE_HW_H__ #define __RTL92CE_HW_H__ -static inline u8 _rtl92c_get_chnl_group(u8 chnl) +static inline u8 rtl92c_get_chnl_group(u8 chnl) { u8 group; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c index 98b22303c84d1849da10221e80cfce7d87568c5a..bc5ca989b915eda932c8aee82ca026be05f23451 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c @@ -35,8 +35,11 @@ #include "def.h" #include "hw.h" #include "phy.h" +#include "../rtl8192c/phy_common.h" #include "rf.h" #include "dm.h" +#include "../rtl8192c/dm_common.h" +#include "../rtl8192c/fw_common.h" #include "table.h" static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h index 94486cca400077515ced7c81e0c35922af97e425..e5e1353a94c35dca2bdcc977b13f2bb9c38a167e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h @@ -78,113 +78,6 @@ #define RTL92C_MAX_PATH_NUM 2 -enum swchnlcmd_id { - CMDID_END, - CMDID_SET_TXPOWEROWER_LEVEL, - CMDID_BBREGWRITE10, - CMDID_WRITEPORT_ULONG, - CMDID_WRITEPORT_USHORT, - CMDID_WRITEPORT_UCHAR, - CMDID_RF_WRITEREG, -}; - -struct swchnlcmd { - enum swchnlcmd_id cmdid; - u32 para1; - u32 para2; - u32 msdelay; -}; - -enum hw90_block_e { - HW90_BLOCK_MAC = 0, - HW90_BLOCK_PHY0 = 1, - HW90_BLOCK_PHY1 = 2, - 
HW90_BLOCK_RF = 3, - HW90_BLOCK_MAXIMUM = 4, -}; - -enum baseband_config_type { - BASEBAND_CONFIG_PHY_REG = 0, - BASEBAND_CONFIG_AGC_TAB = 1, -}; - -enum ra_offset_area { - RA_OFFSET_LEGACY_OFDM1, - RA_OFFSET_LEGACY_OFDM2, - RA_OFFSET_HT_OFDM1, - RA_OFFSET_HT_OFDM2, - RA_OFFSET_HT_OFDM3, - RA_OFFSET_HT_OFDM4, - RA_OFFSET_HT_CCK, -}; - -enum antenna_path { - ANTENNA_NONE, - ANTENNA_D, - ANTENNA_C, - ANTENNA_CD, - ANTENNA_B, - ANTENNA_BD, - ANTENNA_BC, - ANTENNA_BCD, - ANTENNA_A, - ANTENNA_AD, - ANTENNA_AC, - ANTENNA_ACD, - ANTENNA_AB, - ANTENNA_ABD, - ANTENNA_ABC, - ANTENNA_ABCD -}; - -struct r_antenna_select_ofdm { - u32 r_tx_antenna:4; - u32 r_ant_l:4; - u32 r_ant_non_ht:4; - u32 r_ant_ht1:4; - u32 r_ant_ht2:4; - u32 r_ant_ht_s1:4; - u32 r_ant_non_ht_s1:4; - u32 ofdm_txsc:2; - u32 reserved:2; -}; - -struct r_antenna_select_cck { - u8 r_cckrx_enable_2:2; - u8 r_cckrx_enable:2; - u8 r_ccktx_enable:4; -}; - -struct efuse_contents { - u8 mac_addr[ETH_ALEN]; - u8 cck_tx_power_idx[6]; - u8 ht40_1s_tx_power_idx[6]; - u8 ht40_2s_tx_power_idx_diff[3]; - u8 ht20_tx_power_idx_diff[3]; - u8 ofdm_tx_power_idx_diff[3]; - u8 ht40_max_power_offset[3]; - u8 ht20_max_power_offset[3]; - u8 channel_plan; - u8 thermal_meter; - u8 rf_option[5]; - u8 version; - u8 oem_id; - u8 regulatory; -}; - -struct tx_power_struct { - u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 legacy_ht_txpowerdiff; - u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 pwrgroup_cnt; - u32 mcs_original_offset[4][16]; -}; - bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 4bbdfb2df36357c3b84f7b7e81a169986d4af207..d86b5b566444e1098a666909e3d86865c00f0b7c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -35,6 +35,9 @@ #include "def.h" #include "phy.h" #include "dm.h" +#include "../rtl8192c/dm_common.h" +#include "../rtl8192c/fw_common.h" +#include "../rtl8192c/phy_common.h" #include "hw.h" #include "rf.h" #include "sw.h" @@ -165,7 +168,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && !IS_92C_SERIAL(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; rtlpriv->max_fw_size = 0x4000; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index 8f04817cb7ec810bc70dea859895788fd9259687..2fb9c7acb76a3bf7d6fa1feee728b15365b86207 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -125,7 +125,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, u32 rssi, total_rssi = 0; bool is_cck_rate; - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); + is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; pstats->is_cck = 
is_cck_rate; @@ -361,7 +361,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); - stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc); + stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; @@ -389,10 +389,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, * to decrypt it */ if (stats->decrypted) { - if (!hdr) { - /* In testing, hdr was NULL here */ - return false; - } if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h index f916555e6311dafef2c3821222d8d8bb54e8a916..c940a87175ca15d701bad48abbe2be5dd755091c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h @@ -38,9 +38,6 @@ #define CHIP_VENDOR_UMC BIT(5) #define CHIP_VENDOR_UMC_B_CUT BIT(6) -#define IS_NORMAL_CHIP(version) \ - (((version) & NORMAL_CHIP) ? true : false) - #define IS_8723_SERIES(version) \ (((version) & CHIP_8723) ? true : false) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 270cbffcac70cf02329fc1093f82e00244b78cae..04aa0b5f5b3d10a1ae16eb8db5ea74d4f38e1890 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -36,8 +36,11 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8192c/phy_common.h" #include "mac.h" #include "dm.h" +#include "../rtl8192c/dm_common.h" +#include "../rtl8192c/fw_common.h" #include "hw.h" #include "../rtl8192ce/hw.h" #include "trx.h" @@ -180,7 +183,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, eprom_chnl_txpwr_ht40_2sdf[rf_path][i]); for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); rtlefuse->txpwrlevel_cck[rf_path][i] = rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index]; rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = @@ -222,7 +225,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, } for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (rf_path == RF90_PATH_A) { rtlefuse->pwrgroup_ht20[rf_path][i] = (rtlefuse->eeprom_pwrlimit_ht20[index] @@ -249,7 +252,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, } } for (i = 0; i < 14; i++) { - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index]; else @@ -261,7 +264,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0; if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3)) rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0; - index = _rtl92c_get_chnl_group((u8) i); + index = rtl92c_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index]; else @@ -1169,13 +1172,13 @@ n. LEDCFG 0x4C[15:0] = 0x8080 /* 1. 
Disable GPIO[7:0] */ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000); value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF; - value8 = (u8) (value32&0x000000FF); + value8 = (u8)(value32&0x000000FF); value32 |= ((value8<<8) | 0x00FF0000); rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32); /* 2. Disable GPIO[10:8] */ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00); value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F; - value8 = (u8) (value16&0x000F); + value8 = (u8)(value16&0x000F); value16 |= ((value8<<4) | 0x0780); rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16); /* 3. Disable LED0 & 1 */ @@ -1245,7 +1248,7 @@ static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw, rtlusb->reg_bcn_ctrl_val |= set_bits; rtlusb->reg_bcn_ctrl_val &= ~clear_bits; - rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val); } static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index e26312fb4356720b06e3f844423a96ca07dd79e4..c2d8ec6afcdafa3b43ae2f21380d4155ccc5d3e9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -40,6 +40,7 @@ #include "dm.h" #include "mac.h" #include "trx.h" +#include "../rtl8192c/fw_common.h" #include @@ -786,7 +787,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, bool is_cck_rate; u8 *pdesc = (u8 *)p_desc; - is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc); + is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc->rxmcs); pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; pstats->packet_beacon = packet_beacon; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c index 9831ff1128ca93385377a302187ee5c0d887b56d..12f6d474b492e3b4ff74ed29c76dd972f241a8c0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c @@ -34,8 +34,11 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8192c/phy_common.h" #include "rf.h" #include "dm.h" +#include "../rtl8192c/dm_common.h" +#include "../rtl8192c/fw_common.h" #include "table.h" u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 1ac6383e79471f5944e4267f3ae8bc28715c53e2..7c5fbaf5fee0d790d1cfbeaa563a1c360a1a976b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -42,6 +42,7 @@ #include "trx.h" #include "led.h" #include "hw.h" +#include "../rtl8192c/fw_common.h" #include MODULE_AUTHOR("Georgia "); @@ -75,7 +76,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin"; - } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { + } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin"; } else { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; @@ -121,7 +122,6 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .fill_tx_desc = rtl92cu_tx_fill_desc, .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, - .cmd_send_packet = rtl92cu_cmd_send_packet, .query_rx_desc = rtl92cu_rx_query_desc, 
.set_channel_access = rtl92cu_update_channel_access_setting, .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 035e0dc3922caf99311cb42c3dadea8e255c587c..f383d5f1fed5ffcb9a547f0cd96472709310c2ef 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -38,6 +38,7 @@ #include "dm.h" #include "mac.h" #include "trx.h" +#include "../rtl8192c/fw_common.h" static int _ConfigVerTOutEP(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h index 1ffacdda734c96222ff59370dd917a3d73cb28e3..a55a803a0b4d6ac9ee40bfbdbf6b59db433c2e87 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h @@ -132,18 +132,6 @@ struct rtl92d_firmware_header { u32 rsvd5; }; -enum rtl8192d_h2c_cmd { - H2C_AP_OFFLOAD = 0, - H2C_SETPWRMODE = 1, - H2C_JOINBSSRPT = 2, - H2C_RSVDPAGE = 3, - H2C_RSSI_REPORT = 5, - H2C_RA_MASK = 6, - H2C_MAC_MODE_SEL = 9, - H2C_PWRM = 15, - MAX_H2CCMD -}; - int rtl92d_download_fw(struct ieee80211_hw *hw); void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c index 592125a5f19cbd3b43501d26345abe68c3b11fc8..1961b8e28dc16eb7261bc267641cf64b06791b77 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c @@ -677,7 +677,7 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n", + "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n", rtlphy->pwrgroup_cnt, index, rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]); if (index == 13) @@ -2531,7 +2531,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */ u4tmp = curveindex_5g[channel-1]; RTPRINT(rtlpriv, FINIT, INIT_IQK, - "ver 1 set RF-A, 5G, 0x28 = 0x%ulx !!\n", u4tmp); + "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp); if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY && rtlpriv->rtlhal.interfaceindex == 1) { bneed_powerdown_radio = @@ -2550,7 +2550,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, } else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) { u4tmp = curveindex_2g[channel-1]; RTPRINT(rtlpriv, FINIT, INIT_IQK, - "ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n", u4tmp); + "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp); if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY && rtlpriv->rtlhal.interfaceindex == 0) { bneed_powerdown_radio = @@ -2562,7 +2562,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, } rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp); RTPRINT(rtlpriv, FINIT, INIT_IQK, - "ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n", + "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800)); if (bneed_powerdown_radio) _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index 99c2ab5dfcebfe230a70da60c42a8427509dd9b9..8efbcc7af250830cd5a7117e4375186188c73b92 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ 
b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -127,7 +127,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, u32 rssi, total_rssi = 0; bool is_cck_rate; - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); + is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; pstats->packet_beacon = packet_beacon; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..11952b99daf8991626a23f6ca7d48f1f03b364ca --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile @@ -0,0 +1,19 @@ +obj-m := rtl8192ee.o + + +rtl8192ee-objs := \ + dm.o \ + fw.o \ + hw.o \ + led.o \ + phy.o \ + pwrseq.o \ + rf.o \ + sw.o \ + table.o \ + trx.o \ + + +obj-$(CONFIG_RTL8821AE) += rtl8192ee.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/rtlwifi/rtl8192ee/def.h new file mode 100644 index 0000000000000000000000000000000000000000..60f5728b4e2de98c1c2c87b6f90fb9c8f54a173c --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/def.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
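The new rtl8192ee Makefile above gates its object on CONFIG_RTL8821AE, which reads like a leftover from the rtl8821ae Makefile it appears to be copied from. Assuming the driver gets its own Kconfig symbol in the usual rtlwifi naming style (CONFIG_RTL8192EE, an assumption, not something shown in this patch), the gating line would presumably read:

        obj-$(CONFIG_RTL8192EE) += rtl8192ee.o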
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_DEF_H__ +#define __RTL92E_DEF_H__ + +#define RX_DESC_NUM_92E 512 + +#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 +#define HAL_PRIME_CHNL_OFFSET_LOWER 1 +#define HAL_PRIME_CHNL_OFFSET_UPPER 2 + +#define RX_MPDU_QUEUE 0 + +#define IS_HT_RATE(_rate) \ + (_rate >= DESC92C_RATEMCS0) +#define IS_CCK_RATE(_rate) \ + (_rate >= DESC92C_RATE1M && _rate <= DESC92C_RATE11M) +#define IS_OFDM_RATE(_rate) \ + (_rate >= DESC92C_RATE6M && _rate <= DESC92C_RATE54M) + +enum version_8192e { + VERSION_TEST_CHIP_2T2R_8192E = 0x0024, + VERSION_NORMAL_CHIP_2T2R_8192E = 0x102C, + VERSION_UNKNOWN = 0xFF, +}; + +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, +}; + +enum rtl_desc_qsel { + QSLT_BK = 0x2, + QSLT_BE = 0x0, + QSLT_VI = 0x5, + QSLT_VO = 0x7, + QSLT_BEACON = 0x10, + QSLT_HIGH = 0x11, + QSLT_MGNT = 0x12, + QSLT_CMD = 0x13, +}; + +enum rtl_desc92c_rate { + DESC92C_RATE1M = 0x00, + DESC92C_RATE2M = 0x01, + DESC92C_RATE5_5M = 0x02, + DESC92C_RATE11M = 0x03, + + DESC92C_RATE6M = 0x04, + DESC92C_RATE9M = 0x05, + DESC92C_RATE12M = 0x06, + DESC92C_RATE18M = 0x07, + DESC92C_RATE24M = 0x08, + DESC92C_RATE36M = 0x09, + DESC92C_RATE48M = 0x0a, + DESC92C_RATE54M = 0x0b, + + DESC92C_RATEMCS0 = 0x0c, + DESC92C_RATEMCS1 = 0x0d, + DESC92C_RATEMCS2 = 0x0e, + DESC92C_RATEMCS3 = 0x0f, + DESC92C_RATEMCS4 = 0x10, + DESC92C_RATEMCS5 = 0x11, + DESC92C_RATEMCS6 = 0x12, + DESC92C_RATEMCS7 = 0x13, + DESC92C_RATEMCS8 = 0x14, + DESC92C_RATEMCS9 = 0x15, + DESC92C_RATEMCS10 = 0x16, + DESC92C_RATEMCS11 = 0x17, + DESC92C_RATEMCS12 = 0x18, + DESC92C_RATEMCS13 = 0x19, + DESC92C_RATEMCS14 = 0x1a, + DESC92C_RATEMCS15 = 0x1b, +}; +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c new file mode 100644 index 0000000000000000000000000000000000000000..77deedf79d1dd3dcf6d4ba69457251912a60037e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c @@ -0,0 +1,1263 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
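A minimal standalone sketch of the rate-class checks introduced in def.h above (CCK codes 0x00-0x03, OFDM 0x04-0x0b, MCS from 0x0c up); the same kind of range test is presumably why the earlier hunks now pass the raw rxmcs code to RX_HAL_IS_CCK_RATE(). rate_class() and the sample values are illustrative only, not driver symbols:

        /* Illustrative only: mirrors IS_CCK_RATE/IS_OFDM_RATE/IS_HT_RATE from def.h */
        #include <stdio.h>

        #define RATE11M 0x03    /* DESC92C_RATE11M */
        #define RATE54M 0x0b    /* DESC92C_RATE54M */

        static const char *rate_class(unsigned char rate)
        {
                if (rate <= RATE11M)
                        return "CCK";
                if (rate <= RATE54M)
                        return "OFDM";
                return "HT (MCS)";
        }

        int main(void)
        {
                const unsigned char samples[] = { 0x00, 0x03, 0x08, 0x13 };
                unsigned int i;

                for (i = 0; i < sizeof(samples); i++)
                        printf("0x%02x -> %s\n", samples[i], rate_class(samples[i]));
                return 0;
        }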
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../base.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "trx.h" + +static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { + 0x7f8001fe, /* 0, +6.0dB */ + 0x788001e2, /* 1, +5.5dB */ + 0x71c001c7, /* 2, +5.0dB */ + 0x6b8001ae, /* 3, +4.5dB */ + 0x65400195, /* 4, +4.0dB */ + 0x5fc0017f, /* 5, +3.5dB */ + 0x5a400169, /* 6, +3.0dB */ + 0x55400155, /* 7, +2.5dB */ + 0x50800142, /* 8, +2.0dB */ + 0x4c000130, /* 9, +1.5dB */ + 0x47c0011f, /* 10, +1.0dB */ + 0x43c0010f, /* 11, +0.5dB */ + 0x40000100, /* 12, +0dB */ + 0x3c8000f2, /* 13, -0.5dB */ + 0x390000e4, /* 14, -1.0dB */ + 0x35c000d7, /* 15, -1.5dB */ + 0x32c000cb, /* 16, -2.0dB */ + 0x300000c0, /* 17, -2.5dB */ + 0x2d4000b5, /* 18, -3.0dB */ + 0x2ac000ab, /* 19, -3.5dB */ + 0x288000a2, /* 20, -4.0dB */ + 0x26000098, /* 21, -4.5dB */ + 0x24000090, /* 22, -5.0dB */ + 0x22000088, /* 23, -5.5dB */ + 0x20000080, /* 24, -6.0dB */ + 0x1e400079, /* 25, -6.5dB */ + 0x1c800072, /* 26, -7.0dB */ + 0x1b00006c, /* 27. -7.5dB */ + 0x19800066, /* 28, -8.0dB */ + 0x18000060, /* 29, -8.5dB */ + 0x16c0005b, /* 30, -9.0dB */ + 0x15800056, /* 31, -9.5dB */ + 0x14400051, /* 32, -10.0dB */ + 0x1300004c, /* 33, -10.5dB */ + 0x12000048, /* 34, -11.0dB */ + 0x11000044, /* 35, -11.5dB */ + 0x10000040, /* 36, -12.0dB */ + 0x0f00003c, /* 37, -12.5dB */ + 0x0e400039, /* 38, -13.0dB */ + 0x0d800036, /* 39, -13.5dB */ + 0x0cc00033, /* 40, -14.0dB */ + 0x0c000030, /* 41, -14.5dB */ + 0x0b40002d, /* 42, -15.0dB */ +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */ + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */ + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */ + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */ + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */ + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */ + {0x0c, 0x0c, 0x0a, 0x09, 
0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */ + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */ + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */ + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */ + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */ + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */ + {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */ +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ + {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */ + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */ + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ + {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */ + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */ + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */ + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */ + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */ + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */ + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */ + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */ + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */ + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */ + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */ + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */ + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */ + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ +}; + +static void rtl92ee_dm_diginit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + + dm_dig->cur_igvalue = rtl_get_bbreg(hw, DM_REG_IGI_A_11N, + DM_BIT_IGI_11N); + dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_dig->rx_gain_max = DM_DIG_MAX; + dm_dig->rx_gain_min = DM_DIG_MIN; + dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT; + dm_dig->back_range_max = DM_DIG_BACKOFF_MAX; + dm_dig->back_range_min = DM_DIG_BACKOFF_MIN; + dm_dig->pre_cck_cca_thres = 0xff; + dm_dig->cur_cck_cca_thres = 0x83; + dm_dig->forbidden_igi = 
DM_DIG_MIN; + dm_dig->large_fa_hit = 0; + dm_dig->recover_cnt = 0; + dm_dig->dig_dynamic_min = DM_DIG_MIN; + dm_dig->dig_dynamic_min_1 = DM_DIG_MIN; + dm_dig->media_connect_0 = false; + dm_dig->media_connect_1 = false; + rtlpriv->dm.dm_initialgain_enable = true; + dm_dig->bt30_cur_igi = 0x32; +} + +static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + u32 ret_value; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; + + rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD); + falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); + falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD); + falsealm_cnt->cnt_ofdm_cca = (ret_value & 0xffff); + falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD); + falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); + falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD); + falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); + + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail; + + ret_value = rtl_get_bbreg(hw, DM_REG_SC_CNT_11N, MASKDWORD); + falsealm_cnt->cnt_bw_lsc = (ret_value & 0xffff); + falsealm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16); + + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1); + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_LSB_11N, MASKBYTE0); + falsealm_cnt->cnt_cck_fail = ret_value; + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3); + falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD); + falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) | + ((ret_value & 0xFF00) >> 8); + + falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail + + falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_cck_fail; + + falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca + + falsealm_cnt->cnt_cck_cca; + + /*reset false alarm counter registers*/ + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0); + /*update ofdm counter*/ + rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0); + /*reset CCK CCA counter*/ + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2); + /*reset CCK FA counter*/ + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + falsealm_cnt->cnt_parity_fail, + 
falsealm_cnt->cnt_rate_illegal, + falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", + falsealm_cnt->cnt_ofdm_fail, + falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); +} + +static void rtl92ee_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + u8 cur_cck_cca_thresh; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + if (dm_dig->rssi_val_min > 25) { + cur_cck_cca_thresh = 0xcd; + } else if ((dm_dig->rssi_val_min <= 25) && + (dm_dig->rssi_val_min > 10)) { + cur_cck_cca_thresh = 0x83; + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + rtl92ee_dm_write_cck_cca_thres(hw, cur_cck_cca_thresh); +} + +static void rtl92ee_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + u8 dig_dynamic_min , dig_maxofmin; + bool bfirstconnect , bfirstdisconnect; + u8 dm_dig_max, dm_dig_min; + u8 current_igi = dm_dig->cur_igvalue; + u8 offset; + + /* AP,BT */ + if (mac->act_scanning) + return; + + dig_dynamic_min = dm_dig->dig_dynamic_min; + bfirstconnect = (mac->link_state >= MAC80211_LINKED) && + !dm_dig->media_connect_0; + bfirstdisconnect = (mac->link_state < MAC80211_LINKED) && + dm_dig->media_connect_0; + + dm_dig_max = 0x5a; + dm_dig_min = DM_DIG_MIN; + dig_maxofmin = DM_DIG_MAX_AP; + + if (mac->link_state >= MAC80211_LINKED) { + if ((dm_dig->rssi_val_min + 10) > dm_dig_max) + dm_dig->rx_gain_max = dm_dig_max; + else if ((dm_dig->rssi_val_min + 10) < dm_dig_min) + dm_dig->rx_gain_max = dm_dig_min; + else + dm_dig->rx_gain_max = dm_dig->rssi_val_min + 10; + + if (rtlpriv->dm.one_entry_only) { + offset = 0; + if (dm_dig->rssi_val_min - offset < dm_dig_min) + dig_dynamic_min = dm_dig_min; + else if (dm_dig->rssi_val_min - offset > + dig_maxofmin) + dig_dynamic_min = dig_maxofmin; + else + dig_dynamic_min = dm_dig->rssi_val_min - offset; + } else { + dig_dynamic_min = dm_dig_min; + } + + } else { + dm_dig->rx_gain_max = dm_dig_max; + dig_dynamic_min = dm_dig_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); + } + + if (rtlpriv->falsealm_cnt.cnt_all > 10000) { + if (dm_dig->large_fa_hit != 3) + dm_dig->large_fa_hit++; + if (dm_dig->forbidden_igi < current_igi) { + dm_dig->forbidden_igi = current_igi; + dm_dig->large_fa_hit = 1; + } + + if (dm_dig->large_fa_hit >= 3) { + if (dm_dig->forbidden_igi + 1 > dm_dig->rx_gain_max) + dm_dig->rx_gain_min = + dm_dig->rx_gain_max; + else + dm_dig->rx_gain_min = + dm_dig->forbidden_igi + 1; + dm_dig->recover_cnt = 3600; + } + } else { + if (dm_dig->recover_cnt != 0) { + dm_dig->recover_cnt--; + } else { + if (dm_dig->large_fa_hit < 3) { + if ((dm_dig->forbidden_igi - 1) < + dig_dynamic_min) { + dm_dig->forbidden_igi = dig_dynamic_min; + dm_dig->rx_gain_min = + dig_dynamic_min; + } else { + dm_dig->forbidden_igi--; + dm_dig->rx_gain_min = + dm_dig->forbidden_igi + 1; + } + } else { + dm_dig->large_fa_hit = 0; + } + } + } + + if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 5) + dm_dig->rx_gain_min = dm_dig_min; + + if (dm_dig->rx_gain_min > dm_dig->rx_gain_max) + dm_dig->rx_gain_min = dm_dig->rx_gain_max; + + if (mac->link_state 
>= MAC80211_LINKED) { + if (bfirstconnect) { + if (dm_dig->rssi_val_min <= dig_maxofmin) + current_igi = dm_dig->rssi_val_min; + else + current_igi = dig_maxofmin; + + dm_dig->large_fa_hit = 0; + } else { + if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2) + current_igi += 4; + else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1) + current_igi += 2; + else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) + current_igi -= 2; + + if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 5 && + rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1) + current_igi = dm_dig->rx_gain_min; + } + } else { + if (bfirstdisconnect) { + current_igi = dm_dig->rx_gain_min; + } else { + if (rtlpriv->falsealm_cnt.cnt_all > 10000) + current_igi += 4; + else if (rtlpriv->falsealm_cnt.cnt_all > 8000) + current_igi += 2; + else if (rtlpriv->falsealm_cnt.cnt_all < 500) + current_igi -= 2; + } + } + + if (current_igi > dm_dig->rx_gain_max) + current_igi = dm_dig->rx_gain_max; + if (current_igi < dm_dig->rx_gain_min) + current_igi = dm_dig->rx_gain_min; + + rtl92ee_dm_write_dig(hw , current_igi); + dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? + true : false); + dm_dig->dig_dynamic_min = dig_dynamic_min; +} + +void rtl92ee_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 cur_thres) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + + if (dm_dig->cur_cck_cca_thres != cur_thres) + rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11N, cur_thres); + + dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres; + dm_dig->cur_cck_cca_thres = cur_thres; +} + +void rtl92ee_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + + if (dm_dig->stop_dig) + return; + + if (dm_dig->cur_igvalue != current_igi) { + rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi); + if (rtlpriv->phy.rf_type != RF_1T1R) + rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi); + } + dm_dig->pre_igvalue = dm_dig->cur_igvalue; + dm_dig->cur_igvalue = current_igi; +} + +static void rtl92ee_rssi_dump_to_register(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, RA_RSSIDUMP, + rtlpriv->stats.rx_rssi_percentage[0]); + rtl_write_byte(rtlpriv, RB_RSSIDUMP, + rtlpriv->stats.rx_rssi_percentage[1]); + /*It seems the following values are not initialized. 
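A minimal sketch of the false-alarm-driven IGI step used in rtl92ee_dm_dig() above, with the DM_DIG_FA_TH0/1/2 thresholds defined later in dm.h; the helper name and parameters are illustrative, not driver symbols:

        /* Illustrative only: the IGI step-and-clamp rule from rtl92ee_dm_dig() */
        #define FA_TH0  0x200   /* DM_DIG_FA_TH0 */
        #define FA_TH1  0x300   /* DM_DIG_FA_TH1 */
        #define FA_TH2  0x400   /* DM_DIG_FA_TH2 */

        static int adjust_igi(int igi, unsigned int fa_cnt, int gain_min, int gain_max)
        {
                if (fa_cnt > FA_TH2)
                        igi += 4;       /* many false alarms: reduce sensitivity */
                else if (fa_cnt > FA_TH1)
                        igi += 2;
                else if (fa_cnt < FA_TH0)
                        igi -= 2;       /* quiet air: regain sensitivity */

                if (igi > gain_max)
                        igi = gain_max;
                if (igi < gain_min)
                        igi = gain_min;
                return igi;
        }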
+ *According to Windows code, + *these value will only be valid with JAGUAR chips + */ + /* Rx EVM */ + rtl_write_byte(rtlpriv, RS1_RXEVMDUMP, rtlpriv->stats.rx_evm_dbm[0]); + rtl_write_byte(rtlpriv, RS2_RXEVMDUMP, rtlpriv->stats.rx_evm_dbm[1]); + /* Rx SNR */ + rtl_write_byte(rtlpriv, RA_RXSNRDUMP, + (u8)(rtlpriv->stats.rx_snr_db[0])); + rtl_write_byte(rtlpriv, RB_RXSNRDUMP, + (u8)(rtlpriv->stats.rx_snr_db[1])); + /* Rx Cfo_Short */ + rtl_write_word(rtlpriv, RA_CFOSHORTDUMP, + rtlpriv->stats.rx_cfo_short[0]); + rtl_write_word(rtlpriv, RB_CFOSHORTDUMP, + rtlpriv->stats.rx_cfo_short[1]); + /* Rx Cfo_Tail */ + rtl_write_word(rtlpriv, RA_CFOLONGDUMP, rtlpriv->stats.rx_cfo_tail[0]); + rtl_write_word(rtlpriv, RB_CFOLONGDUMP, rtlpriv->stats.rx_cfo_tail[1]); +} + +static void rtl92ee_dm_find_minimum_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *rtl_dm_dig = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); + + /* Determine the minimum RSSI */ + if ((mac->link_state < MAC80211_LINKED) && + (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { + rtl_dm_dig->min_undec_pwdb_for_dm = 0; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "Not connected to any\n"); + } + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Client PWDB = 0x%lx\n", + rtlpriv->dm.entry_min_undec_sm_pwdb); + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "STA Default Port PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Ext Port or disconnet PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "MinUndecoratedPWDBForDM =%d\n", + rtl_dm_dig->min_undec_pwdb_for_dm); +} + +static void rtl92ee_dm_check_rssi_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct rtl_dm *dm = rtl_dm(rtlpriv); + struct rtl_sta_info *drv_priv; + u8 h2c[4] = { 0 }; + long max = 0, min = 0xff; + u8 i = 0; + + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + /* AP & ADHOC & MESH */ + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + struct rssi_sta *stat = &drv_priv->rssi_stat; + + if (stat->undec_sm_pwdb < min) + min = stat->undec_sm_pwdb; + if (stat->undec_sm_pwdb > max) + max = stat->undec_sm_pwdb; + + h2c[3] = 0; + h2c[2] = (u8)(dm->undec_sm_pwdb & 0xFF); + h2c[1] = 0x20; + h2c[0] = ++i; + rtl92ee_fill_h2c_cmd(hw, H2C_92E_RSSI_REPORT, 4, h2c); + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + /* If associated entry is found */ + if (max != 0) { + dm->entry_max_undec_sm_pwdb = max; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMaxPWDB = 0x%lx(%ld)\n", max, max); + } else { + dm->entry_max_undec_sm_pwdb = 0; + } + /* If associated entry is found */ + if (min != 0xff) { + dm->entry_min_undec_sm_pwdb = min; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMinPWDB = 0x%lx(%ld)\n", min, min); + } else { + dm->entry_min_undec_sm_pwdb = 0; + } + } + + /* Indicate Rx 
signal strength to FW. */ + if (dm->useramask) { + h2c[3] = 0; + h2c[2] = (u8)(dm->undec_sm_pwdb & 0xFF); + h2c[1] = 0x20; + h2c[0] = 0; + rtl92ee_fill_h2c_cmd(hw, H2C_92E_RSSI_REPORT, 4, h2c); + } else { + rtl_write_byte(rtlpriv, 0x4fe, dm->undec_sm_pwdb); + } + rtl92ee_rssi_dump_to_register(hw); + rtl92ee_dm_find_minimum_rssi(hw); + dm_dig->rssi_val_min = rtlpriv->dm_digtable.min_undec_pwdb_for_dm; +} + +static void rtl92ee_dm_init_primary_cca_check(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct dynamic_primary_cca *primarycca = &rtlpriv->primarycca; + + rtlhal->rts_en = 0; + primarycca->dup_rts_flag = 0; + primarycca->intf_flag = 0; + primarycca->intf_type = 0; + primarycca->monitor_flag = 0; + primarycca->ch_offset = 0; + primarycca->mf_state = 0; +} + +static bool rtl92ee_dm_is_edca_turbo_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->mac80211.mode == WIRELESS_MODE_B) + return true; + + return false; +} + +void rtl92ee_dm_init_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.current_turbo_edca = false; + rtlpriv->dm.is_cur_rdlstate = false; + rtlpriv->dm.is_any_nonbepkts = false; +} + +static void rtl92ee_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + static u64 last_txok_cnt; + static u64 last_rxok_cnt; + u64 cur_txok_cnt = 0; + u64 cur_rxok_cnt = 0; + u32 edca_be_ul = 0x5ea42b; + u32 edca_be_dl = 0x5ea42b; /*not sure*/ + u32 edca_be = 0x5ea42b; + bool is_cur_rdlstate; + bool b_edca_turbo_on = false; + + if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100) + rtlpriv->dm.is_any_nonbepkts = true; + rtlpriv->dm.dbginfo.num_non_be_pkt = 0; + + cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; + + /*b_bias_on_rx = false;*/ + b_edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting)) ? + true : false; + + if (rtl92ee_dm_is_edca_turbo_disable(hw)) + goto check_exit; + + if (b_edca_turbo_on) { + is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ? + true : false; + + edca_be = is_cur_rdlstate ? 
edca_be_dl : edca_be_ul; + rtl_write_dword(rtlpriv , REG_EDCA_BE_PARAM , edca_be); + rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate; + rtlpriv->dm.current_turbo_edca = true; + } else { + if (rtlpriv->dm.current_turbo_edca) { + u8 tmp = AC0_BE; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *)(&tmp)); + } + rtlpriv->dm.current_turbo_edca = false; + } + +check_exit: + rtlpriv->dm.is_any_nonbepkts = false; + last_txok_cnt = rtlpriv->stats.txbytesunicast; + last_rxok_cnt = rtlpriv->stats.rxbytesunicast; +} + +static void rtl92ee_dm_dynamic_edcca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 reg_c50 , reg_c58; + bool fw_current_in_ps_mode = false; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_in_ps_mode)); + if (fw_current_in_ps_mode) + return; + + reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + + if (reg_c50 > 0x28 && reg_c58 > 0x28) { + if (!rtlpriv->rtlhal.pre_edcca_enable) { + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03); + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00); + rtlpriv->rtlhal.pre_edcca_enable = true; + } + } else if (reg_c50 < 0x25 && reg_c58 < 0x25) { + if (rtlpriv->rtlhal.pre_edcca_enable) { + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f); + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f); + rtlpriv->rtlhal.pre_edcca_enable = false; + } + } +} + +static void rtl92ee_dm_adaptivity(struct ieee80211_hw *hw) +{ + rtl92ee_dm_dynamic_edcca(hw); +} + +static void rtl92ee_dm_write_dynamic_cca(struct ieee80211_hw *hw, + u8 cur_mf_state) +{ + struct dynamic_primary_cca *primarycca = &rtl_priv(hw)->primarycca; + + if (primarycca->mf_state != cur_mf_state) + rtl_set_bbreg(hw, DM_REG_L1SBD_PD_CH_11N, BIT(8) | BIT(7), + cur_mf_state); + + primarycca->mf_state = cur_mf_state; +} + +static void rtl92ee_dm_dynamic_primary_cca_ckeck(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; + struct dynamic_primary_cca *primarycca = &rtlpriv->primarycca; + bool is40mhz = false; + u64 ofdm_cca, ofdm_fa, bw_usc_cnt, bw_lsc_cnt; + u8 sec_ch_offset; + u8 cur_mf_state; + static u8 count_down = MONITOR_TIME; + + ofdm_cca = falsealm_cnt->cnt_ofdm_cca; + ofdm_fa = falsealm_cnt->cnt_ofdm_fail; + bw_usc_cnt = falsealm_cnt->cnt_bw_usc; + bw_lsc_cnt = falsealm_cnt->cnt_bw_lsc; + is40mhz = rtlpriv->mac80211.bw_40; + sec_ch_offset = rtlpriv->mac80211.cur_40_prime_sc; + /* NIC: 2: sec is below, 1: sec is above */ + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) { + cur_mf_state = MF_USC_LSC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + return; + } + + if (rtlpriv->mac80211.link_state < MAC80211_LINKED) + return; + + if (is40mhz) + return; + + if (primarycca->pricca_flag == 0) { + /* Primary channel is above + * NOTE: duplicate CTS can remove this condition + */ + if (sec_ch_offset == 2) { + if ((ofdm_cca > OFDMCCA_TH) && + (bw_lsc_cnt > (bw_usc_cnt + BW_IND_BIAS)) && + (ofdm_fa > (ofdm_cca >> 1))) { + primarycca->intf_type = 1; + primarycca->intf_flag = 1; + cur_mf_state = MF_USC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + primarycca->pricca_flag = 1; + } else if ((ofdm_cca > OFDMCCA_TH) && + (bw_lsc_cnt > (bw_usc_cnt + BW_IND_BIAS)) && + (ofdm_fa < (ofdm_cca >> 1))) { + primarycca->intf_type = 2; + primarycca->intf_flag = 1; + cur_mf_state = MF_USC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + 
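A minimal sketch of the direction test in rtl92ee_dm_check_edca_turbo() above: unicast byte counts from the last watchdog period pick either the uplink or the downlink BE parameter, which is then written to REG_EDCA_BE_PARAM. The helper below is illustrative, not driver code:

        /* Illustrative only: RX dominating TX by more than 4x selects the DL value */
        #include <stdbool.h>
        #include <stdint.h>

        static uint32_t pick_edca_be_param(uint64_t tx_bytes, uint64_t rx_bytes,
                                           uint32_t edca_be_ul, uint32_t edca_be_dl)
        {
                bool rx_dominant = rx_bytes > tx_bytes * 4;

                return rx_dominant ? edca_be_dl : edca_be_ul;
        }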
primarycca->pricca_flag = 1; + primarycca->dup_rts_flag = 1; + rtlpriv->rtlhal.rts_en = 1; + } else { + primarycca->intf_type = 0; + primarycca->intf_flag = 0; + cur_mf_state = MF_USC_LSC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + rtlpriv->rtlhal.rts_en = 0; + primarycca->dup_rts_flag = 0; + } + } else if (sec_ch_offset == 1) { + if ((ofdm_cca > OFDMCCA_TH) && + (bw_usc_cnt > (bw_lsc_cnt + BW_IND_BIAS)) && + (ofdm_fa > (ofdm_cca >> 1))) { + primarycca->intf_type = 1; + primarycca->intf_flag = 1; + cur_mf_state = MF_LSC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + primarycca->pricca_flag = 1; + } else if ((ofdm_cca > OFDMCCA_TH) && + (bw_usc_cnt > (bw_lsc_cnt + BW_IND_BIAS)) && + (ofdm_fa < (ofdm_cca >> 1))) { + primarycca->intf_type = 2; + primarycca->intf_flag = 1; + cur_mf_state = MF_LSC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + primarycca->pricca_flag = 1; + primarycca->dup_rts_flag = 1; + rtlpriv->rtlhal.rts_en = 1; + } else { + primarycca->intf_type = 0; + primarycca->intf_flag = 0; + cur_mf_state = MF_USC_LSC; + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + rtlpriv->rtlhal.rts_en = 0; + primarycca->dup_rts_flag = 0; + } + } + } else {/* PrimaryCCA->PriCCA_flag==1 */ + count_down--; + if (count_down == 0) { + count_down = MONITOR_TIME; + primarycca->pricca_flag = 0; + cur_mf_state = MF_USC_LSC; + /* default */ + rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state); + rtlpriv->rtlhal.rts_en = 0; + primarycca->dup_rts_flag = 0; + primarycca->intf_type = 0; + primarycca->intf_flag = 0; + } + } +} + +static void rtl92ee_dm_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 crystal_cap; + u32 packet_count; + int cfo_khz_a , cfo_khz_b , cfo_ave = 0, adjust_xtal = 0; + int cfo_ave_diff; + + if (rtlpriv->mac80211.link_state < MAC80211_LINKED) { + if (rtldm->atc_status == ATC_STATUS_OFF) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + /* Disable CFO tracking for BT */ + if (rtlpriv->cfg->ops->get_btc_status()) { + if (!rtlpriv->btcoexist.btc_ops-> + btc_is_bt_disabled(rtlpriv)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, + "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n"); + return; + } + } + /* Reset Crystal Cap */ + if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) { + rtldm->crystal_cap = rtlpriv->efuse.crystalcap; + crystal_cap = rtldm->crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + } + } else { + cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280; + cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280; + packet_count = rtldm->packet_count; + + if (packet_count == rtldm->packet_count_pre) + return; + + rtldm->packet_count_pre = packet_count; + + if (rtlpriv->phy.rf_type == RF_1T1R) + cfo_ave = cfo_khz_a; + else + cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1; + + cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ? 
+ (rtldm->cfo_ave_pre - cfo_ave) : + (cfo_ave - rtldm->cfo_ave_pre); + + if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { + rtldm->large_cfo_hit = 1; + return; + } + rtldm->large_cfo_hit = 0; + + rtldm->cfo_ave_pre = cfo_ave; + + if (cfo_ave >= -rtldm->cfo_threshold && + cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) { + if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10; + rtldm->is_freeze = 1; + } else { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL; + } + } + + if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f) + adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1; + else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && + rtlpriv->dm.crystal_cap > 0) + adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1; + + if (adjust_xtal != 0) { + rtldm->is_freeze = 0; + rtldm->crystal_cap += adjust_xtal; + + if (rtldm->crystal_cap > 0x3f) + rtldm->crystal_cap = 0x3f; + else if (rtldm->crystal_cap < 0) + rtldm->crystal_cap = 0; + + crystal_cap = rtldm->crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + } + + if (cfo_ave < CFO_THRESHOLD_ATC && + cfo_ave > -CFO_THRESHOLD_ATC) { + if (rtldm->atc_status == ATC_STATUS_ON) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_OFF); + rtldm->atc_status = ATC_STATUS_OFF; + } + } else { + if (rtldm->atc_status == ATC_STATUS_OFF) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + } + } +} + +static void rtl92ee_dm_init_txpower_tracking(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *dm = rtl_dm(rtlpriv); + u8 path; + + dm->txpower_tracking = true; + dm->default_ofdm_index = 30; + dm->default_cck_index = 20; + + dm->swing_idx_cck_base = dm->default_cck_index; + dm->cck_index = dm->default_cck_index; + + for (path = RF90_PATH_A; path < MAX_RF_PATH; path++) { + dm->swing_idx_ofdm_base[path] = dm->default_ofdm_index; + dm->ofdm_index[path] = dm->default_ofdm_index; + dm->delta_power_index[path] = 0; + dm->delta_power_index_last[path] = 0; + dm->power_index_offset[path] = 0; + } +} + +void rtl92ee_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *p_ra = &rtlpriv->ra; + + p_ra->ratr_state = DM_RATR_STA_INIT; + p_ra->pre_ratr_state = DM_RATR_STA_INIT; + + if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) + rtlpriv->dm.useramask = true; + else + rtlpriv->dm.useramask = false; + + p_ra->ldpc_thres = 35; + p_ra->use_ldpc = false; + p_ra->high_rssi_thresh_for_ra = 50; + p_ra->low_rssi_thresh_for_ra40m = 20; +} + +static bool _rtl92ee_dm_ra_state_check(struct ieee80211_hw *hw, + s32 rssi, u8 *ratr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *p_ra = &rtlpriv->ra; + const u8 go_up_gap = 5; + u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra; + u32 low_rssithresh_for_ra = p_ra->low_rssi_thresh_for_ra40m; + u8 state; + + /* Threshold Adjustment: + * when RSSI state trends to go up one or two levels, + * make sure RSSI is high enough. + * Here GoUpGap is added to solve + * the boundary's level alternation issue. 
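A minimal sketch of the CFO-to-crystal-cap step in rtl92ee_dm_dynamic_atc_switch() above: the per-path CFO tail readings are scaled to kHz (times 3125/1280), averaged, and an excursion beyond the CFO_THRESHOLD_XTAL window becomes a bounded crystal-cap adjustment. The helper is illustrative and slightly simplified (it ignores the freeze and large-CFO bookkeeping):

        /* Illustrative only: scale, average and clamp as in the driver above */
        static int xtal_cap_adjust(int cfo_tail_a, int cfo_tail_b, int two_paths,
                                   int cap)
        {
                const int thr = 10;                     /* CFO_THRESHOLD_XTAL, kHz */
                int cfo_a = cfo_tail_a * 3125 / 1280;   /* raw tail -> kHz */
                int cfo_b = cfo_tail_b * 3125 / 1280;
                int cfo_ave = two_paths ? (cfo_a + cfo_b) / 2 : cfo_a;
                int step = 0;

                if (cfo_ave > thr)
                        step = ((cfo_ave - thr) >> 2) + 1;
                else if (cfo_ave < -thr)
                        step = ((cfo_ave + thr) >> 2) - 1;

                cap += step;
                if (cap > 0x3f)
                        cap = 0x3f;
                if (cap < 0)
                        cap = 0;
                return cap;
        }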
+ */ + switch (*ratr_state) { + case DM_RATR_STA_INIT: + case DM_RATR_STA_HIGH: + break; + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra += go_up_gap; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra += go_up_gap; + low_rssithresh_for_ra += go_up_gap; + break; + default: + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "wrong rssi level setting %d !", *ratr_state); + break; + } + + /* Decide RATRState by RSSI. */ + if (rssi > high_rssithresh_for_ra) + state = DM_RATR_STA_HIGH; + else if (rssi > low_rssithresh_for_ra) + state = DM_RATR_STA_MIDDLE; + else + state = DM_RATR_STA_LOW; + + if (*ratr_state != state) { + *ratr_state = state; + return true; + } + + return false; +} + +static void rtl92ee_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &rtlpriv->ra; + struct ieee80211_sta *sta = NULL; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver is going to unload\n"); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver does not control rate adaptive mask\n"); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + if (rtlpriv->dm.undec_sm_pwdb < p_ra->ldpc_thres) { + p_ra->use_ldpc = true; + p_ra->lower_rts_rate = true; + } else if (rtlpriv->dm.undec_sm_pwdb > + (p_ra->ldpc_thres - 5)) { + p_ra->use_ldpc = false; + p_ra->lower_rts_rate = false; + } + if (_rtl92ee_dm_ra_state_check(hw, rtlpriv->dm.undec_sm_pwdb, + &p_ra->ratr_state)) { + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, + p_ra->ratr_state); + rcu_read_unlock(); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + +static void rtl92ee_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap; + + rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11)); + rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL; +} + +void rtl92ee_dm_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + + rtl92ee_dm_diginit(hw); + rtl92ee_dm_init_rate_adaptive_mask(hw); + rtl92ee_dm_init_primary_cca_check(hw); + rtl92ee_dm_init_edca_turbo(hw); + rtl92ee_dm_init_txpower_tracking(hw); + rtl92ee_dm_init_dynamic_atc_switch(hw); +} + +static void rtl92ee_dm_common_info_self_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *drv_priv; + u8 cnt = 0; + + rtlpriv->dm.one_entry_only = false; + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION && + rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + rtlpriv->dm.one_entry_only = true; + return; + } + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + cnt++; + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + if (cnt == 1) + rtlpriv->dm.one_entry_only = true; + } +} + +void rtl92ee_dm_dynamic_arfb_select(struct ieee80211_hw *hw, + u8 rate, bool collision_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS12) { + if 
(collision_state == 1) { + if (rate == DESC92C_RATEMCS12) { + rtl_write_dword(rtlpriv, REG_DARFRC, 0x0); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x07060501); + } else if (rate == DESC92C_RATEMCS11) { + rtl_write_dword(rtlpriv, REG_DARFRC, 0x0); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x07070605); + } else if (rate == DESC92C_RATEMCS10) { + rtl_write_dword(rtlpriv, REG_DARFRC, 0x0); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x08080706); + } else if (rate == DESC92C_RATEMCS9) { + rtl_write_dword(rtlpriv, REG_DARFRC, 0x0); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x08080707); + } else { + rtl_write_dword(rtlpriv, REG_DARFRC, 0x0); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x09090808); + } + } else { /* collision_state == 0 */ + if (rate == DESC92C_RATEMCS12) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x05010000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x09080706); + } else if (rate == DESC92C_RATEMCS11) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x06050000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x09080807); + } else if (rate == DESC92C_RATEMCS10) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x07060000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x0a090908); + } else if (rate == DESC92C_RATEMCS9) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x07070000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x0a090808); + } else { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x08080000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x0b0a0909); + } + } + } else { /* MCS13~MCS15, 1SS, G-mode */ + if (collision_state == 1) { + if (rate == DESC92C_RATEMCS15) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x05040302); + } else if (rate == DESC92C_RATEMCS14) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x06050302); + } else if (rate == DESC92C_RATEMCS13) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x07060502); + } else { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x06050402); + } + } else{ /* collision_state == 0 */ + if (rate == DESC92C_RATEMCS15) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x03020000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x07060504); + } else if (rate == DESC92C_RATEMCS14) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x03020000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x08070605); + } else if (rate == DESC92C_RATEMCS13) { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x05020000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x09080706); + } else { + rtl_write_dword(rtlpriv, REG_DARFRC, + 0x04020000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, + 0x08070605); + } + } + } +} + +void rtl92ee_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool fw_current_inpsmode = false; + bool fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inpsmode)); + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 *)(&fw_ps_awake)); + if (ppsc->p2p_ps_info.p2p_ps_mode) + fw_ps_awake = false; + + if ((ppsc->rfpwr_state == ERFON) && + ((!fw_current_inpsmode) && fw_ps_awake) && + (!ppsc->rfchange_inprogress)) { + rtl92ee_dm_common_info_self_update(hw); + rtl92ee_dm_false_alarm_counter_statistics(hw); + rtl92ee_dm_check_rssi_monitor(hw); + rtl92ee_dm_dig(hw); + rtl92ee_dm_adaptivity(hw); + 
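A minimal sketch of the RSSI hysteresis in _rtl92ee_dm_ra_state_check() above: the high and low thresholds (50 and 20 as initialised in rtl92ee_dm_init_rate_adaptive_mask()) are raised by the 5 dB go-up gap when the link currently sits in a lower state, so the RA state only climbs once the signal is clearly over the boundary. Names below are illustrative:

        /* Illustrative only: state numbering follows DM_RATR_STA_* in dm.h */
        enum ra_state { RA_INIT = 0, RA_HIGH = 1, RA_MIDDLE = 2, RA_LOW = 3 };

        static enum ra_state next_ra_state(enum ra_state cur, int rssi)
        {
                const int gap = 5;              /* go_up_gap */
                int high_th = 50, low_th = 20;

                if (cur == RA_MIDDLE) {
                        high_th += gap;
                } else if (cur == RA_LOW) {
                        high_th += gap;
                        low_th += gap;
                }

                if (rssi > high_th)
                        return RA_HIGH;
                if (rssi > low_th)
                        return RA_MIDDLE;
                return RA_LOW;
        }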
rtl92ee_dm_cck_packet_detection_thresh(hw); + rtl92ee_dm_refresh_rate_adaptive_mask(hw); + rtl92ee_dm_check_edca_turbo(hw); + rtl92ee_dm_dynamic_atc_switch(hw); + rtl92ee_dm_dynamic_primary_cca_ckeck(hw); + } +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h new file mode 100644 index 0000000000000000000000000000000000000000..881db7d6fef77d57aceff86612d5cfb1444859c7 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h @@ -0,0 +1,267 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_DM_H__ +#define __RTL92E_DM_H__ + +#define OFDMCCA_TH 500 +#define BW_IND_BIAS 500 +#define MF_USC 2 +#define MF_LSC 1 +#define MF_USC_LSC 0 +#define MONITOR_TIME 30 + +#define MAIN_ANT 0 +#define AUX_ANT 1 +#define MAIN_ANT_CG_TRX 1 +#define AUX_ANT_CG_TRX 0 +#define MAIN_ANT_CGCS_RX 0 +#define AUX_ANT_CGCS_RX 1 + +/*RF REG LIST*/ +#define DM_REG_RF_MODE_11N 0x00 +#define DM_REG_RF_0B_11N 0x0B +#define DM_REG_CHNBW_11N 0x18 +#define DM_REG_T_METER_11N 0x24 +#define DM_REG_RF_25_11N 0x25 +#define DM_REG_RF_26_11N 0x26 +#define DM_REG_RF_27_11N 0x27 +#define DM_REG_RF_2B_11N 0x2B +#define DM_REG_RF_2C_11N 0x2C +#define DM_REG_RXRF_A3_11N 0x3C +#define DM_REG_T_METER_92D_11N 0x42 +#define DM_REG_T_METER_92E_11N 0x42 + +/*BB REG LIST*/ +/*PAGE 8 */ +#define DM_REG_BB_CTRL_11N 0x800 +#define DM_REG_RF_PIN_11N 0x804 +#define DM_REG_PSD_CTRL_11N 0x808 +#define DM_REG_TX_ANT_CTRL_11N 0x80C +#define DM_REG_BB_PWR_SAV5_11N 0x818 +#define DM_REG_CCK_RPT_FORMAT_11N 0x824 +#define DM_REG_RX_DEFUALT_A_11N 0x858 +#define DM_REG_RX_DEFUALT_B_11N 0x85A +#define DM_REG_BB_PWR_SAV3_11N 0x85C +#define DM_REG_ANTSEL_CTRL_11N 0x860 +#define DM_REG_RX_ANT_CTRL_11N 0x864 +#define DM_REG_PIN_CTRL_11N 0x870 +#define DM_REG_BB_PWR_SAV1_11N 0x874 +#define DM_REG_ANTSEL_PATH_11N 0x878 +#define DM_REG_BB_3WIRE_11N 0x88C +#define DM_REG_SC_CNT_11N 0x8C4 +#define DM_REG_PSD_DATA_11N 0x8B4 +/*PAGE 9*/ +#define DM_REG_ANT_MAPPING1_11N 0x914 +#define DM_REG_ANT_MAPPING2_11N 0x918 +/*PAGE A*/ +#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00 +#define DM_REG_CCK_CCA_11N 0xA0A +#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C +#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10 +#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14 +#define DM_REG_CCK_FILTER_PARA1_11N 0xA22 +#define DM_REG_CCK_FILTER_PARA2_11N 0xA23 +#define DM_REG_CCK_FILTER_PARA3_11N 0xA24 +#define DM_REG_CCK_FILTER_PARA4_11N 0xA25 +#define DM_REG_CCK_FILTER_PARA5_11N 0xA26 +#define DM_REG_CCK_FILTER_PARA6_11N 0xA27 +#define DM_REG_CCK_FILTER_PARA7_11N 0xA28 +#define DM_REG_CCK_FILTER_PARA8_11N 0xA29 +#define DM_REG_CCK_FA_RST_11N 0xA2C +#define 
DM_REG_CCK_FA_MSB_11N 0xA58 +#define DM_REG_CCK_FA_LSB_11N 0xA5C +#define DM_REG_CCK_CCA_CNT_11N 0xA60 +#define DM_REG_BB_PWR_SAV4_11N 0xA74 +/*PAGE B */ +#define DM_REG_LNA_SWITCH_11N 0xB2C +#define DM_REG_PATH_SWITCH_11N 0xB30 +#define DM_REG_RSSI_CTRL_11N 0xB38 +#define DM_REG_CONFIG_ANTA_11N 0xB68 +#define DM_REG_RSSI_BT_11N 0xB9C +/*PAGE C */ +#define DM_REG_OFDM_FA_HOLDC_11N 0xC00 +#define DM_REG_RX_PATH_11N 0xC04 +#define DM_REG_TRMUX_11N 0xC08 +#define DM_REG_OFDM_FA_RSTC_11N 0xC0C +#define DM_REG_RXIQI_MATRIX_11N 0xC14 +#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C +#define DM_REG_IGI_A_11N 0xC50 +#define DM_REG_ANTDIV_PARA2_11N 0xC54 +#define DM_REG_IGI_B_11N 0xC58 +#define DM_REG_ANTDIV_PARA3_11N 0xC5C +#define DM_REG_L1SBD_PD_CH_11N 0XC6C +#define DM_REG_BB_PWR_SAV2_11N 0xC70 +#define DM_REG_RX_OFF_11N 0xC7C +#define DM_REG_TXIQK_MATRIXA_11N 0xC80 +#define DM_REG_TXIQK_MATRIXB_11N 0xC88 +#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94 +#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C +#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0 +#define DM_REG_ANTDIV_PARA1_11N 0xCA4 +#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0 +/*PAGE D */ +#define DM_REG_OFDM_FA_RSTD_11N 0xD00 +#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0 +#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4 +#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8 +/*PAGE E */ +#define DM_REG_TXAGC_A_6_18_11N 0xE00 +#define DM_REG_TXAGC_A_24_54_11N 0xE04 +#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08 +#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10 +#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14 +#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18 +#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C +#define DM_REG_FPGA0_IQK_11N 0xE28 +#define DM_REG_TXIQK_TONE_A_11N 0xE30 +#define DM_REG_RXIQK_TONE_A_11N 0xE34 +#define DM_REG_TXIQK_PI_A_11N 0xE38 +#define DM_REG_RXIQK_PI_A_11N 0xE3C +#define DM_REG_TXIQK_11N 0xE40 +#define DM_REG_RXIQK_11N 0xE44 +#define DM_REG_IQK_AGC_PTS_11N 0xE48 +#define DM_REG_IQK_AGC_RSP_11N 0xE4C +#define DM_REG_BLUETOOTH_11N 0xE6C +#define DM_REG_RX_WAIT_CCA_11N 0xE70 +#define DM_REG_TX_CCK_RFON_11N 0xE74 +#define DM_REG_TX_CCK_BBON_11N 0xE78 +#define DM_REG_OFDM_RFON_11N 0xE7C +#define DM_REG_OFDM_BBON_11N 0xE80 +#define DM_REG_TX2RX_11N 0xE84 +#define DM_REG_TX2TX_11N 0xE88 +#define DM_REG_RX_CCK_11N 0xE8C +#define DM_REG_RX_OFDM_11N 0xED0 +#define DM_REG_RX_WAIT_RIFS_11N 0xED4 +#define DM_REG_RX2RX_11N 0xED8 +#define DM_REG_STANDBY_11N 0xEDC +#define DM_REG_SLEEP_11N 0xEE0 +#define DM_REG_PMPD_ANAEN_11N 0xEEC + +/*MAC REG LIST*/ +#define DM_REG_BB_RST_11N 0x02 +#define DM_REG_ANTSEL_PIN_11N 0x4C +#define DM_REG_EARLY_MODE_11N 0x4D0 +#define DM_REG_RSSI_MONITOR_11N 0x4FE +#define DM_REG_EDCA_VO_11N 0x500 +#define DM_REG_EDCA_VI_11N 0x504 +#define DM_REG_EDCA_BE_11N 0x508 +#define DM_REG_EDCA_BK_11N 0x50C +#define DM_REG_TXPAUSE_11N 0x522 +#define DM_REG_RESP_TX_11N 0x6D8 +#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0 +#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4 + +/*DIG Related*/ +#define DM_BIT_IGI_11N 0x0000007F + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 43 +#define CCK_TABLE_LENGTH 33 + +#define OFDM_TABLE_SIZE 43 +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_THRESH_HIGH 40 +#define DM_DIG_THRESH_LOW 35 + +#define DM_FALSEALARM_THRESH_LOW 400 +#define DM_FALSEALARM_THRESH_HIGH 1000 + +#define DM_DIG_MAX 0x3e +#define DM_DIG_MIN 0x1e + +#define DM_DIG_MAX_AP 0x32 +#define DM_DIG_MIN_AP 0x20 + +#define DM_DIG_FA_UPPER 0x3e +#define DM_DIG_FA_LOWER 0x1e +#define 
DM_DIG_FA_TH0 0x200 +#define DM_DIG_FA_TH1 0x300 +#define DM_DIG_FA_TH2 0x400 + +#define DM_DIG_BACKOFF_MAX 12 +#define DM_DIG_BACKOFF_MIN -4 +#define DM_DIG_BACKOFF_DEFAULT 10 + +#define RXPATHSELECTION_SS_TH_LOW 30 +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVAL 25 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define TXPWRTRACK_MAX_IDX 6 + +/* Dynamic ATC switch */ +#define ATC_STATUS_OFF 0x0 /* enable */ +#define ATC_STATUS_ON 0x1 /* disable */ +#define CFO_THRESHOLD_XTAL 10 /* kHz */ +#define CFO_THRESHOLD_ATC 80 /* kHz */ + +/* RSSI Dump Message */ +#define RA_RSSIDUMP 0xcb0 +#define RB_RSSIDUMP 0xcb1 +#define RS1_RXEVMDUMP 0xcb2 +#define RS2_RXEVMDUMP 0xcb3 +#define RA_RXSNRDUMP 0xcb4 +#define RB_RXSNRDUMP 0xcb5 +#define RA_CFOSHORTDUMP 0xcb6 +#define RB_CFOSHORTDUMP 0xcb8 +#define RA_CFOLONGDUMP 0xcba +#define RB_CFOLONGDUMP 0xcbc + +void rtl92ee_dm_init(struct ieee80211_hw *hw); +void rtl92ee_dm_watchdog(struct ieee80211_hw *hw); +void rtl92ee_dm_write_cck_cca_thres(struct ieee80211_hw *hw, + u8 cur_thres); +void rtl92ee_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi); +void rtl92ee_dm_init_edca_turbo(struct ieee80211_hw *hw); +void rtl92ee_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl92ee_dm_dynamic_arfb_select(struct ieee80211_hw *hw, + u8 rate, bool collision_state); +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..45c128b91f7fecb0d6ff07bfa7edc72f3a69847e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c @@ -0,0 +1,906 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../core.h" +#include "reg.h" +#include "def.h" +#include "fw.h" +#include "dm.h" + +static void _rtl92ee_enable_fw_download(struct ieee80211_hw *hw, bool enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + if (enable) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05); + + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); + rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); + } else { + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); + } +} + +static void _rtl92ee_fw_block_write(struct ieee80211_hw *hw, + const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 blocksize = sizeof(u32); + u8 *bufferptr = (u8 *)buffer; + u32 *pu4byteptr = (u32 *)buffer; + u32 i, offset, blockcount, remainsize; + + blockcount = size / blocksize; + remainsize = size % blocksize; + + for (i = 0; i < blockcount; i++) { + offset = i * blocksize; + rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), + *(pu4byteptr + i)); + } + + if (remainsize) { + offset = blockcount * blocksize; + bufferptr += offset; + for (i = 0; i < remainsize; i++) { + rtl_write_byte(rtlpriv, + (FW_8192C_START_ADDRESS + offset + i), + *(bufferptr + i)); + } + } +} + +static void _rtl92ee_fw_page_write(struct ieee80211_hw *hw, u32 page, + const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value8; + u8 u8page = (u8)(page & 0x07); + + value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; + rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); + + _rtl92ee_fw_block_write(hw, buffer, size); +} + +static void _rtl92ee_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +{ + u32 fwlen = *pfwlen; + u8 remain = (u8)(fwlen % 4); + + remain = (remain == 0) ? 0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + + *pfwlen = fwlen; +} + +static void _rtl92ee_write_fw(struct ieee80211_hw *hw, + enum version_8192e version, + u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *bufferptr = (u8 *)buffer; + u32 pagenums, remainsize; + u32 page, offset; + + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size); + + _rtl92ee_fill_dummy(bufferptr, &size); + + pagenums = size / FW_8192C_PAGE_SIZE; + remainsize = size % FW_8192C_PAGE_SIZE; + + if (pagenums > 8) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Page numbers should not greater then 8\n"); + } + + for (page = 0; page < pagenums; page++) { + offset = page * FW_8192C_PAGE_SIZE; + _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); + udelay(2); + } + + if (remainsize) { + offset = pagenums * FW_8192C_PAGE_SIZE; + page = pagenums; + _rtl92ee_fw_page_write(hw, page, (bufferptr + offset), + remainsize); + } +} + +static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = -EIO; + u32 counter = 0; + u32 value32; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) && + (!(value32 & FWDL_CHKSUM_RPT))); + + if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "chksum report faill ! REG_MCUFWDL:0x%08x .\n", + value32); + goto exit; + } + + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "Checksum report OK ! 
REG_MCUFWDL:0x%08x .\n", value32); + + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + value32 |= MCUFWDL_RDY; + value32 &= ~WINTINI_RDY; + rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); + + rtl92ee_firmware_selfreset(hw); + counter = 0; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + if (value32 & WINTINI_RDY) { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , + "Polling FW ready success!! REG_MCUFWDL:0x%08x. count = %d\n", + value32, counter); + err = 0; + goto exit; + } + + udelay(FW_8192C_POLLING_DELAY*10); + + } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); + + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n", + value32, counter); + +exit: + return err; +} + +int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl92c_firmware_header *pfwheader; + u8 *pfwdata; + u32 fwsize; + int err; + enum version_8192e version = rtlhal->version; + + if (!rtlhal->pfirmware) + return 1; + + pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = pfwheader->version; + rtlhal->fw_subversion = pfwheader->subversion; + pfwdata = (u8 *)rtlhal->pfirmware; + fwsize = rtlhal->fwsize; + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "normal Firmware SIZE %d\n" , fwsize); + + if (IS_FW_HEADER_EXIST(pfwheader)) { + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "Firmware Version(%d), Signature(%#x),Size(%d)\n", + pfwheader->version, pfwheader->signature, + (int)sizeof(struct rtl92c_firmware_header)); + + pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); + fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + } else { + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "Firmware no Header, Signature(%#x)\n", + pfwheader->signature); + } + + if (rtlhal->mac_func_enable) { + if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + rtl92ee_firmware_selfreset(hw); + } + } + _rtl92ee_enable_fw_download(hw, true); + _rtl92ee_write_fw(hw, version, pfwdata, fwsize); + _rtl92ee_enable_fw_download(hw, false); + + err = _rtl92ee_fw_free_to_go(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Firmware is not ready to run!\n"); + } else { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , + "Firmware is ready to run!\n"); + } + + return 0; +} + +static bool _rtl92ee_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val_hmetfr; + bool result = false; + + val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); + if (((val_hmetfr >> boxnum) & BIT(0)) == 0) + result = true; + return result; +} + +static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *cmdbuffer) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 boxnum; + u16 box_reg = 0, box_extreg = 0; + u8 u1b_tmp; + bool isfw_read = false; + u8 buf_index = 0; + bool bwrite_sucess = false; + u8 wait_h2c_limmit = 100; + u8 boxcontent[4], boxextcontent[4]; + u32 h2c_waitcounter = 0; + unsigned long flag; + u8 idx; + + if (ppsc->dot11_psmode != EACTIVE || + ppsc->inactive_pwrstate == ERFOFF) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "FillH2CCommand8192E(): Return because RF is off!!!\n"); + return; + } + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "come in\n"); + + /* 1. Prevent race condition in setting H2C cmd. 
+ * (copy from MgntActSet_RF_State().) + */ + while (true) { + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + if (rtlhal->h2c_setinprogress) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "H2C set in progress! Wait to set..element_id(%d).\n", + element_id); + + while (rtlhal->h2c_setinprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, + flag); + h2c_waitcounter++; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "Wait 100 us (%d times)...\n", + h2c_waitcounter); + udelay(100); + + if (h2c_waitcounter > 1000) + return; + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, + flag); + } + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + } else { + rtlhal->h2c_setinprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + break; + } + } + + while (!bwrite_sucess) { + /* 2. Find the last BOX number which has been writen. */ + boxnum = rtlhal->last_hmeboxnum; + switch (boxnum) { + case 0: + box_reg = REG_HMEBOX_0; + box_extreg = REG_HMEBOX_EXT_0; + break; + case 1: + box_reg = REG_HMEBOX_1; + box_extreg = REG_HMEBOX_EXT_1; + break; + case 2: + box_reg = REG_HMEBOX_2; + box_extreg = REG_HMEBOX_EXT_2; + break; + case 3: + box_reg = REG_HMEBOX_3; + box_extreg = REG_HMEBOX_EXT_3; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + /* 3. Check if the box content is empty. */ + isfw_read = false; + u1b_tmp = rtl_read_byte(rtlpriv, REG_CR); + + if (u1b_tmp != 0xea) { + isfw_read = true; + } else { + if (rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xea || + rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xea) + rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xff); + } + + if (isfw_read) { + wait_h2c_limmit = 100; + isfw_read = _rtl92ee_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "Waiting too long for FW read clear HMEBox(%d)!!!\n", + boxnum); + break; + } + udelay(10); + isfw_read = + _rtl92ee_check_fw_read_last_h2c(hw, boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x130); + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n", + boxnum, u1b_tmp); + } + } + + /* If Fw has not read the last + * H2C cmd, break and give up this H2C. + */ + if (!isfw_read) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "Write H2C reg BOX[%d] fail,Fw don't read.\n", + boxnum); + break; + } + /* 4. 
Fill the H2C cmd into box */ + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "Write element_id box_reg(%4x) = %2x\n", + box_reg, element_id); + + switch (cmd_len) { + case 1: + case 2: + case 3: + /*boxcontent[0] &= ~(BIT(7));*/ + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, cmd_len); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + case 4: + case 5: + case 6: + case 7: + /*boxcontent[0] |= (BIT(7));*/ + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index+3, cmd_len-3); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 3); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_extreg + idx, + boxextcontent[idx]); + } + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + bwrite_sucess = true; + + rtlhal->last_hmeboxnum = boxnum + 1; + if (rtlhal->last_hmeboxnum == 4) + rtlhal->last_hmeboxnum = 0; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , + "pHalData->last_hmeboxnum = %d\n", + rtlhal->last_hmeboxnum); + } + + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + rtlhal->h2c_setinprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "go out\n"); +} + +void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *cmdbuffer) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 tmp_cmdbuf[2]; + + if (!rtlhal->fw_ready) { + RT_ASSERT(false, + "return H2C cmd because of Fw download fail!!!\n"); + return; + } + + memset(tmp_cmdbuf, 0, 8); + memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); + _rtl92ee_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); +} + +void rtl92ee_firmware_selfreset(struct ieee80211_hw *hw) +{ + u8 u1b_tmp; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); + + udelay(50); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0))); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD , + " _8051Reset92E(): 8051 reset success .\n"); +} + +void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 u1_h2c_set_pwrmode[H2C_92E_PWEMODE_LENGTH] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 rlbm , power_state = 0; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , "FW LPS mode = %d\n", mode); + + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); + rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/ + SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); + SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, + (rtlpriv->mac80211.p2p) ? 
+ ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, + ppsc->reg_max_lps_awakeintvl); + SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); + if (mode == FW_PS_ACTIVE_MODE) + power_state |= FW_PWR_STATE_ACTIVE; + else + power_state |= FW_PWR_STATE_RF_OFF; + SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n", + u1_h2c_set_pwrmode, H2C_92E_PWEMODE_LENGTH); + rtl92ee_fill_h2c_cmd(hw, H2C_92E_SETPWRMODE, H2C_92E_PWEMODE_LENGTH, + u1_h2c_set_pwrmode); +} + +void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus) +{ + u8 parm[3] = { 0 , 0 , 0 }; + /* parm[0]: bit0=0-->Disconnect, bit0=1-->Connect + * bit1=0-->update Media Status to MACID + * bit1=1-->update Media Status from MACID to MACID_End + * parm[1]: MACID, if this is INFRA_STA, MacID = 0 + * parm[2]: MACID_End + */ + + SET_H2CCMD_MSRRPT_PARM_OPMODE(parm, mstatus); + SET_H2CCMD_MSRRPT_PARM_MACID_IND(parm, 0); + + rtl92ee_fill_h2c_cmd(hw, H2C_92E_MSRRPT, 3, parm); +} + +#define BEACON_PG 0 /* ->1 */ +#define PSPOLL_PG 2 +#define NULL_PG 3 +#define PROBERSP_PG 4 /* ->5 */ + +#define TOTAL_RESERVED_PKT_LEN 768 + +static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { + /* page 0 beacon */ + 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65, + 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B, + 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06, + 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32, + 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, + 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C, + 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50, + 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, + 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00, + + /* page 1 beacon */ + 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 2 ps-poll */ + 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B, + 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 
0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 3 null */ + 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B, + 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 4 probe_resp */ + 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, + 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, + 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, + 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00, + 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, + 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, + 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, + 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, + 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, + 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, + 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 5 probe_resp */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct sk_buff *skb = NULL; + + u32 totalpacketlen; + bool rtstatus; + u8 u1rsvdpageloc[5] = { 0 }; + bool b_dlok = false; + + u8 *beacon; + u8 *p_pspoll; + u8 *nullfunc; + u8 *p_probersp; + /*--------------------------------------------------------- + * (1) beacon + *--------------------------------------------------------- + */ + beacon = &reserved_page_packet[BEACON_PG * 128]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + /*------------------------------------------------------- + * (2) ps-poll + *-------------------------------------------------------- + */ + p_pspoll = 
&reserved_page_packet[PSPOLL_PG * 128]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); + + /*-------------------------------------------------------- + * (3) null data + *--------------------------------------------------------- + */ + nullfunc = &reserved_page_packet[NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); + + /*--------------------------------------------------------- + * (4) probe response + *---------------------------------------------------------- + */ + p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN; + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD , + "rtl92ee_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + &reserved_page_packet[0], totalpacketlen); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD , + "rtl92ee_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + u1rsvdpageloc, 3); + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *)skb_put(skb, totalpacketlen), + &reserved_page_packet, totalpacketlen); + + rtstatus = rtl_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (b_dlok) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , + "Set RSVD page location to Fw.\n"); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD , + "H2C_RSVDPAGE:\n", u1rsvdpageloc, 3); + rtl92ee_fill_h2c_cmd(hw, H2C_92E_RSVDPAGE, + sizeof(u1rsvdpageloc), u1rsvdpageloc); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set RSVD page location to Fw FAIL!!!!!!.\n"); + } +} + +/*Shoud check FW support p2p or not.*/ +static void rtl92ee_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) +{ + u8 u1_ctwindow_period[1] = {ctwindow}; + + rtl92ee_fill_h2c_cmd(hw, H2C_92E_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); +} + +void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_p2p_ps_info *p2pinfo = &rtlps->p2p_ps_info; + struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload; + u8 i; + u16 ctwindow; + u32 start_time, tsf_low; + + switch (p2p_ps_state) { + case P2P_PS_DISABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_DISABLE\n"); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); + break; + case P2P_PS_ENABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_ENABLE\n"); + /* update CTWindow value. 
*/ + if (p2pinfo->ctwindow > 0) { + p2p_ps_offload->ctwindow_en = 1; + ctwindow = p2pinfo->ctwindow; + rtl92ee_set_p2p_ctw_period_cmd(hw, ctwindow); + } + /* hw only support 2 set of NoA */ + for (i = 0 ; i < p2pinfo->noa_num ; i++) { + /* To control the register setting for which NOA*/ + rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); + if (i == 0) + p2p_ps_offload->noa0_en = 1; + else + p2p_ps_offload->noa1_en = 1; + /* config P2P NoA Descriptor Register */ + rtl_write_dword(rtlpriv, 0x5E0, + p2pinfo->noa_duration[i]); + rtl_write_dword(rtlpriv, 0x5E4, + p2pinfo->noa_interval[i]); + + /*Get Current TSF value */ + tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + start_time = p2pinfo->noa_start_time[i]; + if (p2pinfo->noa_count_type[i] != 1) { + while (start_time <= (tsf_low + (50 * 1024))) { + start_time += p2pinfo->noa_interval[i]; + if (p2pinfo->noa_count_type[i] != 255) + p2pinfo->noa_count_type[i]--; + } + } + rtl_write_dword(rtlpriv, 0x5E8, start_time); + rtl_write_dword(rtlpriv, 0x5EC, + p2pinfo->noa_count_type[i]); + } + if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { + /* rst p2p circuit */ + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); + p2p_ps_offload->offload_en = 1; + + if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { + p2p_ps_offload->role = 1; + p2p_ps_offload->allstasleep = 0; + } else { + p2p_ps_offload->role = 0; + } + p2p_ps_offload->discovery = 0; + } + break; + case P2P_PS_SCAN: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN\n"); + p2p_ps_offload->discovery = 1; + break; + case P2P_PS_SCAN_DONE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN_DONE\n"); + p2p_ps_offload->discovery = 0; + p2pinfo->p2p_ps_state = P2P_PS_ENABLE; + break; + default: + break; + } + rtl92ee_fill_h2c_cmd(hw, H2C_92E_P2P_PS_OFFLOAD, 1, + (u8 *)p2p_ps_offload); +} + +static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len) +{ + u8 rate = cmd_buf[0] & 0x3F; + bool collision_state = cmd_buf[3] & BIT(0); + + rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state); +} + +static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (c2h_cmd_id) { + case C2H_8192E_DBG: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_DBG!!\n"); + break; + case C2H_8192E_TXBF: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8192E_TXBF!!\n"); + break; + case C2H_8192E_TX_REPORT: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE , + "[C2H], C2H_8723BE_TX_REPORT!\n"); + break; + case C2H_8192E_BT_INFO: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_BT_INFO!!\n"); + rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); + break; + case C2H_8192E_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_BT_MP!!\n"); + break; + case C2H_8192E_RA_RPT: + _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); + break; + default: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], Unkown packet!! 
CmdId(%#X)!\n", c2h_cmd_id); + break; + } +} + +void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; + u8 *tmp_buf = NULL; + + c2h_cmd_id = buffer[0]; + c2h_cmd_seq = buffer[1]; + c2h_cmd_len = len - 2; + tmp_buf = buffer + 2; + + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", + c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); + + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); + + _rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h new file mode 100644 index 0000000000000000000000000000000000000000..3e2a48e5fb4deffb1ad2a5f7b78ed3050aef4fe9 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E__FW__H__ +#define __RTL92E__FW__H__ + +#define FW_8192C_SIZE 0x8000 +#define FW_8192C_START_ADDRESS 0x1000 +#define FW_8192C_END_ADDRESS 0x5FFF +#define FW_8192C_PAGE_SIZE 4096 +#define FW_8192C_POLLING_DELAY 5 +#define FW_8192C_POLLING_TIMEOUT_COUNT 3000 + +#define IS_FW_HEADER_EXIST(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x92E0) +#define USE_OLD_WOWLAN_DEBUG_FW 0 + +#define H2C_92E_RSVDPAGE_LOC_LEN 5 +#define H2C_92E_PWEMODE_LENGTH 5 +#define H2C_92E_JOINBSSRPT_LENGTH 1 +#define H2C_92E_AP_OFFLOAD_LENGTH 3 +#define H2C_92E_WOWLAN_LENGTH 3 +#define H2C_92E_KEEP_ALIVE_CTRL_LENGTH 3 +#if (USE_OLD_WOWLAN_DEBUG_FW == 0) +#define H2C_92E_REMOTE_WAKE_CTRL_LEN 1 +#else +#define H2C_92E_REMOTE_WAKE_CTRL_LEN 3 +#endif +#define H2C_92E_AOAC_GLOBAL_INFO_LEN 2 +#define H2C_92E_AOAC_RSVDPAGE_LOC_LEN 7 + +/* Fw PS state for RPWM. 
+*BIT[2:0] = HW state +*BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state +*BIT[4] = sub-state +*/ +#define FW_PS_RF_ON BIT(2) +#define FW_PS_REGISTER_ACTIVE BIT(3) + +#define FW_PS_ACK BIT(6) +#define FW_PS_TOGGLE BIT(7) + + /* 92E RPWM value*/ + /* BIT[0] = 1: 32k, 0: 40M*/ +#define FW_PS_CLOCK_OFF BIT(0) /* 32k */ +#define FW_PS_CLOCK_ON 0 /* 40M */ + +#define FW_PS_STATE_MASK (0x0F) +#define FW_PS_STATE_HW_MASK (0x07) +#define FW_PS_STATE_INT_MASK (0x3F) + +#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) + +#define FW_PS_STATE_ALL_ON_92E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_RF_ON_92E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_RF_OFF_92E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_RF_OFF_LOW_PWR (FW_PS_CLOCK_OFF) + +/* For 92E H2C PwrMode Cmd ID 5.*/ +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK) + +#define IS_IN_LOW_POWER_STATE_92E(__state) \ + (FW_PS_STATE(__state) == FW_PS_CLOCK_OFF) + +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +struct rtl92c_firmware_header { + u16 signature; + u8 category; + u8 function; + u16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + u16 ramcodesize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; +}; + +enum rtl8192e_h2c_cmd { + H2C_92E_RSVDPAGE = 0, + H2C_92E_MSRRPT = 1, + H2C_92E_SCAN = 2, + H2C_92E_KEEP_ALIVE_CTRL = 3, + H2C_92E_DISCONNECT_DECISION = 4, +#if (USE_OLD_WOWLAN_DEBUG_FW == 1) + H2C_92E_WO_WLAN = 5, +#endif + H2C_92E_INIT_OFFLOAD = 6, +#if (USE_OLD_WOWLAN_DEBUG_FW == 1) + H2C_92E_REMOTE_WAKE_CTRL = 7, +#endif + H2C_92E_AP_OFFLOAD = 8, + H2C_92E_BCN_RSVDPAGE = 9, + H2C_92E_PROBERSP_RSVDPAGE = 10, + + H2C_92E_SETPWRMODE = 0x20, + H2C_92E_PS_TUNING_PARA = 0x21, + H2C_92E_PS_TUNING_PARA2 = 0x22, + H2C_92E_PS_LPS_PARA = 0x23, + H2C_92E_P2P_PS_OFFLOAD = 024, + +#if (USE_OLD_WOWLAN_DEBUG_FW == 0) + H2C_92E_WO_WLAN = 0x80, + H2C_92E_REMOTE_WAKE_CTRL = 0x81, + H2C_92E_AOAC_GLOBAL_INFO = 0x82, + H2C_92E_AOAC_RSVDPAGE = 0x83, +#endif + H2C_92E_RA_MASK = 0x40, + H2C_92E_RSSI_REPORT = 0x42, + H2C_92E_SELECTIVE_SUSPEND_ROF_CMD, + H2C_92E_P2P_PS_MODE, + H2C_92E_PSD_RESULT, + /*Not defined CTW CMD for P2P yet*/ + H2C_92E_P2P_PS_CTW_CMD, + MAX_92E_H2CCMD +}; + +enum rtl8192e_c2h_evt { + C2H_8192E_DBG = 0, + C2H_8192E_LB = 1, + C2H_8192E_TXBF = 2, + C2H_8192E_TX_REPORT = 3, + C2H_8192E_BT_INFO = 9, + C2H_8192E_BT_MP = 11, + C2H_8192E_RA_RPT = 12, + MAX_8192E_C2HEVENT +}; + +#define pagenum_128(_len) \ + (u32)(((_len) >> 7) + ((_len) & 0x7F ? 
1 : 0)) + +#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __val) +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __val) +#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __val) +#define GET_92E_H2CCMD_PWRMODE_PARM_MODE(__cmd) \ + LE_BITS_TO_1BYTE(__cmd, 0, 8) + +#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) + +/* _MEDIA_STATUS_RPT_PARM_CMD1 */ +#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __val) +#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __val) +#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __val) +#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __val) \ + SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __val) + +int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw); +void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *cmdbuffer); +void rtl92ee_firmware_selfreset(struct ieee80211_hw *hw); +void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); +void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c new file mode 100644 index 0000000000000000000000000000000000000000..dfdc9b20e4ad4d868012c80f4233559e58050052 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -0,0 +1,2569 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "led.h" +#include "hw.h" +#include "../pwrseqcmd.h" +#include "pwrseq.h" + +#define LLT_CONFIG 5 + +static void _rtl92ee_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpci->reg_bcn_ctrl_val |= set_bits; + rtlpci->reg_bcn_ctrl_val &= ~clear_bits; + + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); +} + +static void _rtl92ee_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + tmp = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp); +} + +static void _rtl92ee_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + tmp = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp | BIT(6)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp |= BIT(0); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp); +} + +static void _rtl92ee_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1)); +} + +static void _rtl92ee_return_beacon_queue_skb(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + unsigned long flags; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + while (skb_queue_len(&ring->queue)) { + struct rtl_tx_buffer_desc *entry = + &ring->buffer_desc[ring->idx]; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(rtlpci->pdev, + rtlpriv->cfg->ops->get_desc( + (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); +} + +static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0); +} + +static void _rtl92ee_set_fw_clock_on(struct ieee80211_hw *hw, + u8 rpwm_val, bool b_need_turn_off_ckk) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_support_remote_wake_up; + u32 count = 0, isr_regaddr, content; + bool b_schedule_timer = b_need_turn_off_ckk; + + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&b_support_remote_wake_up)); + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + + while (1) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (rtlhal->fw_clk_change_in_progress) { + while (rtlhal->fw_clk_change_in_progress) { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + count++; + udelay(100); + if (count > 1000) + return; + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + } + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + 
rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + break; + } + } + + if (IS_IN_LOW_POWER_STATE_92E(rtlhal->fw_ps_state)) { + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + if (FW_PS_IS_ACK(rpwm_val)) { + isr_regaddr = REG_HISR; + content = rtl_read_dword(rtlpriv, isr_regaddr); + while (!(content & IMR_CPWM) && (count < 500)) { + udelay(50); + count++; + content = rtl_read_dword(rtlpriv, isr_regaddr); + } + + if (content & IMR_CPWM) { + rtl_write_word(rtlpriv, isr_regaddr, 0x0100); + rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_92E; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Receive CPWM INT!!! PSState = %X\n", + rtlhal->fw_ps_state); + } + } + + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + if (b_schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } else { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } +} + +static void _rtl92ee_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + enum rf_pwrstate rtstate; + bool b_schedule_timer = false; + u8 queue; + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + if (!rtlhal->allow_sw_to_change_hwclc) + return; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate)); + if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF) + return; + + for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) { + ring = &rtlpci->tx_ring[queue]; + if (skb_queue_len(&ring->queue)) { + b_schedule_timer = true; + break; + } + } + + if (b_schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + return; + } + + if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (!rtlhal->fw_clk_change_in_progress) { + rtlhal->fw_clk_change_in_progress = true; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); + rtl_write_word(rtlpriv, REG_HISR, 0x0100); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } +} + +static void _rtl92ee_set_fw_ps_rf_on(struct ieee80211_hw *hw) +{ + u8 rpwm_val = 0; + + rpwm_val |= (FW_PS_STATE_RF_OFF_92E | FW_PS_ACK); + _rtl92ee_set_fw_clock_on(hw, rpwm_val, true); +} + +static void _rtl92ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw) +{ + u8 rpwm_val = 0; + + rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR; + _rtl92ee_set_fw_clock_off(hw, rpwm_val); +} + +void rtl92ee_fw_clk_off_timer_callback(unsigned long data) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; + + _rtl92ee_set_fw_ps_rf_off_low_power(hw); +} + +static void _rtl92ee_fwlps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = false; + u8 rpwm_val = 0, fw_pwrmode = 
FW_PS_ACTIVE_MODE; + + if (ppsc->low_power_enable) { + rpwm_val = (FW_PS_STATE_ALL_ON_92E | FW_PS_ACK);/* RF on */ + _rtl92ee_set_fw_clock_on(hw, rpwm_val, false); + rtlhal->allow_sw_to_change_hwclc = false; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } else { + rpwm_val = FW_PS_STATE_ALL_ON_92E; /* RF on */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } +} + +static void _rtl92ee_fwlps_enter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = true; + u8 rpwm_val; + + if (ppsc->low_power_enable) { + rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlhal->allow_sw_to_change_hwclc = true; + _rtl92ee_set_fw_clock_off(hw, rpwm_val); + } else { + rpwm_val = FW_PS_STATE_RF_OFF_92E; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + } +} + +void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + switch (variable) { + case HW_VAR_RCR: + *((u32 *)(val)) = rtlpci->receive_config; + break; + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else + *((bool *)(val)) = true; + } + } + break; + case HW_VAR_FW_PSMODE_STATUS: + *((bool *)(val)) = ppsc->fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF:{ + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + *((u64 *)(val)) = tsf; + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process %x\n", variable); + break; + } +} + +static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp_regcr, tmp_reg422; + u8 bcnvalid_reg, txbc_reg; + u8 count = 0, dlbcn_count = 0; + bool b_recover = false; + + /*Set REG_CR bit 8. DMA beacon by SW.*/ + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp_regcr | BIT(0)); + + /* Disable Hw protection for a time which revserd for Hw sending beacon. + * Fix download reserved page packet fail + * that access collision with the protection time. + * 2010.05.11. Added by tynli. 
+ */ + _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(4), 0); + + /* Set FWHW_TXQ_CTRL 0x422[6]=0 to + * tell Hw the packet is not a real beacon frame. + */ + tmp_reg422 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); + + if (tmp_reg422 & BIT(6)) + b_recover = true; + + do { + /* Clear beacon valid check bit */ + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_DWBCN0_CTRL + 2); + rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2, + bcnvalid_reg | BIT(0)); + + /* Return Beacon TCB */ + _rtl92ee_return_beacon_queue_skb(hw); + + /* download rsvd page */ + rtl92ee_set_fw_rsvdpagepkt(hw, false); + + txbc_reg = rtl_read_byte(rtlpriv, REG_MGQ_TXBD_NUM + 3); + count = 0; + while ((txbc_reg & BIT(4)) && count < 20) { + count++; + udelay(10); + txbc_reg = rtl_read_byte(rtlpriv, REG_MGQ_TXBD_NUM + 3); + } + rtl_write_byte(rtlpriv, REG_MGQ_TXBD_NUM + 3, + txbc_reg | BIT(4)); + + /* check rsvd page download OK. */ + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_DWBCN0_CTRL + 2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count < 20) { + count++; + udelay(50); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_DWBCN0_CTRL + 2); + } + + if (bcnvalid_reg & BIT(0)) + rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2, BIT(0)); + + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); + + if (!(bcnvalid_reg & BIT(0))) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Download RSVD page failed!\n"); + + /* Enable Bcn */ + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(4)); + + if (b_recover) + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422); + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp_regcr & (~BIT(0))); +} + +void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *efuse = rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 idx; + + switch (variable) { + case HW_VAR_ETHER_ADDR: + for (idx = 0; idx < ETH_ALEN; idx++) + rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]); + break; + case HW_VAR_BASIC_RATE:{ + u16 b_rate_cfg = ((u16 *)val)[0]; + + b_rate_cfg = b_rate_cfg & 0x15f; + b_rate_cfg |= 0x01; + b_rate_cfg = (b_rate_cfg | 0xd) & (~BIT(1)); + rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, (b_rate_cfg >> 8) & 0xff); + break; } + case HW_VAR_BSSID: + for (idx = 0; idx < ETH_ALEN; idx++) + rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]); + break; + case HW_VAR_SIFS: + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); + + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + + if (!mac->ht_enable) + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e); + else + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + *((u16 *)val)); + break; + case HW_VAR_SLOT_TIME:{ + u8 e_aci; + + RT_TRACE(rtlpriv, COMP_MLME, DBG_TRACE, + "HW_VAR_SLOT_TIME %x\n", val[0]); + + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + + for (e_aci = 0; e_aci < AC_MAX; e_aci++) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *)(&e_aci)); + } + break; } + case HW_VAR_ACK_PREAMBLE:{ + u8 reg_tmp; + u8 short_preamble = (bool)(*(u8 *)val); + + reg_tmp = 
(rtlpriv->mac80211.cur_40_prime_sc) << 5; + if (short_preamble) + reg_tmp |= 0x80; + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); + rtlpriv->mac80211.short_preamble = short_preamble; + } + break; + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); + break; + case HW_VAR_AMPDU_FACTOR:{ + u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 }; + u8 fac; + u8 *reg = NULL; + u8 i = 0; + + reg = regtoset_normal; + + fac = *((u8 *)val); + if (fac <= 3) { + fac = (1 << (fac + 2)); + if (fac > 0xf) + fac = 0xf; + for (i = 0; i < 4; i++) { + if ((reg[i] & 0xf0) > (fac << 4)) + reg[i] = (reg[i] & 0x0f) | + (fac << 4); + if ((reg[i] & 0x0f) > fac) + reg[i] = (reg[i] & 0xf0) | fac; + rtl_write_byte(rtlpriv, + (REG_AGGLEN_LMT + i), + reg[i]); + } + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_FACTOR:%#x\n", fac); + } + } + break; + case HW_VAR_AC_PARAM:{ + u8 e_aci = *((u8 *)val); + + if (rtlpci->acm_method != EACMWAY2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, + (u8 *)(&e_aci)); + } + break; + case HW_VAR_ACM_CTRL:{ + u8 e_aci = *((u8 *)val); + union aci_aifsn *aifs = (union aci_aifsn *)(&mac->ac[0].aifs); + + u8 acm = aifs->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= ACMHW_BEQEN; + break; + case AC2_VI: + acm_ctrl |= ACMHW_VIQEN; + break; + case AC3_VO: + acm_ctrl |= ACMHW_VOQEN; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", + acm); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~ACMHW_BEQEN); + break; + case AC2_VI: + acm_ctrl &= (~ACMHW_VIQEN); + break; + case AC3_VO: + acm_ctrl &= (~ACMHW_BEQEN); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + } + + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", + acm_ctrl); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + } + break; + case HW_VAR_RCR:{ + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]); + rtlpci->receive_config = ((u32 *)(val))[0]; + } + break; + case HW_VAR_RETRY_LIMIT:{ + u8 retry_limit = ((u8 *)(val))[0]; + + rtl_write_word(rtlpriv, REG_RETRY_LIMIT, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + } + break; + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + efuse->efuse_usedbytes = *((u16 *)val); + break; + case HW_VAR_EFUSE_USAGE: + efuse->efuse_usedpercentage = *((u8 *)val); + break; + case HW_VAR_IO_CMD: + rtl92ee_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_SET_RPWM:{ + u8 rpwm_val; + + rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); + udelay(1); + + if (rpwm_val & BIT(7)) { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, (*(u8 *)val)); + } else { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *)val) | BIT(7))); + } + } + break; + case HW_VAR_H2C_FW_PWRMODE: + rtl92ee_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_FW_PSMODE_STATUS: + ppsc->fw_current_inpsmode = *((bool *)val); + break; + case HW_VAR_RESUME_CLK_ON: + _rtl92ee_set_fw_ps_rf_on(hw); + break; + case HW_VAR_FW_LPS_ACTION:{ + bool b_enter_fwlps = *((bool *)val); + + if (b_enter_fwlps) + _rtl92ee_fwlps_enter(hw); + else + _rtl92ee_fwlps_leave(hw); + } + break; + case HW_VAR_H2C_FW_JOINBSSRPT:{ + u8 mstatus = (*(u8 *)val); + + 
if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); + _rtl92ee_download_rsvd_page(hw); + } + rtl92ee_set_fw_media_status_rpt_cmd(hw, mstatus); + } + break; + case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: + rtl92ee_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_AID:{ + u16 u2btmp; + + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, + (u2btmp | mac->assoc_id)); + } + break; + case HW_VAR_CORRECT_TSF:{ + u8 btype_ibss = ((u8 *)(val))[0]; + + if (btype_ibss) + _rtl92ee_stop_tx_beacon(hw); + + _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(3)); + + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32)(mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32)((mac->tsf >> 32) & 0xffffffff)); + + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0); + + if (btype_ibss) + _rtl92ee_resume_tx_beacon(hw); + } + break; + case HW_VAR_KEEP_ALIVE: { + u8 array[2]; + + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl92ee_fill_h2c_cmd(hw, H2C_92E_KEEP_ALIVE_CTRL, 2, array); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process %x\n", variable); + break; + } +} + +static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 txpktbuf_bndy; + u8 u8tmp, testcnt = 0; + + txpktbuf_bndy = 0xFA; + + rtl_write_dword(rtlpriv, REG_RQPN, 0x80E90808); + + rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); + rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x3d00 - 1); + + rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 1, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_DWBCN1_CTRL + 1, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_BCNQ1_BDNY, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_MGQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_PBP, 0x31); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); + + u8tmp = rtl_read_byte(rtlpriv, REG_AUTO_LLT + 2); + rtl_write_byte(rtlpriv, REG_AUTO_LLT + 2, u8tmp | BIT(0)); + + while (u8tmp & BIT(0)) { + u8tmp = rtl_read_byte(rtlpriv, REG_AUTO_LLT + 2); + udelay(10); + testcnt++; + if (testcnt > 10) + break; + } + + return true; +} + +static void _rtl92ee_gen_refresh_led_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; + + if (rtlpriv->rtlhal.up_first_time) + return; + + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) + rtl92ee_sw_led_on(hw, pled0); + else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) + rtl92ee_sw_led_on(hw, pled0); + else + rtl92ee_sw_led_off(hw, pled0); +} + +static bool _rtl92ee_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 bytetmp; + u16 wordtmp; + u32 dwordtmp; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); + + dwordtmp = rtl_read_dword(rtlpriv, REG_SYS_CFG1); + if (dwordtmp & BIT(24)) { + rtl_write_byte(rtlpriv, 0x7c, 0xc3); + } else { + bytetmp = rtl_read_byte(rtlpriv, 0x16); + rtl_write_byte(rtlpriv, 0x16, bytetmp | BIT(4) | BIT(6)); + rtl_write_byte(rtlpriv, 0x7c, 0x83); + } + /* 1. 
40Mhz crystal source*/ + bytetmp = rtl_read_byte(rtlpriv, REG_AFE_CTRL2); + bytetmp &= 0xfb; + rtl_write_byte(rtlpriv, REG_AFE_CTRL2, bytetmp); + + dwordtmp = rtl_read_dword(rtlpriv, REG_AFE_CTRL4); + dwordtmp &= 0xfffffc7f; + rtl_write_dword(rtlpriv, REG_AFE_CTRL4, dwordtmp); + + /* 2. 92E AFE parameter + * MP chip then check version + */ + bytetmp = rtl_read_byte(rtlpriv, REG_AFE_CTRL2); + bytetmp &= 0xbf; + rtl_write_byte(rtlpriv, REG_AFE_CTRL2, bytetmp); + + dwordtmp = rtl_read_dword(rtlpriv, REG_AFE_CTRL4); + dwordtmp &= 0xffdfffff; + rtl_write_dword(rtlpriv, REG_AFE_CTRL4, dwordtmp); + + /* HW Power on sequence */ + if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, + RTL8192E_NIC_ENABLE_FLOW)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "init MAC Fail as rtl_hal_pwrseqcmdparsing\n"); + return false; + } + + /* Release MAC IO register reset */ + bytetmp = rtl_read_byte(rtlpriv, REG_CR); + bytetmp = 0xff; + rtl_write_byte(rtlpriv, REG_CR, bytetmp); + mdelay(2); + bytetmp = 0x7f; + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp); + mdelay(2); + + /* Add for wakeup online */ + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR); + rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp | BIT(3)); + bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1); + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp & (~BIT(4))); + /* Release MAC IO register reset */ + rtl_write_word(rtlpriv, REG_CR, 0x2ff); + + if (!rtlhal->mac_func_enable) { + if (_rtl92ee_llt_table_init(hw) == false) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "LLT table init fail\n"); + return false; + } + } + + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); + + wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); + wordtmp &= 0xf; + wordtmp |= 0xF5B1; + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); + /* Reported Tx status from HW for rate adaptive.*/ + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F); + + /* Set RCR register */ + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xffff); + + /* Set TCR register */ + rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config); + + /* Set TX/RX descriptor physical address(from OS API). 
*/ + rtl_write_dword(rtlpriv, REG_BCNQ_DESA, + ((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_MGQ_DESA, + (u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VOQ_DESA, + (u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VIQ_DESA, + (u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + + rtl_write_dword(rtlpriv, REG_BEQ_DESA, + (u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + + dwordtmp = rtl_read_dword(rtlpriv, REG_BEQ_DESA); + + rtl_write_dword(rtlpriv, REG_BKQ_DESA, + (u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_HQ0_DESA, + (u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma & + DMA_BIT_MASK(32)); + + rtl_write_dword(rtlpriv, REG_RX_DESA, + (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma & + DMA_BIT_MASK(32)); + + /* if we want to support 64 bit DMA, we should set it here, + * but now we do not support 64 bit DMA + */ + + rtl_write_dword(rtlpriv, REG_TSFTIMER_HCI, 0x3fffffff); + + bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 3); + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0xF7); + + rtl_write_dword(rtlpriv, REG_INT_MIG, 0); + + rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); + + rtl_write_word(rtlpriv, REG_MGQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_VOQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_VIQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_BEQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_VOQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_BKQ_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI0Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI1Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI2Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI3Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI4Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI5Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI6Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + rtl_write_word(rtlpriv, REG_HI7Q_TXBD_NUM, + TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000)); + /*Rx*/ +#if (DMA_IS_64BIT == 1) + rtl_write_word(rtlpriv, REG_RX_RXBD_NUM, + RX_DESC_NUM_92E | + ((RTL8192EE_SEG_NUM << 13) & 0x6000) | 0x8000); +#else + rtl_write_word(rtlpriv, REG_RX_RXBD_NUM, + RX_DESC_NUM_92E | + ((RTL8192EE_SEG_NUM << 13) & 0x6000) | 0x0000); +#endif + + rtl_write_dword(rtlpriv, REG_TSFTIMER_HCI, 0XFFFFFFFF); + + _rtl92ee_gen_refresh_led_state(hw); + return true; +} + +static void _rtl92ee_hw_configure(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rrsr; + + reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + /* Init value for RRSR. 
*/ + rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr); + + /* ARFB table 9 for 11ac 5G 2SS */ + rtl_write_dword(rtlpriv, REG_ARFR0, 0x00000010); + rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0x3e0ff000); + + /* ARFB table 10 for 11ac 5G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR1, 0x00000010); + rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x000ff000); + + /* Set SLOT time */ + rtl_write_byte(rtlpriv, REG_SLOT, 0x09); + + /* CF-End setting. */ + rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80); + + /* Set retry limit */ + rtl_write_word(rtlpriv, REG_RETRY_LIMIT, 0x0707); + + /* BAR settings */ + rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x0201ffff); + + /* Set Data / Response auto rate fallack retry count */ + rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); + + /* Beacon related, for rate adaptive */ + rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2); + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff); + + rtlpci->reg_bcn_ctrl_val = 0x1d; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val); + + /* Marked out by Bruce, 2010-09-09. + * This register is configured for the 2nd Beacon (multiple BSSID). + * We shall disable this register if we only support 1 BSSID. + * vivi guess 92d also need this, also 92d now doesnot set this reg + */ + rtl_write_byte(rtlpriv, REG_BCN_CTRL_1, 0); + + /* TBTT prohibit hold time. Suggested by designer TimChen. */ + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); /* 8 ms */ + + rtl_write_byte(rtlpriv, REG_PIFS, 0); + rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); + + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x08ff); + + /* For Rx TP. Suggested by SD1 Richard. Added by tynli. 2010.04.12.*/ + rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); + + /* ACKTO for IOT issue. */ + rtl_write_byte(rtlpriv, REG_ACKTO, 0x40); + + /* Set Spec SIFS (used in NAV) */ + rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x100a); + rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x100a); + + /* Set SIFS for CCK */ + rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x100a); + + /* Set SIFS for OFDM */ + rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a); + + /* Note Data sheet don't define */ + rtl_write_word(rtlpriv, 0x4C7, 0x80); + + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); + + rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1717); + + /* Set Multicast Address. 2009.01.07. by tynli. 
*/ + rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff); +} + +static void _rtl92ee_enable_aspm_back_door(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u32 tmp32 = 0, count = 0; + u8 tmp8 = 0; + + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0x78); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x2); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count++; + } + + if (0 == tmp8) { + tmp32 = rtl_read_dword(rtlpriv, REG_BACKDOOR_DBI_RDATA); + if ((tmp32 & 0xff00) != 0x2000) { + tmp32 &= 0xffff00ff; + rtl_write_dword(rtlpriv, REG_BACKDOOR_DBI_WDATA, + tmp32 | BIT(13)); + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0xf078); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x1); + + tmp8 = rtl_read_byte(rtlpriv, + REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, + REG_BACKDOOR_DBI_DATA + 2); + count++; + } + } + } + + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0x70c); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x2); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count++; + } + if (0 == tmp8) { + tmp32 = rtl_read_dword(rtlpriv, REG_BACKDOOR_DBI_RDATA); + rtl_write_dword(rtlpriv, REG_BACKDOOR_DBI_WDATA, + tmp32 | BIT(31)); + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0xf70c); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x1); + } + + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count++; + } + + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0x718); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x2); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count++; + } + if (ppsc->support_backdoor || (0 == tmp8)) { + tmp32 = rtl_read_dword(rtlpriv, REG_BACKDOOR_DBI_RDATA); + rtl_write_dword(rtlpriv, REG_BACKDOOR_DBI_WDATA, + tmp32 | BIT(11) | BIT(12)); + rtl_write_word(rtlpriv, REG_BACKDOOR_DBI_DATA, 0xf718); + rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2, 0x1); + } + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count = 0; + while (tmp8 && count < 20) { + udelay(10); + tmp8 = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 2); + count++; + } +} + +void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value; + u8 tmp; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); + + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "not open hw encryption\n"); + return; + } + + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; + + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; + } + + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1)); + + 
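+	/*
+	 * CR bit 9 (ENSEC) enables the MAC security engine; the SECCFG
+	 * value assembled above is then written through HW_VAR_WPA_CONFIG.
+	 */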
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "The SECR-value %x\n", sec_reg_value); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); +} + +int rtl92ee_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool rtstatus = true; + int err = 0; + u8 tmp_u1b, u1byte; + u32 tmp_u4b; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8192EE hw init\n"); + rtlpriv->rtlhal.being_init_adapter = true; + rtlpriv->intf_ops->disable_aspm(hw); + + tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1); + u1byte = rtl_read_byte(rtlpriv, REG_CR); + if ((tmp_u1b & BIT(3)) && (u1byte != 0 && u1byte != 0xEA)) { + rtlhal->mac_func_enable = true; + } else { + rtlhal->mac_func_enable = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E; + } + + rtstatus = _rtl92ee_init_mac(hw); + + rtl_write_byte(rtlpriv, 0x577, 0x03); + + /*for Crystal 40 Mhz setting */ + rtl_write_byte(rtlpriv, REG_AFE_CTRL4, 0x2A); + rtl_write_byte(rtlpriv, REG_AFE_CTRL4 + 1, 0x00); + rtl_write_byte(rtlpriv, REG_AFE_CTRL2, 0x83); + + /*Forced the antenna b to wifi */ + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) { + rtl_write_byte(rtlpriv, 0x64, 0); + rtl_write_byte(rtlpriv, 0x65, 1); + } + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + err = 1; + return err; + } + rtlhal->rx_tag = 0; + rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, 0x8000); + err = rtl92ee_download_fw(hw, false); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Failed to download FW. Init HW without FW now..\n"); + err = 1; + rtlhal->fw_ready = false; + return err; + } + rtlhal->fw_ready = true; + /*fw related variable initialize */ + ppsc->fw_current_inpsmode = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E; + rtlhal->fw_clk_change_in_progress = false; + rtlhal->allow_sw_to_change_hwclc = false; + rtlhal->last_hmeboxnum = 0; + + rtl92ee_phy_mac_config(hw); + + rtl92ee_phy_bb_config(hw); + + rtl92ee_phy_rf_config(hw); + + rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, RF90_PATH_A, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, RF90_PATH_B, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->backup_rf_0x1a = (u32)rtl_get_rfreg(hw, RF90_PATH_A, RF_RX_G1, + RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[0] = (rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | + BIT(10) | BIT(11); + + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + + /*---- Set CCK and OFDM Block "ON"----*/ + rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); + rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); + + /* Must set this, + * otherwise the rx sensitivity will be very pool. 
Maddest + */ + rtl_set_rfreg(hw, RF90_PATH_A, 0xB1, RFREG_OFFSET_MASK, 0x54418); + + /*Set Hardware(MAC default setting.)*/ + _rtl92ee_hw_configure(hw); + + rtlhal->mac_func_enable = true; + + rtl_cam_reset_all_entry(hw); + rtl92ee_enable_hw_security_config(hw); + + ppsc->rfpwr_state = ERFON; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + _rtl92ee_enable_aspm_back_door(hw); + rtlpriv->intf_ops->enable_aspm(hw); + + rtl92ee_bt_hw_init(hw); + + rtlpriv->rtlhal.being_init_adapter = false; + + if (ppsc->rfpwr_state == ERFON) { + if (rtlphy->iqk_initialized) { + rtl92ee_phy_iq_calibrate(hw, true); + } else { + rtl92ee_phy_iq_calibrate(hw, false); + rtlphy->iqk_initialized = true; + } + } + + rtlphy->rfpath_rx_enable[0] = true; + if (rtlphy->rf_type == RF_2T2R) + rtlphy->rfpath_rx_enable[1] = true; + + efuse_one_byte_read(hw, 0x1FA, &tmp_u1b); + if (!(tmp_u1b & BIT(0))) { + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n"); + } + + if ((!(tmp_u1b & BIT(1))) && (rtlphy->rf_type == RF_2T2R)) { + rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path B\n"); + } + + rtl_write_byte(rtlpriv, REG_NAV_UPPER, ((30000 + 127) / 128)); + + /*Fixed LDPC rx hang issue. */ + tmp_u4b = rtl_read_dword(rtlpriv, REG_SYS_SWR_CTRL1); + rtl_write_byte(rtlpriv, REG_SYS_SWR_CTRL2, 0x75); + tmp_u4b = (tmp_u4b & 0xfff00fff) | (0x7E << 12); + rtl_write_dword(rtlpriv, REG_SYS_SWR_CTRL1, tmp_u4b); + + rtl92ee_dm_init(hw); + + rtl_write_dword(rtlpriv, 0x4fc, 0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "end of Rtl8192EE hw init %x\n", err); + return 0; +} + +static enum version_8192e _rtl92ee_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + enum version_8192e version = VERSION_UNKNOWN; + u32 value32; + + rtlphy->rf_type = RF_2T2R; + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1); + if (value32 & TRP_VAUX_EN) + version = (enum version_8192e)VERSION_TEST_CHIP_2T2R_8192E; + else + version = (enum version_8192e)VERSION_NORMAL_CHIP_2T2R_8192E; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ? + "RF_2T2R" : "RF_1T1R"); + + return version; +} + +static int _rtl92ee_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc; + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + u8 mode = MSR_NOLINK; + + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + mode = MSR_NOLINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to NO LINK!\n"); + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + mode = MSR_ADHOC; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to Ad Hoc!\n"); + break; + case NL80211_IFTYPE_STATION: + mode = MSR_INFRA; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to STA!\n"); + break; + case NL80211_IFTYPE_AP: + mode = MSR_AP; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to AP!\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Network type %d not support!\n", type); + return 1; + } + + /* MSR_INFRA == Link in infrastructure network; + * MSR_ADHOC == Link in ad hoc network; + * Therefore, check link state is necessary. + * + * MSR_AP == AP mode; link state is not cared here. 
+ */ + if (mode != MSR_AP && rtlpriv->mac80211.link_state < MAC80211_LINKED) { + mode = MSR_NOLINK; + ledaction = LED_CTL_NO_LINK; + } + + if (mode == MSR_NOLINK || mode == MSR_INFRA) { + _rtl92ee_stop_tx_beacon(hw); + _rtl92ee_enable_bcn_sub_func(hw); + } else if (mode == MSR_ADHOC || mode == MSR_AP) { + _rtl92ee_resume_tx_beacon(hw); + _rtl92ee_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", + mode); + } + + rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); + rtlpriv->cfg->ops->led_control(hw, ledaction); + if (mode == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); + return 0; +} + +void rtl92ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; + + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid) { + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(4)); + } else { + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(4), 0); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + } +} + +int rtl92ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (_rtl92ee_set_media_status(hw, type)) + return -EOPNOTSUPP; + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP && + type != NL80211_IFTYPE_MESH_POINT) + rtl92ee_set_check_bssid(hw, true); + } else { + rtl92ee_set_check_bssid(hw, false); + } + + return 0; +} + +/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ +void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl92ee_dm_init_edca_turbo(hw); + switch (aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); + break; + case AC0_BE: + /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */ + break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); + break; + default: + RT_ASSERT(false, "invalid aci: %d !\n", aci); + break; + } +} + +static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp; + + tmp = rtl_read_dword(rtlpriv, REG_HISR); + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + +void rtl92ee_enable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl92ee_clear_interrupt(hw);/*clear it here first*/ + + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; +} + +void rtl92ee_disable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); + rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); + rtlpci->irq_enabled = false; + 
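+	/*
+	 * The commented-out synchronize_irq() below would wait for any
+	 * in-flight interrupt handler to finish after masking.
+	 */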
/*synchronize_irq(rtlpci->pdev->irq);*/ +} + +static void _rtl92ee_poweroff_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + rtlhal->mac_func_enable = false; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n"); + + /* Run LPS WL RFOFF flow */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8192E_NIC_LPS_ENTER_FLOW); + /* turn off RF */ + rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); + + /* ==== Reset digital sequence ====== */ + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready) + rtl92ee_firmware_selfreset(hw); + + /* Reset MCU */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); + + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + /* HW card disable configuration. */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8192E_NIC_DISABLE_FLOW); + + /* Reset MCU IO Wrapper */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0))); + + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E); +} + +void rtl92ee_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum nl80211_iftype opmode; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8192ee card disable\n"); + + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + + _rtl92ee_set_media_status(hw, opmode); + + if (rtlpriv->rtlhal.driver_is_goingto_unload || + ppsc->rfoff_reason > RF_CHANGE_BY_PS) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + + _rtl92ee_poweroff_adapter(hw); + + /* after power off we should do iqk again */ + rtlpriv->phy.iqk_initialized = false; +} + +void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, *p_inta); + + *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); +} + +void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u16 bcn_interval, atim_window; + + bcn_interval = mac->beacon_interval; + atim_window = 2; /*FIX MERGE */ + rtl92ee_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18); + rtl_write_byte(rtlpriv, 0x606, 0x30); + rtlpci->reg_bcn_ctrl_val |= BIT(3); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); +} + +void rtl92ee_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = 
rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, + "beacon_interval:%d\n", bcn_interval); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); +} + +void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, + "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr); + + if (add_msr) + rtlpci->irq_mask[0] |= add_msr; + if (rm_msr) + rtlpci->irq_mask[0] &= (~rm_msr); + rtl92ee_disable_interrupt(hw); + rtl92ee_enable_interrupt(hw); +} + +static u8 _rtl92ee_get_chnl_group(u8 chnl) +{ + u8 group = 0; + + if (chnl <= 14) { + if (1 <= chnl && chnl <= 2) + group = 0; + else if (3 <= chnl && chnl <= 5) + group = 1; + else if (6 <= chnl && chnl <= 8) + group = 2; + else if (9 <= chnl && chnl <= 11) + group = 3; + else if (12 <= chnl && chnl <= 14) + group = 4; + } else { + if (36 <= chnl && chnl <= 42) + group = 0; + else if (44 <= chnl && chnl <= 48) + group = 1; + else if (50 <= chnl && chnl <= 58) + group = 2; + else if (60 <= chnl && chnl <= 64) + group = 3; + else if (100 <= chnl && chnl <= 106) + group = 4; + else if (108 <= chnl && chnl <= 114) + group = 5; + else if (116 <= chnl && chnl <= 122) + group = 6; + else if (124 <= chnl && chnl <= 130) + group = 7; + else if (132 <= chnl && chnl <= 138) + group = 8; + else if (140 <= chnl && chnl <= 144) + group = 9; + else if (149 <= chnl && chnl <= 155) + group = 10; + else if (157 <= chnl && chnl <= 161) + group = 11; + else if (165 <= chnl && chnl <= 171) + group = 12; + else if (173 <= chnl && chnl <= 177) + group = 13; + } + return group; +} + +static void _rtl8192ee_read_power_value_fromprom(struct ieee80211_hw *hw, + struct txpower_info_2g *pwr2g, + struct txpower_info_5g *pwr5g, + bool autoload_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 rf, addr = EEPROM_TX_PWR_INX, group, i = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "hal_ReadPowerValueFromPROM92E(): PROMContent[0x%x]=0x%x\n", + (addr + 1), hwinfo[addr + 1]); + if (0xFF == hwinfo[addr+1]) /*YJ,add,120316*/ + autoload_fail = true; + + if (autoload_fail) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "auto load fail : Use Default value!\n"); + for (rf = 0 ; rf < MAX_RF_PATH ; rf++) { + /* 2.4G default value */ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwr2g->index_cck_base[rf][group] = 0x2D; + pwr2g->index_bw40_base[rf][group] = 0x2D; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + if (i == 0) { + pwr2g->bw20_diff[rf][0] = 0x02; + pwr2g->ofdm_diff[rf][0] = 0x04; + } else { + pwr2g->bw20_diff[rf][i] = 0xFE; + pwr2g->bw40_diff[rf][i] = 0xFE; + pwr2g->cck_diff[rf][i] = 0xFE; + pwr2g->ofdm_diff[rf][i] = 0xFE; + } + } + + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) + pwr5g->index_bw40_base[rf][group] = 0x2A; + + for (i = 0; i < MAX_TX_COUNT; i++) { + if (i == 0) { + pwr5g->ofdm_diff[rf][0] = 0x04; + pwr5g->bw20_diff[rf][0] = 0x00; + pwr5g->bw80_diff[rf][0] = 0xFE; + pwr5g->bw160_diff[rf][0] = 0xFE; + } else { + pwr5g->ofdm_diff[rf][0] = 0xFE; + pwr5g->bw20_diff[rf][0] = 0xFE; + pwr5g->bw40_diff[rf][0] = 0xFE; + pwr5g->bw80_diff[rf][0] = 0xFE; + pwr5g->bw160_diff[rf][0] = 0xFE; + } + } + } + return; + } + + rtl_priv(hw)->efuse.txpwr_fromeprom = true; + + for (rf = 0 ; rf < MAX_RF_PATH ; rf++) { + /*2.4G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + 
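+			/*
+			 * 2.4G CCK base indexes come straight from the efuse
+			 * map; an unprogrammed byte (0xFF) falls back to the
+			 * same 0x2D default used in the autoload-fail path.
+			 */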
pwr2g->index_cck_base[rf][group] = hwinfo[addr++]; + if (pwr2g->index_cck_base[rf][group] == 0xFF) + pwr2g->index_cck_base[rf][group] = 0x2D; + } + for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) { + pwr2g->index_bw40_base[rf][group] = hwinfo[addr++]; + if (pwr2g->index_bw40_base[rf][group] == 0xFF) + pwr2g->index_bw40_base[rf][group] = 0x2D; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + if (i == 0) { + pwr2g->bw40_diff[rf][i] = 0; + if (hwinfo[addr] == 0xFF) { + pwr2g->bw20_diff[rf][i] = 0x02; + } else { + pwr2g->bw20_diff[rf][i] = (hwinfo[addr] + & 0xf0) >> 4; + if (pwr2g->bw20_diff[rf][i] & BIT(3)) + pwr2g->bw20_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr2g->ofdm_diff[rf][i] = 0x04; + } else { + pwr2g->ofdm_diff[rf][i] = (hwinfo[addr] + & 0x0f); + if (pwr2g->ofdm_diff[rf][i] & BIT(3)) + pwr2g->ofdm_diff[rf][i] |= 0xF0; + } + pwr2g->cck_diff[rf][i] = 0; + addr++; + } else { + if (hwinfo[addr] == 0xFF) { + pwr2g->bw40_diff[rf][i] = 0xFE; + } else { + pwr2g->bw40_diff[rf][i] = (hwinfo[addr] + & 0xf0) >> 4; + if (pwr2g->bw40_diff[rf][i] & BIT(3)) + pwr2g->bw40_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr2g->bw20_diff[rf][i] = 0xFE; + } else { + pwr2g->bw20_diff[rf][i] = (hwinfo[addr] + & 0x0f); + if (pwr2g->bw20_diff[rf][i] & BIT(3)) + pwr2g->bw20_diff[rf][i] |= 0xF0; + } + addr++; + + if (hwinfo[addr] == 0xFF) { + pwr2g->ofdm_diff[rf][i] = 0xFE; + } else { + pwr2g->ofdm_diff[rf][i] = (hwinfo[addr] + & 0xf0) >> 4; + if (pwr2g->ofdm_diff[rf][i] & BIT(3)) + pwr2g->ofdm_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr2g->cck_diff[rf][i] = 0xFE; + } else { + pwr2g->cck_diff[rf][i] = (hwinfo[addr] + & 0x0f); + if (pwr2g->cck_diff[rf][i] & BIT(3)) + pwr2g->cck_diff[rf][i] |= 0xF0; + } + addr++; + } + } + + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) { + pwr5g->index_bw40_base[rf][group] = hwinfo[addr++]; + if (pwr5g->index_bw40_base[rf][group] == 0xFF) + pwr5g->index_bw40_base[rf][group] = 0xFE; + } + + for (i = 0; i < MAX_TX_COUNT; i++) { + if (i == 0) { + pwr5g->bw40_diff[rf][i] = 0; + + if (hwinfo[addr] == 0xFF) { + pwr5g->bw20_diff[rf][i] = 0; + } else { + pwr5g->bw20_diff[rf][0] = (hwinfo[addr] + & 0xf0) >> 4; + if (pwr5g->bw20_diff[rf][i] & BIT(3)) + pwr5g->bw20_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr5g->ofdm_diff[rf][i] = 0x04; + } else { + pwr5g->ofdm_diff[rf][0] = (hwinfo[addr] + & 0x0f); + if (pwr5g->ofdm_diff[rf][i] & BIT(3)) + pwr5g->ofdm_diff[rf][i] |= 0xF0; + } + addr++; + } else { + if (hwinfo[addr] == 0xFF) { + pwr5g->bw40_diff[rf][i] = 0xFE; + } else { + pwr5g->bw40_diff[rf][i] = (hwinfo[addr] + & 0xf0) >> 4; + if (pwr5g->bw40_diff[rf][i] & BIT(3)) + pwr5g->bw40_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr5g->bw20_diff[rf][i] = 0xFE; + } else { + pwr5g->bw20_diff[rf][i] = (hwinfo[addr] + & 0x0f); + if (pwr5g->bw20_diff[rf][i] & BIT(3)) + pwr5g->bw20_diff[rf][i] |= 0xF0; + } + addr++; + } + } + + if (hwinfo[addr] == 0xFF) { + pwr5g->ofdm_diff[rf][1] = 0xFE; + pwr5g->ofdm_diff[rf][2] = 0xFE; + } else { + pwr5g->ofdm_diff[rf][1] = (hwinfo[addr] & 0xf0) >> 4; + pwr5g->ofdm_diff[rf][2] = (hwinfo[addr] & 0x0f); + } + addr++; + + if (hwinfo[addr] == 0xFF) + pwr5g->ofdm_diff[rf][3] = 0xFE; + else + pwr5g->ofdm_diff[rf][3] = (hwinfo[addr] & 0x0f); + addr++; + + for (i = 1; i < MAX_TX_COUNT; i++) { + if (pwr5g->ofdm_diff[rf][i] == 0xFF) + pwr5g->ofdm_diff[rf][i] = 0xFE; + else if (pwr5g->ofdm_diff[rf][i] & BIT(3)) + pwr5g->ofdm_diff[rf][i] |= 0xF0; + } + + 
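+		/*
+		 * 5G BW80/BW160 differences: the high nibble of each byte is
+		 * the BW80 diff and the low nibble the BW160 diff, both
+		 * sign-extended from 4 bits; 0xFF marks an unprogrammed entry.
+		 */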
for (i = 0; i < MAX_TX_COUNT; i++) { + if (hwinfo[addr] == 0xFF) { + pwr5g->bw80_diff[rf][i] = 0xFE; + } else { + pwr5g->bw80_diff[rf][i] = (hwinfo[addr] & 0xf0) + >> 4; + if (pwr5g->bw80_diff[rf][i] & BIT(3)) + pwr5g->bw80_diff[rf][i] |= 0xF0; + } + + if (hwinfo[addr] == 0xFF) { + pwr5g->bw160_diff[rf][i] = 0xFE; + } else { + pwr5g->bw160_diff[rf][i] = + (hwinfo[addr] & 0x0f); + if (pwr5g->bw160_diff[rf][i] & BIT(3)) + pwr5g->bw160_diff[rf][i] |= 0xF0; + } + addr++; + } + } +} + +static void _rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pwr2g; + struct txpower_info_5g pwr5g; + u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, + 56, 58, 60, 62, 64, 100, 102, 104, 106, + 108, 110, 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, 138, + 140, 142, 144, 149, 151, 153, 155, 157, + 159, 161, 163, 165, 167, 168, 169, 171, + 173, 175, 177 + }; + u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { + 42, 58, 106, 122, 138, 155, 171 + }; + u8 rf, idx; + u8 i; + + _rtl8192ee_read_power_value_fromprom(hw, &pwr2g, &pwr5g, + autoload_fail, hwinfo); + + for (rf = 0; rf < MAX_RF_PATH; rf++) { + for (i = 0; i < 14; i++) { + idx = _rtl92ee_get_chnl_group(i + 1); + + if (i == CHANNEL_MAX_NUMBER_2G - 1) { + efu->txpwrlevel_cck[rf][i] = + pwr2g.index_cck_base[rf][5]; + efu->txpwrlevel_ht40_1s[rf][i] = + pwr2g.index_bw40_base[rf][idx]; + } else { + efu->txpwrlevel_cck[rf][i] = + pwr2g.index_cck_base[rf][idx]; + efu->txpwrlevel_ht40_1s[rf][i] = + pwr2g.index_bw40_base[rf][idx]; + } + } + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) { + idx = _rtl92ee_get_chnl_group(channel5g[i]); + efu->txpwr_5g_bw40base[rf][i] = + pwr5g.index_bw40_base[rf][idx]; + } + for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) { + u8 upper, lower; + + idx = _rtl92ee_get_chnl_group(channel5g_80m[i]); + upper = pwr5g.index_bw40_base[rf][idx]; + lower = pwr5g.index_bw40_base[rf][idx + 1]; + + efu->txpwr_5g_bw80base[rf][i] = (upper + lower) / 2; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + efu->txpwr_cckdiff[rf][i] = pwr2g.cck_diff[rf][i]; + efu->txpwr_legacyhtdiff[rf][i] = pwr2g.ofdm_diff[rf][i]; + efu->txpwr_ht20diff[rf][i] = pwr2g.bw20_diff[rf][i]; + efu->txpwr_ht40diff[rf][i] = pwr2g.bw40_diff[rf][i]; + + efu->txpwr_5g_ofdmdiff[rf][i] = pwr5g.ofdm_diff[rf][i]; + efu->txpwr_5g_bw20diff[rf][i] = pwr5g.bw20_diff[rf][i]; + efu->txpwr_5g_bw40diff[rf][i] = pwr5g.bw40_diff[rf][i]; + efu->txpwr_5g_bw80diff[rf][i] = pwr5g.bw80_diff[rf][i]; + } + } + + if (!autoload_fail) + efu->eeprom_thermalmeter = hwinfo[EEPROM_THERMAL_METER_92E]; + else + efu->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; + + if (efu->eeprom_thermalmeter == 0xff || autoload_fail) { + efu->apk_thermalmeterignore = true; + efu->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; + } + + efu->thermalmeter[0] = efu->eeprom_thermalmeter; + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, + "thermalmeter = 0x%x\n", efu->eeprom_thermalmeter); + + if (!autoload_fail) { + efu->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION_92E] + & 0x07; + if (hwinfo[EEPROM_RF_BOARD_OPTION_92E] == 0xFF) + efu->eeprom_regulatory = 0; + } else { + efu->eeprom_regulatory = 0; + } + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, + "eeprom_regulatory = 0x%x\n", efu->eeprom_regulatory); +} + +static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct 
rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE]; + u16 eeprom_id; + + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "RTL819X Not boot from eeprom, check it !!"); + return; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "boot from neither eeprom nor efuse, check it !!"); + return; + } + + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", + hwinfo, HWSET_MAX_SIZE); + + eeprom_id = *((u16 *)&hwinfo[0]); + if (eeprom_id != RTL8192E_EEPROM_ID) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "EEPROM ID(%#x) is invalid!!\n", eeprom_id); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + } + + if (rtlefuse->autoload_failflag) + return; + /*VID DID SVID SDID*/ + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); + /*customer ID*/ + rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; + if (rtlefuse->eeprom_oemid == 0xFF) + rtlefuse->eeprom_oemid = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); + /*EEPROM version*/ + rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION]; + /*mac address*/ + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "dev_addr: %pM\n", rtlefuse->dev_addr); + /*channel plan */ + rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; + /* set channel paln to world wide 13 */ + rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /*tx power*/ + _rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, + hwinfo); + + rtl92ee_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, + hwinfo); + + /*board type*/ + rtlefuse->board_type = (((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_92E]) + & 0xE0) >> 5); + if ((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_92E]) == 0xFF) + rtlefuse->board_type = 0; + + rtlhal->board_type = rtlefuse->board_type; + /*parse xtal*/ + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E]; + if (hwinfo[EEPROM_XTAL_92E] == 0xFF) + rtlefuse->crystalcap = 0x20; + + /*antenna diversity*/ + rtlefuse->antenna_div_type = NO_ANTDIV; + rtlefuse->antenna_div_cfg = 0; + + if (rtlhal->oem_id == RT_CID_DEFAULT) { + switch (rtlefuse->eeprom_oemid) { + case EEPROM_CID_DEFAULT: + if (rtlefuse->eeprom_did == 0x818B) { + if ((rtlefuse->eeprom_svid == 0x10EC) && + (rtlefuse->eeprom_smid == 0x001B)) + rtlhal->oem_id = RT_CID_819X_LENOVO; + } else { + rtlhal->oem_id = RT_CID_DEFAULT; + } + break; + default: + rtlhal->oem_id = RT_CID_DEFAULT; + break; + } + } +} + +static 
void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + pcipriv->ledctl.led_opendrain = true; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "RT Customized ID: 0x%02X\n", rtlhal->oem_id); +} + +void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + rtlhal->version = _rtl92ee_read_chip_version(hw); + if (get_rf_type(rtlphy) == RF_1T1R) { + rtlpriv->dm.rfpath_rxenable[0] = true; + } else { + rtlpriv->dm.rfpath_rxenable[0] = true; + rtlpriv->dm.rfpath_rxenable[1] = true; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", + rtlhal->version); + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + if (tmp_u1b & BIT(4)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); + rtlefuse->epromtype = EEPROM_93C46; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n"); + rtlefuse->epromtype = EEPROM_BOOT_EFUSE; + } + if (tmp_u1b & BIT(5)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + _rtl92ee_read_adapter_info(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + } + _rtl92ee_hal_customized_behavior(hw); + + rtlphy->rfpath_rx_enable[0] = true; + if (rtlphy->rf_type == RF_2T2R) + rtlphy->rfpath_rx_enable[1] = true; +} + +static u8 _rtl92ee_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index) +{ + u8 ret = 0; + + switch (rate_index) { + case RATR_INX_WIRELESS_NGB: + ret = 0; + break; + case RATR_INX_WIRELESS_N: + case RATR_INX_WIRELESS_NG: + ret = 4; + break; + case RATR_INX_WIRELESS_NB: + ret = 2; + break; + case RATR_INX_WIRELESS_GB: + ret = 6; + break; + case RATR_INX_WIRELESS_G: + ret = 7; + break; + case RATR_INX_WIRELESS_B: + ret = 8; + break; + default: + ret = 0; + break; + } + return ret; +} + +static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *sta_entry = NULL; + u32 ratr_bitmap; + u8 ratr_index; + u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ? 1 : 0; + u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
+ 1 : 0; + enum wireless_mode wirelessmode = 0; + bool b_shortgi = false; + u8 rate_mask[7] = {0}; + u8 macid = 0; + /*u8 mimo_ps = IEEE80211_SMPS_OFF;*/ + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + wirelessmode = sta_entry->wireless_mode; + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + curtxbw_40mhz = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + + ratr_bitmap = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_bitmap = 0xfff; + + ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_N_24G: + if (curtxbw_40mhz) + ratr_index = RATR_INX_WIRELESS_NGB; + else + ratr_index = RATR_INX_WIRELESS_NB; + + if (rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff005; + } + } + + if ((curtxbw_40mhz && b_curshortgi_40mhz) || + (!curtxbw_40mhz && b_curshortgi_20mhz)) { + if (macid == 0) + b_shortgi = true; + else if (macid == 1) + b_shortgi = false; + } + break; + default: + ratr_index = RATR_INX_WIRELESS_NGB; + + if (rtlphy->rf_type == RF_1T1R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f8ff0ff; + break; + } + ratr_index = _rtl92ee_mrate_idx_to_arfr_id(hw, ratr_index); + sta_entry->ratr_index = ratr_index; + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "ratr_bitmap :%x\n", ratr_bitmap); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); + rate_mask[0] = macid; + rate_mask[1] = ratr_index | (b_shortgi ? 
0x80 : 0x00); + rate_mask[2] = curtxbw_40mhz; + rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff); + rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8); + rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16); + rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24); + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], rate_mask[4], + rate_mask[5], rate_mask[6]); + rtl92ee_fill_h2c_cmd(hw, H2C_92E_RA_MASK, 7, rate_mask); + _rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0); +} + +void rtl92ee_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dm.useramask) + rtl92ee_update_hal_rate_mask(hw, sta, rssi_level); +} + +void rtl92ee_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 sifs_timer; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *)&mac->slot_time); + if (!mac->ht_enable) + sifs_timer = 0x0a0a; + else + sifs_timer = 0x0e0e; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); +} + +bool rtl92ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +{ + *valid = 1; + return true; +} + +void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; + bool is_pairwise = false; + + static u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + if (clear_all) { + u8 idx = 0; + u8 cam_offset = 0; + u8 clear_number = 5; + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n"); + + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + + } else { + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + enc_algo = CAM_TKIP; + break; + } + + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + entry_id = rtl_cam_get_free_entry(hw, + p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + RT_TRACE(rtlpriv, COMP_SEC, + DBG_EMERG, + "Can not find free hw security cam entry\n"); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + + key_index = PAIRWISE_KEYIDX; + is_pairwise = true; + } + } + + if (rtlpriv->sec.key_len[key_index] == 0) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "delete one entry, entry_id is %d\n", + entry_id); + if 
(mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "add one entry\n"); + if (is_pairwise) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set Pairwiase key\n"); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set group key\n"); + + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + } + } +} + +void rtl92ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + + if (!auto_load_fail) { + value = hwinfo[EEPROM_RF_BOARD_OPTION_92E]; + if (((value & 0xe0) >> 5) == 0x1) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; + rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X2; + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 1; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; + rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X1; + } +} + +void rtl92ee_bt_reg_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* 0:Low, 1:High, 2:From Efuse. */ + rtlpriv->btcoexist.reg_bt_iso = 2; + /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ + rtlpriv->btcoexist.reg_bt_sco = 3; + /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ + rtlpriv->btcoexist.reg_bt_sco = 0; +} + +void rtl92ee_bt_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); +} + +void rtl92ee_suspend(struct ieee80211_hw *hw) +{ +} + +void rtl92ee_resume(struct ieee80211_hw *hw) +{ +} + +/* Turn on AAP (RCR:bit 0) for promicuous mode. */ +void rtl92ee_allow_all_destaddr(struct ieee80211_hw *hw, + bool allow_all_da, bool write_into_reg) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + if (allow_all_da) /* Set BIT0 */ + rtlpci->receive_config |= RCR_AAP; + else /* Clear BIT0 */ + rtlpci->receive_config &= ~RCR_AAP; + + if (write_into_reg) + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + + RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD, + "receive_config=0x%08X, write_into_reg=%d\n", + rtlpci->receive_config, write_into_reg); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h new file mode 100644 index 0000000000000000000000000000000000000000..05413f1896855b7f553400d4d7ecbf722d883e7f --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h @@ -0,0 +1,62 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_HW_H__ +#define __RTL92E_HW_H__ + +void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw); +void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb); +int rtl92ee_hw_init(struct ieee80211_hw *hw); +void rtl92ee_card_disable(struct ieee80211_hw *hw); +void rtl92ee_enable_interrupt(struct ieee80211_hw *hw); +void rtl92ee_disable_interrupt(struct ieee80211_hw *hw); +int rtl92ee_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); +void rtl92ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci); +void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl92ee_set_beacon_interval(struct ieee80211_hw *hw); +void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92ee_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level); +void rtl92ee_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl92ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); +void rtl92ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo); +void rtl92ee_bt_reg_init(struct ieee80211_hw *hw); +void rtl92ee_bt_hw_init(struct ieee80211_hw *hw); +void rtl92ee_suspend(struct ieee80211_hw *hw); +void rtl92ee_resume(struct ieee80211_hw *hw); +void rtl92ee_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da, + bool write_into_reg); +void rtl92ee_fw_clk_off_timer_callback(unsigned long data); +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/rtlwifi/rtl8192ee/led.c new file mode 100644 index 0000000000000000000000000000000000000000..8388e371c8e217b1fc5d8632b41745823ef912b2 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/led.c @@ -0,0 +1,145 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "reg.h" +#include "led.h" + +static void _rtl92ee_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, enum rtl_led_pin ledpin) +{ + pled->hw = hw; + pled->ledpin = ledpin; + pled->ledon = false; +} + +void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u32 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg = rtl_read_dword(rtlpriv , REG_GPIO_PIN_CTRL); + ledcfg &= ~BIT(13); + ledcfg |= BIT(21); + ledcfg &= ~BIT(29); + + rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, ledcfg); + + break; + case LED_PIN_LED1: + + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + pled->ledon = true; +} + +void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 ledcfg; + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + + ledcfg = rtl_read_dword(rtlpriv , REG_GPIO_PIN_CTRL); + ledcfg |= ~BIT(21); + ledcfg &= ~BIT(29); + rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, ledcfg); + + break; + case LED_PIN_LED1: + + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + pled->ledon = false; +} + +void rtl92ee_init_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + + _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); +} + +static void _rtl92ee_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0; + + switch (ledaction) { + case LED_CTL_POWER_ON: + case LED_CTL_LINK: + case LED_CTL_NO_LINK: + rtl92ee_sw_led_on(hw, pLed0); + break; + case LED_CTL_POWER_OFF: + rtl92ee_sw_led_off(hw, pLed0); + break; + default: + break; + } +} + +void rtl92ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && + (ledaction == LED_CTL_TX || + ledaction == LED_CTL_RX || + ledaction == LED_CTL_SITE_SURVEY || + ledaction == LED_CTL_LINK || + ledaction == LED_CTL_NO_LINK || + ledaction == LED_CTL_START_TO_LINK || + ledaction == LED_CTL_POWER_ON)) { + return; + } + RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction); + _rtl92ee_sw_led_control(hw, ledaction); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/rtlwifi/rtl8192ee/led.h new file mode 100644 index 0000000000000000000000000000000000000000..8ef640a2ef7f34c0b8731756898519a39343385d --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/led.h @@ -0,0 +1,34 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_LED_H__ +#define __RTL92E_LED_H__ + +void rtl92ee_init_sw_leds(struct ieee80211_hw *hw); +void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl92ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..a863a44f9e16a0574b5a455736a9ba02863a9683 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c @@ -0,0 +1,3219 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../ps.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "table.h" + +static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask); +static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw); +static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw, + u8 configtype); +static bool phy_config_bb_with_pghdrfile(struct ieee80211_hw *hw, + u8 configtype); +static void phy_init_bb_rf_register_def(struct ieee80211_hw *hw); +static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, + u32 msdelay); +static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, + u8 *step, u32 *delay); +static long _rtl92ee_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +static void rtl92ee_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl92ee_phy_set_io(struct ieee80211_hw *hw); + +u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 returnvalue, originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + returnvalue = (originalvalue & bitmask) >> bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "BBR MASK=0x%x Addr[0x%x]=0x%x\n", + bitmask, regaddr, originalvalue); + + return returnvalue; +} + +void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, + u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x)\n", + regaddr, bitmask, data); + + if (bitmask != MASKDWORD) { + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + data = ((originalvalue & (~bitmask)) | (data << bitshift)); + } + + rtl_write_dword(rtlpriv, regaddr, data); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x)\n", + regaddr, bitmask, data); +} + +u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + unsigned long flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr); + bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value); + + return readback_value; +} + +void 
rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 addr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, bitshift; + unsigned long flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + addr, bitmask, data, rfpath); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + if (bitmask != RFREG_OFFSET_MASK) { + original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr); + bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + data = (original_value & (~bitmask)) | (data << bitshift); + } + + _rtl92ee_phy_rf_serial_write(hw, rfpath, addr, data); + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + addr, bitmask, data, rfpath); +} + +static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 newoffset; + u32 tmplong, tmplong2; + u8 rfpi_enable = 0; + u32 retvalue; + + offset &= 0xff; + newoffset = offset; + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + return 0xFFFFFFFF; + } + tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); + if (rfpath == RF90_PATH_A) + tmplong2 = tmplong; + else + tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); + tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | + (newoffset << 23) | BLSSIREADEDGE; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong & (~BLSSIREADEDGE)); + mdelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); + mdelay(2); + if (rfpath == RF90_PATH_A) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + else if (rfpath == RF90_PATH_B) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + BIT(8)); + if (rfpi_enable) + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, + BLSSIREADBACKDATA); + else + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, + BLSSIREADBACKDATA); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFR-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf_rb, retvalue); + return retvalue; +} + +static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) +{ + u32 data_and_addr; + u32 newoffset; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + return; + } + offset &= 0xff; + newoffset = offset; + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFW-%d Addr[0x%x]=0x%x\n", rfpath, + pphyreg->rf3wire_offset, data_and_addr); +} + +static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask) +{ + u32 i; + + for (i = 0; i <= 31; i++) { + if (((bitmask >> i) & 0x1) == 1) + break; + } + return i; +} + +bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw) +{ + return _rtl92ee_phy_config_mac_with_headerfile(hw); +} + +bool rtl92ee_phy_bb_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool rtstatus = true; + u16 regval; + u32 tmp; + u8 crystal_cap; + + phy_init_bb_rf_register_def(hw); + regval = 
rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, + regval | BIT(13) | BIT(0) | BIT(1)); + + rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, + FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE | + FEN_BB_GLB_RSTN | FEN_BBRSTB); + + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); + + tmp = rtl_read_dword(rtlpriv, 0x4c); + rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23)); + + rtstatus = _rtl92ee_phy_bb8192ee_config_parafile(hw); + + crystal_cap = rtlpriv->efuse.eeprom_crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + return rtstatus; +} + +bool rtl92ee_phy_rf_config(struct ieee80211_hw *hw) +{ + return rtl92ee_phy_rf6052_config(hw); +} + +static bool _check_condition(struct ieee80211_hw *hw, + const u32 condition) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ + u32 _interface = rtlhal->interface; + u32 _platform = 0x08;/*SupportPlatform */ + u32 cond = condition; + + if (condition == 0xCDCDCDCD) + return true; + + cond = condition & 0xFF; + if ((_board != cond) && (cond != 0xFF)) + return false; + + cond = condition & 0xFF00; + cond = cond >> 8; + if ((_interface & cond) == 0 && cond != 0x07) + return false; + + cond = condition & 0xFF0000; + cond = cond >> 16; + if ((_platform & cond) == 0 && cond != 0x0F) + return false; + + return true; +} + +static void _rtl92ee_config_rf_reg(struct ieee80211_hw *hw, u32 addr, u32 data, + enum radio_path rfpath, u32 regaddr) +{ + if (addr == 0xfe || addr == 0xffe) { + mdelay(50); + } else { + rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data); + udelay(1); + + if (addr == 0xb6) { + u32 getvalue; + u8 count = 0; + + getvalue = rtl_get_rfreg(hw, rfpath, addr, MASKDWORD); + udelay(1); + + while ((getvalue >> 8) != (data >> 8)) { + count++; + rtl_set_rfreg(hw, rfpath, regaddr, + RFREG_OFFSET_MASK, data); + udelay(1); + getvalue = rtl_get_rfreg(hw, rfpath, addr, + MASKDWORD); + if (count > 5) + break; + } + } + + if (addr == 0xb2) { + u32 getvalue; + u8 count = 0; + + getvalue = rtl_get_rfreg(hw, rfpath, addr, MASKDWORD); + udelay(1); + + while (getvalue != data) { + count++; + rtl_set_rfreg(hw, rfpath, regaddr, + RFREG_OFFSET_MASK, data); + udelay(1); + rtl_set_rfreg(hw, rfpath, 0x18, + RFREG_OFFSET_MASK, 0x0fc07); + udelay(1); + getvalue = rtl_get_rfreg(hw, rfpath, addr, + MASKDWORD); + if (count > 5) + break; + } + } + } +} + +static void _rtl92ee_config_rf_radio_a(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1000; /*RF Content: radio_a_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl92ee_config_rf_reg(hw, addr, data, RF90_PATH_A, + addr | maskforphyset); +} + +static void _rtl92ee_config_rf_radio_b(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1001; /*RF Content: radio_b_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl92ee_config_rf_reg(hw, addr, data, RF90_PATH_B, + addr | maskforphyset); +} + +static void _rtl92ee_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if (addr == 0xfe) + mdelay(50); + else if (addr == 0xfd) + mdelay(5); + else if (addr == 0xfc) + mdelay(1); + else if (addr == 0xfb) + udelay(50); + else if (addr == 0xfa) + udelay(5); + else if (addr == 0xf9) + udelay(1); + else + rtl_set_bbreg(hw, addr, MASKDWORD , data); + + udelay(1); +} + +static void 
_rtl92ee_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + u8 band = BAND_ON_2_4G, rf = 0, txnum = 0, sec = 0; + + for (; band <= BAND_ON_5G; ++band) + for (; rf < TX_PWR_BY_RATE_NUM_RF; ++rf) + for (; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum) + for (; sec < TX_PWR_BY_RATE_NUM_SECTION; ++sec) + rtlphy->tx_power_by_rate_offset + [band][rf][txnum][sec] = 0; +} + +static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, + u8 rate_section, u8 txnum, + u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d\n", path); + return; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; + break; + case OFDM: + rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; + break; + case HT_MCS8_MCS15: + rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d\n", band); + } +} + +static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, u8 txnum, + u8 rate_section) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 value = 0; + + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d\n", path); + return 0; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; + break; + case OFDM: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d()\n", band); + } + return value; +} + +static void _rtl92ee_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u16 raw = 0; + u8 base = 0, path = 0; + + for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { + if (path == RF90_PATH_A) { + raw = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & + 0xFF; + base = (raw >> 4) * 10 + (raw & 0xF); + _rtl92ee_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, + path, CCK, RF_1TX, + base); + } else if (path == RF90_PATH_B) { + raw = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & + 0xFF; + base = (raw >> 4) * 10 + (raw & 0xF); + _rtl92ee_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, + path, CCK, RF_1TX, + base); + } + raw = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][1] >> 24) & 0xFF; + base = (raw >> 4) * 10 + (raw & 0xF); + _rtl92ee_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, + OFDM, RF_1TX, base); + + raw = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][5] >> 24) & 0xFF; + base = (raw >> 4) * 10 + 
(raw & 0xF); + _rtl92ee_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, + HT_MCS0_MCS7, RF_1TX, + base); + + raw = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_2TX][7] >> 24) & 0xFF; + base = (raw >> 4) * 10 + (raw & 0xF); + _rtl92ee_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, + HT_MCS8_MCS15, RF_2TX, + base); + } +} + +static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, + u8 end, u8 base) +{ + char i = 0; + u8 tmp = 0; + u32 temp_data = 0; + + for (i = 3; i >= 0; --i) { + if (i >= start && i <= end) { + /* Get the exact value */ + tmp = (u8)(*data >> (i * 8)) & 0xF; + tmp += ((u8)((*data >> (i * 8 + 4)) & 0xF)) * 10; + + /* Change the value to a relative value */ + tmp = (tmp > base) ? tmp - base : base - tmp; + } else { + tmp = (u8)(*data >> (i * 8)) & 0xFF; + } + temp_data <<= 8; + temp_data |= tmp; + } + *data = temp_data; +} + +static void phy_convert_txpwr_dbm_to_rel_val(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 base = 0, rf = 0, band = BAND_ON_2_4G; + + for (rf = RF90_PATH_A; rf <= RF90_PATH_B; ++rf) { + if (rf == RF90_PATH_A) { + base = _rtl92ee_phy_get_txpower_by_rate_base(hw, band, + rf, RF_1TX, + CCK); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset + [band][rf][RF_1TX][2], + 1, 1, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset + [band][rf][RF_1TX][3], + 1, 3, base); + } else if (rf == RF90_PATH_B) { + base = _rtl92ee_phy_get_txpower_by_rate_base(hw, band, + rf, RF_1TX, + CCK); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset + [band][rf][RF_1TX][3], + 0, 0, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset + [band][rf][RF_1TX][2], + 1, 3, base); + } + base = _rtl92ee_phy_get_txpower_by_rate_base(hw, band, rf, + RF_1TX, OFDM); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_1TX][0], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_1TX][1], + 0, 3, base); + + base = _rtl92ee_phy_get_txpower_by_rate_base(hw, band, rf, + RF_1TX, + HT_MCS0_MCS7); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_1TX][4], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_1TX][5], + 0, 3, base); + + base = _rtl92ee_phy_get_txpower_by_rate_base(hw, band, rf, + RF_2TX, + HT_MCS8_MCS15); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_2TX][6], + 0, 3, base); + + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[band][rf][RF_2TX][7], + 0, 3, base); + } + + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "<==phy_convert_txpwr_dbm_to_rel_val()\n"); +} + +static void _rtl92ee_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw) +{ + _rtl92ee_phy_store_txpower_by_rate_base(hw); + phy_convert_txpwr_dbm_to_rel_val(hw); +} + +static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + bool rtstatus; + + rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + return false; + } + + 
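
/*
 * Editor's note (illustrative aside, not part of the patch): the helpers
 * above store each per-rate TX power as two decimal digits packed into one
 * byte (high nibble = tens, low nibble = units) and then rewrite each byte
 * as an offset relative to a per-section base value.  The standalone sketch
 * below walks one packed 32-bit word through that conversion, mirroring the
 * arithmetic of _phy_convert_txpower_dbm_to_relative_value(); the helper
 * names and sample values here are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

/* Decode one packed byte: high nibble is tens, low nibble is units. */
static uint8_t decode_pwr_byte(uint8_t raw)
{
	return (uint8_t)((raw >> 4) * 10 + (raw & 0xF));
}

/*
 * Rewrite bytes [start, end] of a packed word as |value - base| and leave
 * the remaining bytes untouched; byte 0 stays in the lowest position.
 */
static uint32_t to_relative(uint32_t data, int start, int end, uint8_t base)
{
	uint32_t out = 0;
	int i;

	for (i = 3; i >= 0; --i) {
		uint8_t byte = (data >> (i * 8)) & 0xFF;
		uint8_t tmp;

		if (i >= start && i <= end) {
			tmp = decode_pwr_byte(byte);
			tmp = (tmp > base) ? tmp - base : base - tmp;
		} else {
			tmp = byte;
		}
		out = (out << 8) | tmp;
	}
	return out;
}

int main(void)
{
	/* 0x32 packs "32" (3 * 10 + 2); with base 30 each byte becomes 2. */
	printf("0x%08X\n", (unsigned int)to_relative(0x32323232u, 0, 3, 30));
	return 0;
}
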
_rtl92ee_phy_init_tx_power_by_rate(hw); + if (!rtlefuse->autoload_failflag) { + rtlphy->pwrgroup_cnt = 0; + rtstatus = + phy_config_bb_with_pghdrfile(hw, BASEBAND_CONFIG_PHY_REG); + } + _rtl92ee_phy_txpower_by_rate_configuration(hw); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + return false; + } + rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + return false; + } + rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); + + return true; +} + +static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8192EMACPHY_Array\n"); + arraylength = RTL8192EE_MAC_ARRAY_LEN; + ptrarray = RTL8192EE_MAC_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Img:RTL8192EE_MAC_ARRAY LEN %d\n" , arraylength); + for (i = 0; i < arraylength; i = i + 2) + rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); + return true; +} + +#define READ_NEXT_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = array[i]; \ + v2 = array[i+1]; \ + } while (0) + +static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw, + u8 configtype) +{ + int i; + u32 *array; + u16 len; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + len = RTL8192EE_PHY_REG_ARRAY_LEN; + array = RTL8192EE_PHY_REG_ARRAY; + + for (i = 0; i < len; i = i + 2) { + v1 = array[i]; + v2 = array[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl92ee_config_bb_reg(hw, v1, v2); + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= len - 2) + break; + + if (!_check_condition(hw , array[i])) { + /*Discard the following pairs*/ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else { + /* Configure matched pairs and + * skip to end of if-else. + */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + _rtl92ee_config_bb_reg(hw, v1, + v2); + READ_NEXT_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < len - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + len = RTL8192EE_AGC_TAB_ARRAY_LEN; + array = RTL8192EE_AGC_TAB_ARRAY; + + for (i = 0; i < len; i = i + 2) { + v1 = array[i]; + v2 = array[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, array[i], MASKDWORD, + array[i + 1]); + udelay(1); + continue; + } else{/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= len - 2) + break; + + if (!_check_condition(hw , array[i])) { + /*Discard the following pairs*/ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < len - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else { + /* Configure matched pairs and + * skip to end of if-else. 
+ */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < len - 2) { + rtl_set_bbreg(hw, + array[i], + MASKDWORD, + array[i + 1]); + udelay(1); + READ_NEXT_PAIR(v1 , v2 , i); + } + + while (v2 != 0xDEAD && + i < len - 2) { + READ_NEXT_PAIR(v1 , v2 , i); + } + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", + array[i], + array[i + 1]); + } + } + return true; +} + +static u8 _rtl92ee_get_rate_section_index(u32 regaddr) +{ + u8 index = 0; + + switch (regaddr) { + case RTXAGC_A_RATE18_06: + case RTXAGC_B_RATE18_06: + index = 0; + break; + case RTXAGC_A_RATE54_24: + case RTXAGC_B_RATE54_24: + index = 1; + break; + case RTXAGC_A_CCK1_MCS32: + case RTXAGC_B_CCK1_55_MCS32: + index = 2; + break; + case RTXAGC_B_CCK11_A_CCK2_11: + index = 3; + break; + case RTXAGC_A_MCS03_MCS00: + case RTXAGC_B_MCS03_MCS00: + index = 4; + break; + case RTXAGC_A_MCS07_MCS04: + case RTXAGC_B_MCS07_MCS04: + index = 5; + break; + case RTXAGC_A_MCS11_MCS08: + case RTXAGC_B_MCS11_MCS08: + index = 6; + break; + case RTXAGC_A_MCS15_MCS12: + case RTXAGC_B_MCS15_MCS12: + index = 7; + break; + default: + regaddr &= 0xFFF; + if (regaddr >= 0xC20 && regaddr <= 0xC4C) + index = (u8)((regaddr - 0xC20) / 4); + else if (regaddr >= 0xE20 && regaddr <= 0xE4C) + index = (u8)((regaddr - 0xE20) / 4); + break; + }; + return index; +} + +static void _rtl92ee_store_tx_power_by_rate(struct ieee80211_hw *hw, + enum band_type band, + enum radio_path rfpath, + u32 txnum, u32 regaddr, + u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 section = _rtl92ee_get_rate_section_index(regaddr); + + if (band != BAND_ON_2_4G && band != BAND_ON_5G) { + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band); + return; + } + + if (rfpath > MAX_RF_PATH - 1) { + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, + "Invalid RfPath %d\n", rfpath); + return; + } + if (txnum > MAX_RF_PATH - 1) { + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum); + return; + } + + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][section] = data; +} + +static bool phy_config_bb_with_pghdrfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0; + + phy_regarray_pg_len = RTL8192EE_PHY_REG_ARRAY_PG_LEN; + phy_regarray_table_pg = RTL8192EE_PHY_REG_ARRAY_PG; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i = i + 6) { + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + v4 = phy_regarray_table_pg[i+3]; + v5 = phy_regarray_table_pg[i+4]; + v6 = phy_regarray_table_pg[i+5]; + + if (v1 < 0xcdcdcdcd) { + _rtl92ee_store_tx_power_by_rate(hw, v1, v2, v3, + v4, v5, v6); + continue; + } + } + } else { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "configtype != BaseBand_Config_PHY_REG\n"); + } + return true; +} + +#define READ_NEXT_RF_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = array[i]; \ + v2 = array[i+1]; \ + } while (0) + +bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *array; + u16 len; + u32 v1 = 0, v2 = 0; + + switch (rfpath) { + case RF90_PATH_A: + len = RTL8192EE_RADIOA_ARRAY_LEN; + array = RTL8192EE_RADIOA_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 
+ "Radio_A:RTL8192EE_RADIOA_ARRAY %d\n" , len); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + for (i = 0; i < len; i = i + 2) { + v1 = array[i]; + v2 = array[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl92ee_config_rf_radio_a(hw, v1, v2); + continue; + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= len - 2) + break; + + if (!_check_condition(hw , array[i])) { + /*Discard the following pairs*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + READ_NEXT_RF_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else { + /* Configure matched pairs and + * skip to end of if-else. + */ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + _rtl92ee_config_rf_radio_a(hw, + v1, + v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < len - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + break; + + case RF90_PATH_B: + len = RTL8192EE_RADIOB_ARRAY_LEN; + array = RTL8192EE_RADIOB_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Radio_A:RTL8192EE_RADIOB_ARRAY %d\n" , len); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + for (i = 0; i < len; i = i + 2) { + v1 = array[i]; + v2 = array[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl92ee_config_rf_radio_b(hw, v1, v2); + continue; + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= len - 2) + break; + + if (!_check_condition(hw , array[i])) { + /*Discard the following pairs*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + READ_NEXT_RF_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else { + /* Configure matched pairs and + * skip to end of if-else. 
+ */ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < len - 2) { + _rtl92ee_config_rf_radio_b(hw, + v1, + v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < len - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + break; + case RF90_PATH_C: + case RF90_PATH_D: + break; + } + return true; +} + +void rtl92ee_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->default_initialgain[0] = + (u8)rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8)rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8)rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + + rtlphy->framesync = (u8)rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR3, MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR2, MASKDWORD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync); +} + +static void phy_init_bb_rf_register_def(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = + RFPGA0_XA_LSSIPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = + RFPGA0_XB_LSSIPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; +} + +void rtl92ee_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = _rtl92ee_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl92ee_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > + txpwr_dbm) + txpwr_dbm = _rtl92ee_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl92ee_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = _rtl92ee_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static u8 _rtl92ee_phy_get_ratesection_intxpower_byrate(enum radio_path path, + u8 rate) +{ + u8 rate_section = 0; + + switch (rate) { + case DESC92C_RATE1M: + rate_section = 
2; + break; + case DESC92C_RATE2M: + case DESC92C_RATE5_5M: + if (path == RF90_PATH_A) + rate_section = 3; + else if (path == RF90_PATH_B) + rate_section = 2; + break; + case DESC92C_RATE11M: + rate_section = 3; + break; + case DESC92C_RATE6M: + case DESC92C_RATE9M: + case DESC92C_RATE12M: + case DESC92C_RATE18M: + rate_section = 0; + break; + case DESC92C_RATE24M: + case DESC92C_RATE36M: + case DESC92C_RATE48M: + case DESC92C_RATE54M: + rate_section = 1; + break; + case DESC92C_RATEMCS0: + case DESC92C_RATEMCS1: + case DESC92C_RATEMCS2: + case DESC92C_RATEMCS3: + rate_section = 4; + break; + case DESC92C_RATEMCS4: + case DESC92C_RATEMCS5: + case DESC92C_RATEMCS6: + case DESC92C_RATEMCS7: + rate_section = 5; + break; + case DESC92C_RATEMCS8: + case DESC92C_RATEMCS9: + case DESC92C_RATEMCS10: + case DESC92C_RATEMCS11: + rate_section = 6; + break; + case DESC92C_RATEMCS12: + case DESC92C_RATEMCS13: + case DESC92C_RATEMCS14: + case DESC92C_RATEMCS15: + rate_section = 7; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + return rate_section; +} + +static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw, + enum band_type band, + enum radio_path rf, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 shift = 0, sec, tx_num; + char diff = 0; + + sec = _rtl92ee_phy_get_ratesection_intxpower_byrate(rf, rate); + tx_num = RF_TX_NUM_NONIMPLEMENT; + + if (tx_num == RF_TX_NUM_NONIMPLEMENT) { + if ((rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15)) + tx_num = RF_2TX; + else + tx_num = RF_1TX; + } + + switch (rate) { + case DESC92C_RATE1M: + case DESC92C_RATE6M: + case DESC92C_RATE24M: + case DESC92C_RATEMCS0: + case DESC92C_RATEMCS4: + case DESC92C_RATEMCS8: + case DESC92C_RATEMCS12: + shift = 0; + break; + case DESC92C_RATE2M: + case DESC92C_RATE9M: + case DESC92C_RATE36M: + case DESC92C_RATEMCS1: + case DESC92C_RATEMCS5: + case DESC92C_RATEMCS9: + case DESC92C_RATEMCS13: + shift = 8; + break; + case DESC92C_RATE5_5M: + case DESC92C_RATE12M: + case DESC92C_RATE48M: + case DESC92C_RATEMCS2: + case DESC92C_RATEMCS6: + case DESC92C_RATEMCS10: + case DESC92C_RATEMCS14: + shift = 16; + break; + case DESC92C_RATE11M: + case DESC92C_RATE18M: + case DESC92C_RATE54M: + case DESC92C_RATEMCS3: + case DESC92C_RATEMCS7: + case DESC92C_RATEMCS11: + case DESC92C_RATEMCS15: + shift = 24; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + + diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rf][tx_num][sec] >> + shift) & 0xff; + + return diff; +} + +static u8 _rtl92ee_get_txpower_index(struct ieee80211_hw *hw, + enum radio_path rfpath, u8 rate, + u8 bw, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + u8 index = (channel - 1); + u8 tx_power = 0; + u8 diff = 0; + + if (channel < 1 || channel > 14) { + index = 0; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_DMESG, + "Illegal channel!!\n"); + } + + if (IS_CCK_RATE(rate)) + tx_power = rtlefuse->txpwrlevel_cck[rfpath][index]; + else if (DESC92C_RATE6M <= rate) + tx_power = rtlefuse->txpwrlevel_ht40_1s[rfpath][index]; + + /* OFDM-1T*/ + if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M && + !IS_CCK_RATE(rate)) + tx_power += rtlefuse->txpwr_legacyhtdiff[rfpath][TX_1S]; + + /* BW20-1S, BW20-2S */ + if (bw == HT_CHANNEL_WIDTH_20) { + if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15) + tx_power += rtlefuse->txpwr_ht20diff[rfpath][TX_1S]; + if (DESC92C_RATEMCS8 <= rate && rate <= 
DESC92C_RATEMCS15) + tx_power += rtlefuse->txpwr_ht20diff[rfpath][TX_2S]; + } else if (bw == HT_CHANNEL_WIDTH_20_40) {/* BW40-1S, BW40-2S */ + if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15) + tx_power += rtlefuse->txpwr_ht40diff[rfpath][TX_1S]; + if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15) + tx_power += rtlefuse->txpwr_ht40diff[rfpath][TX_2S]; + } + + if (rtlefuse->eeprom_regulatory != 2) + diff = _rtl92ee_get_txpower_by_rate(hw, BAND_ON_2_4G, + rfpath, rate); + + tx_power += diff; + + if (tx_power > MAX_POWER_INDEX) + tx_power = MAX_POWER_INDEX; + + return tx_power; +} + +static void _rtl92ee_set_txpower_index(struct ieee80211_hw *hw, u8 pwr_idx, + enum radio_path rfpath, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rfpath == RF90_PATH_A) { + switch (rate) { + case DESC92C_RATE1M: + rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE2M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE11M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATE6M: + rtl_set_bbreg(hw, RTXAGC_A_RATE18_06, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATE9M: + rtl_set_bbreg(hw, RTXAGC_A_RATE18_06, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE12M: + rtl_set_bbreg(hw, RTXAGC_A_RATE18_06, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE18M: + rtl_set_bbreg(hw, RTXAGC_A_RATE18_06, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATE24M: + rtl_set_bbreg(hw, RTXAGC_A_RATE54_24, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATE36M: + rtl_set_bbreg(hw, RTXAGC_A_RATE54_24, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE48M: + rtl_set_bbreg(hw, RTXAGC_A_RATE54_24, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE54M: + rtl_set_bbreg(hw, RTXAGC_A_RATE54_24, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS1: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS7: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE3, + 
pwr_idx); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Rate!!\n"); + break; + } + } else if (rfpath == RF90_PATH_B) { + switch (rate) { + case DESC92C_RATE1M: + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE2M: + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATE11M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATE6M: + rtl_set_bbreg(hw, RTXAGC_B_RATE18_06, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATE9M: + rtl_set_bbreg(hw, RTXAGC_B_RATE18_06, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE12M: + rtl_set_bbreg(hw, RTXAGC_B_RATE18_06, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE18M: + rtl_set_bbreg(hw, RTXAGC_B_RATE18_06, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATE24M: + rtl_set_bbreg(hw, RTXAGC_B_RATE54_24, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATE36M: + rtl_set_bbreg(hw, RTXAGC_B_RATE54_24, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATE48M: + rtl_set_bbreg(hw, RTXAGC_B_RATE54_24, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATE54M: + rtl_set_bbreg(hw, RTXAGC_B_RATE54_24, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS1: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS7: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE3, + pwr_idx); + break; + case DESC92C_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE0, + pwr_idx); + break; + case DESC92C_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE1, + pwr_idx); + break; + case DESC92C_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE2, + pwr_idx); + break; + case DESC92C_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE3, + pwr_idx); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Rate!!\n"); + break; + } + } else { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n"); + } +} + +static void phy_set_txpower_index_by_rate_array(struct ieee80211_hw *hw, + enum radio_path rfpath, u8 bw, + u8 channel, u8 *rates, u8 size) +{ + u8 i; + u8 power_index; + + for (i = 0; i < size; i++) { + power_index = _rtl92ee_get_txpower_index(hw, rfpath, rates[i], + bw, channel); + _rtl92ee_set_txpower_index(hw, power_index, rfpath, rates[i]); + } +} + +static void phy_set_txpower_index_by_rate_section(struct ieee80211_hw *hw, + enum radio_path rfpath, 
+ u8 channel, + enum rate_section section) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (section == CCK) { + u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M, + DESC92C_RATE5_5M, DESC92C_RATE11M}; + if (rtlhal->current_bandtype == BAND_ON_2_4G) + phy_set_txpower_index_by_rate_array(hw, rfpath, + rtlphy->current_chan_bw, + channel, cck_rates, 4); + } else if (section == OFDM) { + u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M, + DESC92C_RATE12M, DESC92C_RATE18M, + DESC92C_RATE24M, DESC92C_RATE36M, + DESC92C_RATE48M, DESC92C_RATE54M}; + phy_set_txpower_index_by_rate_array(hw, rfpath, + rtlphy->current_chan_bw, + channel, ofdm_rates, 8); + } else if (section == HT_MCS0_MCS7) { + u8 ht_rates1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1, + DESC92C_RATEMCS2, DESC92C_RATEMCS3, + DESC92C_RATEMCS4, DESC92C_RATEMCS5, + DESC92C_RATEMCS6, DESC92C_RATEMCS7}; + phy_set_txpower_index_by_rate_array(hw, rfpath, + rtlphy->current_chan_bw, + channel, ht_rates1t, 8); + } else if (section == HT_MCS8_MCS15) { + u8 ht_rates2t[] = {DESC92C_RATEMCS8, DESC92C_RATEMCS9, + DESC92C_RATEMCS10, DESC92C_RATEMCS11, + DESC92C_RATEMCS12, DESC92C_RATEMCS13, + DESC92C_RATEMCS14, DESC92C_RATEMCS15}; + phy_set_txpower_index_by_rate_array(hw, rfpath, + rtlphy->current_chan_bw, + channel, ht_rates2t, 8); + } else + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, + "Invalid RateSection %d\n", section); +} + +void rtl92ee_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtl_priv(hw)->phy; + enum radio_path rfpath; + + if (!rtlefuse->txpwr_fromeprom) + return; + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + phy_set_txpower_index_by_rate_section(hw, rfpath, + channel, CCK); + phy_set_txpower_index_by_rate_section(hw, rfpath, + channel, OFDM); + phy_set_txpower_index_by_rate_section(hw, rfpath, + channel, + HT_MCS0_MCS7); + + if (rtlphy->num_total_rfpath >= 2) + phy_set_txpower_index_by_rate_section(hw, + rfpath, channel, + HT_MCS8_MCS15); + } +} + +static long _rtl92ee_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} + +void rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, + (u8 *)&iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Unknown Scan Backup operation.\n"); + break; + } + } +} + +void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 reg_bw_opmode; + u8 reg_prsr_rsc; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + 
"Switch to %s bandwidth\n", + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz"); + + if (is_hal_stop(rtlhal)) { + rtlphy->set_bwmode_inprogress = false; + return; + } + + reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE); + reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + reg_bw_opmode |= BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + break; + case HT_CHANNEL_WIDTH_20_40: + reg_bw_opmode &= ~BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + reg_prsr_rsc = (reg_prsr_rsc & 0x90) | + (mac->cur_40_prime_sc << 5); + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + break; + } + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, ROFDM0_TXPSEUDONOISEWGT, + (BIT(31) | BIT(30)), 0); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND, + (mac->cur_40_prime_sc >> 1)); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, + mac->cur_40_prime_sc); + + rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)), + (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + break; + } + rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n"); +} + +void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_bw = rtlphy->current_chan_bw; + + if (rtlphy->set_bwmode_inprogress) + return; + rtlphy->set_bwmode_inprogress = true; + if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl92ee_phy_set_bw_mode_callback(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "false driver sleep or unload\n"); + rtlphy->set_bwmode_inprogress = false; + rtlphy->current_chan_bw = tmp_bw; + } +} + +void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 delay; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "switch to channel%d\n", rtlphy->current_channel); + if (is_hal_stop(rtlhal)) + return; + do { + if (!rtlphy->sw_chnl_inprogress) + break; + if (!_rtl92ee_phy_sw_chnl_step_by_step + (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage, + &rtlphy->sw_chnl_step, &delay)) { + if (delay > 0) + mdelay(delay); + else + continue; + } else { + rtlphy->sw_chnl_inprogress = false; + } + break; + } while (true); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); +} + +u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + RT_ASSERT((rtlphy->current_channel <= 14), + "WIRELESS_MODE_G but channel>14"); + rtlphy->sw_chnl_inprogress = true; + rtlphy->sw_chnl_stage = 0; + 
rtlphy->sw_chnl_step = 0; + if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl92ee_phy_sw_chnl_callback(hw); + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false schdule workitem current channel %d\n", + rtlphy->current_channel); + rtlphy->sw_chnl_inprogress = false; + } else { + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false driver sleep or unload\n"); + rtlphy->sw_chnl_inprogress = false; + } + return 1; +} + +static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; + u32 precommoncmdcnt; + struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; + u32 postcommoncmdcnt; + struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; + u32 rfdependcmdcnt; + struct swchnlcmd *currentcmd = NULL; + u8 rfpath; + u8 num_total_rfpath = rtlphy->num_total_rfpath; + + precommoncmdcnt = 0; + _rtl92ee_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, + CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); + _rtl92ee_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + + postcommoncmdcnt = 0; + + _rtl92ee_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, + MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); + + rfdependcmdcnt = 0; + + RT_ASSERT((channel >= 1 && channel <= 14), + "illegal channel for Zebra: %d\n", channel); + + _rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, + CMDID_RF_WRITEREG, + RF_CHNLBW, channel, 10); + + _rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_END, + 0, 0, 0); + + do { + switch (*stage) { + case 0: + currentcmd = &precommoncmd[*step]; + break; + case 1: + currentcmd = &rfdependcmd[*step]; + break; + case 2: + currentcmd = &postcommoncmd[*step]; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Invalid 'stage' = %d, Check it!\n" , *stage); + return true; + } + + if (currentcmd->cmdid == CMDID_END) { + if ((*stage) == 2) + return true; + (*stage)++; + (*step) = 0; + continue; + } + + switch (currentcmd->cmdid) { + case CMDID_SET_TXPOWEROWER_LEVEL: + rtl92ee_phy_set_txpower_level(hw, channel); + break; + case CMDID_WRITEPORT_ULONG: + rtl_write_dword(rtlpriv, currentcmd->para1, + currentcmd->para2); + break; + case CMDID_WRITEPORT_USHORT: + rtl_write_word(rtlpriv, currentcmd->para1, + (u16)currentcmd->para2); + break; + case CMDID_WRITEPORT_UCHAR: + rtl_write_byte(rtlpriv, currentcmd->para1, + (u8)currentcmd->para2); + break; + case CMDID_RF_WRITEREG: + for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] = + ((rtlphy->rfreg_chnlval[rfpath] & + 0xfffff00) | currentcmd->para2); + + rtl_set_rfreg(hw, (enum radio_path)rfpath, + currentcmd->para1, + 0x3ff, + rtlphy->rfreg_chnlval[rfpath]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + break; + } while (true); + + (*delay) = currentcmd->msdelay; + (*step)++; + return false; +} + +static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, u32 msdelay) +{ + struct swchnlcmd *pcmd; + + if (cmdtable == NULL) { + RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + return false; + } + + if (cmdtableidx >= cmdtablesz) + return false; + + pcmd = cmdtable + cmdtableidx; + 
pcmd->cmdid = cmdid; + pcmd->para1 = para1; + pcmd->para2 = para2; + pcmd->msdelay = msdelay; + return true; +} + +static u8 _rtl92ee_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) +{ + u32 reg_eac, reg_e94, reg_e9c; + u8 result = 0x00; + /* path-A IQK setting */ + /* PA/PAD controlled by 0x0 */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x180); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140303); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68160000); + + /*LO calibration setting*/ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + + /*One shot, path A LOK & IQK*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + return result; +} + +static u8 _rtl92ee_phy_path_b_iqk(struct ieee80211_hw *hw) +{ + u32 reg_eac, reg_eb4, reg_ebc; + u8 result = 0x00; + + /* PA/PAD controlled by 0x0 */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_B, 0xdf, RFREG_OFFSET_MASK, 0x180); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); + + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x821403e2); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68160000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + + /*One shot, path B LOK & IQK*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD); + reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD); + + if (!(reg_eac & BIT(31)) && + (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && + (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + return result; +} + +static u8 _rtl92ee_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) +{ + u32 reg_eac, reg_e94, reg_e9c, reg_ea4 , u32temp; + u8 result = 0x00; + + /*Get TXIMR Setting*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b); + + /*PA/PAD control by 0x56, and set = 0x0*/ + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x980); + rtl_set_rfreg(hw, 
RF90_PATH_A, 0x56, RFREG_OFFSET_MASK, 0x51000); + + /*enter IQK mode*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /*path a IQK setting*/ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160c1f); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68160c1f); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); + + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) { + result |= 0x01; + } else { + /* PA/PAD controlled by 0x0 */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x180); + return result; + } + + u32temp = 0x80007C00 | (reg_e94 & 0x3FF0000) | + ((reg_e9c & 0x3FF0000) >> 16); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32temp); + /*RX IQK*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa); + + /*PA/PAD control by 0x56, and set = 0x0*/ + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x980); + rtl_set_rfreg(hw, RF90_PATH_A, 0x56, RFREG_OFFSET_MASK, 0x51000); + + /*enter IQK mode*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /*path a IQK setting*/ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160c1f); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28160c1f); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a891); + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + /*Check failed*/ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD); + + /*PA/PAD controlled by 0x0*/ + /*leave IQK mode*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x180); + /*if Tx is OK, check whether Rx is OK*/ + if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + + return result; +} + +static u8 _rtl92ee_phy_path_b_rx_iqk(struct 
ieee80211_hw *hw, bool config_pathb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 reg_eac, reg_eb4, reg_ebc, reg_ecc, reg_ec4, u32temp; + u8 result = 0x00; + + /*Get TXIMR Setting*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + rtl_set_rfreg(hw, RF90_PATH_B, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_B, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_B, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_B, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b); + + /*PA/PAD all off*/ + rtl_set_rfreg(hw, RF90_PATH_B, 0xdf, RFREG_OFFSET_MASK, 0x980); + rtl_set_rfreg(hw, RF90_PATH_B, 0x56, RFREG_OFFSET_MASK, 0x51000); + + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /*path a IQK setting*/ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82160c1f); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68160c1f); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); + + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_eb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD); + reg_ebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD); + + if (!(reg_eac & BIT(31)) && + (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && + (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) { + result |= 0x01; + } else { + /* PA/PAD controlled by 0x0 */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_B, 0xdf, RFREG_OFFSET_MASK, 0x180); + return result; + } + + u32temp = 0x80007C00 | (reg_eb4 & 0x3FF0000) | + ((reg_ebc & 0x3FF0000) >> 16); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32temp); + /*RX IQK*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_B, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + + rtl_set_rfreg(hw, RF90_PATH_B, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_B, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_B, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa); + + /*PA/PAD all off*/ + rtl_set_rfreg(hw, RF90_PATH_B, 0xdf, RFREG_OFFSET_MASK, 0x980); + rtl_set_rfreg(hw, RF90_PATH_B, 0x56, RFREG_OFFSET_MASK, 0x51000); + + /*enter IQK mode*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /*path b IQK setting*/ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82160c1f); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28160c1f); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a891); + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, 
MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + /*Check failed*/ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_ec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD); + reg_ecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD); + /*PA/PAD controlled by 0x0*/ + /*leave IQK mode*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_B, 0xdf, RFREG_OFFSET_MASK, 0x180); + /*if Tx is OK, check whether Rx is OK*/ + if (!(reg_eac & BIT(30)) && + (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_ecc & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + else + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "Path B Rx IQK fail!!\n"); + + return result; +} + +static void _rtl92ee_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, + bool b_iqk_ok, long result[][8], + u8 final_candidate, + bool btxonly) +{ + u32 oldval_0, x, tx0_a, reg; + long y, tx0_c; + + if (final_candidate == 0xFF) { + return; + } else if (b_iqk_ok) { + oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][0]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx0_a = (x * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), + ((x * oldval_0 >> 7) & 0x1)); + y = result[final_candidate][1]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx0_c = (y * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, + ((tx0_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, + (tx0_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), + ((y * oldval_0 >> 7) & 0x1)); + + if (btxonly) + return; + + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + + reg = (result[final_candidate][3] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg); + } +} + +static void _rtl92ee_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, + bool b_iqk_ok, long result[][8], + u8 final_candidate, + bool btxonly) +{ + u32 oldval_1, x, tx1_a, reg; + long y, tx1_c; + + if (final_candidate == 0xFF) { + return; + } else if (b_iqk_ok) { + oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][4]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx1_a = (x * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx1_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27), + ((x * oldval_1 >> 7) & 0x1)); + y = result[final_candidate][5]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx1_c = (y * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, + ((tx1_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, + (tx1_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25), + ((y * oldval_1 >> 7) & 0x1)); + + if (btxonly) + return; + + reg = result[final_candidate][6]; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); + + reg = result[final_candidate][7] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); + + reg = (result[final_candidate][7] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0xF0000000, reg); + } +} + +static void _rtl92ee_phy_save_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 registernum) +{ + u32 i; + + for (i = 0; i < registernum; i++) + addabackup[i] = rtl_get_bbreg(hw, addareg[i], 
MASKDWORD); +} + +static void _rtl92ee_phy_save_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); + + macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); +} + +static void _rtl92ee_phy_reload_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 regiesternum) +{ + u32 i; + + for (i = 0; i < regiesternum; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); +} + +static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], (u8)macbackup[i]); + rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); +} + +static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, + bool is_patha_on, bool is2t) +{ + u32 pathon; + u32 i; + + pathon = is_patha_on ? 0x0fc01616 : 0x0fc01616; + if (!is2t) { + pathon = 0x0fc01616; + rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616); + } else { + rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); + } + + for (i = 1; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon); +} + +static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + rtl_set_bbreg(hw, 0x520, 0x00ff0000, 0xff); +} + +static void _rtl92ee_phy_path_a_standby(struct ieee80211_hw *hw) +{ + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0); + rtl_set_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK, 0x10000); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); +} + +static bool _rtl92ee_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) +{ + u32 i, j, diff, simularity_bitmap, bound; + + u8 final_candidate[2] = { 0xFF, 0xFF }; + bool bresult = true/*, is2t = true*/; + s32 tmp1, tmp2; + + bound = 8; + + simularity_bitmap = 0; + + for (i = 0; i < bound; i++) { + if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) { + if ((result[c1][i] & 0x00000200) != 0) + tmp1 = result[c1][i] | 0xFFFFFC00; + else + tmp1 = result[c1][i]; + + if ((result[c2][i] & 0x00000200) != 0) + tmp2 = result[c2][i] | 0xFFFFFC00; + else + tmp2 = result[c2][i]; + } else { + tmp1 = result[c1][i]; + tmp2 = result[c2][i]; + } + + diff = (tmp1 > tmp2) ? 
(tmp1 - tmp2) : (tmp2 - tmp1); + + if (diff > MAX_TOLERANCE) { + if ((i == 2 || i == 6) && !simularity_bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + final_candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + final_candidate[(i / 4)] = c1; + else + simularity_bitmap |= (1 << i); + } else { + simularity_bitmap |= (1 << i); + } + } + } + + if (simularity_bitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (final_candidate[i] != 0xFF) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = + result[final_candidate[i]][j]; + bresult = false; + } + } + return bresult; + } + if (!(simularity_bitmap & 0x03)) {/*path A TX OK*/ + for (i = 0; i < 2; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0x0c)) {/*path A RX OK*/ + for (i = 2; i < 4; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0x30)) {/*path B TX OK*/ + for (i = 4; i < 6; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0xc0)) {/*path B RX OK*/ + for (i = 6; i < 8; i++) + result[3][i] = result[c1][i]; + } + return false; +} + +static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, + long result[][8], u8 t, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 i; + u8 patha_ok, pathb_ok; + u8 tmp_0xc50 = (u8)rtl_get_bbreg(hw, 0xc50, MASKBYTE0); + u8 tmp_0xc58 = (u8)rtl_get_bbreg(hw, 0xc58, MASKBYTE0); + u32 adda_reg[IQK_ADDA_REG_NUM] = { + 0x85c, 0xe6c, 0xe70, 0xe74, + 0xe78, 0xe7c, 0xe80, 0xe84, + 0xe88, 0xe8c, 0xed0, 0xed4, + 0xed8, 0xedc, 0xee0, 0xeec + }; + u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + 0x522, 0x550, 0x551, 0x040 + }; + u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, + RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c, + 0x870, 0x860, + 0x864, 0x800 + }; + const u32 retrycount = 2; + + if (t == 0) { + _rtl92ee_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + _rtl92ee_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + _rtl92ee_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + } + + _rtl92ee_phy_path_adda_on(hw, adda_reg, true, is2t); + + /*BB setting*/ + rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00); + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600); + rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4); + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208200); + + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(10), 0x01); + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(26), 0x01); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10), 0x01); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(10), 0x01); + + _rtl92ee_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + /* Page B init*/ + /* IQ calibration setting*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + for (i = 0 ; i < retrycount ; i++) { + patha_ok = _rtl92ee_phy_path_a_iqk(hw, is2t); + + if (patha_ok == 0x01) { + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path A Tx IQK Success!!\n"); + result[t][0] = (rtl_get_bbreg(hw, + RTX_POWER_BEFORE_IQK_A, + MASKDWORD) & 0x3FF0000) + >> 16; + result[t][1] = (rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, + MASKDWORD) & 0x3FF0000) + >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path A Tx IQK Fail!!, ret = 0x%x\n", + patha_ok); + } + + for (i = 0 ; i < retrycount ; i++) { + 
patha_ok = _rtl92ee_phy_path_a_rx_iqk(hw, is2t); + + if (patha_ok == 0x03) { + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path A Rx IQK Success!!\n"); + result[t][2] = (rtl_get_bbreg(hw, + RRX_POWER_BEFORE_IQK_A_2, + MASKDWORD) & 0x3FF0000) + >> 16; + result[t][3] = (rtl_get_bbreg(hw, + RRX_POWER_AFTER_IQK_A_2, + MASKDWORD) & 0x3FF0000) + >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path A Rx IQK Fail!!, ret = 0x%x\n", + patha_ok); + } + + if (0x00 == patha_ok) + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path A IQK failed!!, ret = 0\n"); + if (is2t) { + _rtl92ee_phy_path_a_standby(hw); + /* Turn Path B ADDA on */ + _rtl92ee_phy_path_adda_on(hw, adda_reg, false, is2t); + + /* IQ calibration setting */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + for (i = 0 ; i < retrycount ; i++) { + pathb_ok = _rtl92ee_phy_path_b_iqk(hw); + if (pathb_ok == 0x01) { + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path B Tx IQK Success!!\n"); + result[t][4] = (rtl_get_bbreg(hw, + RTX_POWER_BEFORE_IQK_B, + MASKDWORD) & 0x3FF0000) + >> 16; + result[t][5] = (rtl_get_bbreg(hw, + RTX_POWER_AFTER_IQK_B, + MASKDWORD) & 0x3FF0000) + >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path B Tx IQK Fail!!, ret = 0x%x\n", + pathb_ok); + } + + for (i = 0 ; i < retrycount ; i++) { + pathb_ok = _rtl92ee_phy_path_b_rx_iqk(hw, is2t); + if (pathb_ok == 0x03) { + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path B Rx IQK Success!!\n"); + result[t][6] = (rtl_get_bbreg(hw, + RRX_POWER_BEFORE_IQK_B_2, + MASKDWORD) & 0x3FF0000) + >> 16; + result[t][7] = (rtl_get_bbreg(hw, + RRX_POWER_AFTER_IQK_B_2, + MASKDWORD) & 0x3FF0000) + >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path B Rx IQK Fail!!, ret = 0x%x\n", + pathb_ok); + } + + if (0x00 == pathb_ok) + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "Path B IQK failed!!, ret = 0\n"); + } + /* Back to BB mode, load original value */ + RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, + "IQK:Back to BB mode, load original value!\n"); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0); + + if (t != 0) { + /* Reload ADDA power saving parameters */ + _rtl92ee_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + + /* Reload MAC parameters */ + _rtl92ee_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + + _rtl92ee_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + + /* Restore RX initial gain */ + rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50); + rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_0xc50); + if (is2t) { + rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50); + rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_0xc58); + } + + /* load 0xe30 IQC default value */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x01008c00); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x01008c00); + } +} + +static void _rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +{ + u8 tmpreg; + u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + tmpreg = rtl_read_byte(rtlpriv, 0xd03); + + if ((tmpreg & 0x70) != 0) + rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F); + else + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + if ((tmpreg & 0x70) != 0) { + rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS); + + if (is2t) + rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00, + MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, + (rf_a_mode & 0x8FFFF) | 0x10000); + + if 
(is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + (rf_b_mode & 0x8FFFF) | 0x10000); + } + lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); + + mdelay(100); + + if ((tmpreg & 0x70) != 0) { + rtl_write_byte(rtlpriv, 0xd03, tmpreg); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + rf_b_mode); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + } +} + +static void _rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, + bool bmain, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD , "\n"); + + if (is_hal_stop(rtlhal)) { + u8 u1btmp; + + u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0); + rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + } + if (is2t) { + if (bmain) + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x1); + else + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x2); + } else { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0); + rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201); + + /* We use the RF definition of MAIN and AUX, + * left antenna and right antenna repectively. + * Default output at AUX. + */ + if (bmain) { + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 0); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 0); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, RCONFIG_RAM64x16, BIT(31), 0); + } else { + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 1); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 1); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, RCONFIG_RAM64x16, BIT(31), 1); + } + } +} + +#undef IQK_ADDA_REG_NUM +#undef IQK_DELAY_TIME + +static u8 rtl92ee_get_rightchnlplace_for_iqk(u8 chnl) +{ + u8 channel_all[59] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, + 114, 116, 118, 120, 122, 124, 126, 128, 130, + 132, 134, 136, 138, 140, 149, 151, 153, 155, + 157, 159, 161, 163, 165 + }; + u8 place = chnl; + + if (chnl > 14) { + for (place = 14; place < sizeof(channel_all); place++) { + if (channel_all[place] == chnl) + return place - 13; + } + } + + return 0; +} + +void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + long result[4][8]; + u8 i, final_candidate; + bool b_patha_ok, b_pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac; + long reg_eb4, reg_ebc, reg_ec4, reg_ecc; + bool is12simular, is13simular, is23simular; + u8 idx; + u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + ROFDM0_XARXIQIMBALANCE, + ROFDM0_XBRXIQIMBALANCE, + ROFDM0_ECCATHRESHOLD, + ROFDM0_AGCRSSITABLE, + ROFDM0_XATXIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, + ROFDM0_XCTXAFE, + ROFDM0_XDTXAFE, + ROFDM0_RXIQEXTANTA + }; + + if (b_recovery) { + _rtl92ee_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); + return; + } + + for (i = 0; i < 8; i++) { + result[0][i] = 0; + result[1][i] = 0; + result[2][i] = 0; + + if ((i == 0) || (i == 2) || (i == 4) || (i == 6)) + result[3][i] = 0x100; + else + 
result[3][i] = 0; + } + final_candidate = 0xff; + b_patha_ok = false; + b_pathb_ok = false; + is12simular = false; + is23simular = false; + is13simular = false; + for (i = 0; i < 3; i++) { + _rtl92ee_phy_iq_calibrate(hw, result, i, true); + if (i == 1) { + is12simular = _rtl92ee_phy_simularity_compare(hw, + result, + 0, 1); + if (is12simular) { + final_candidate = 0; + break; + } + } + + if (i == 2) { + is13simular = _rtl92ee_phy_simularity_compare(hw, + result, + 0, 2); + if (is13simular) { + final_candidate = 0; + break; + } + is23simular = _rtl92ee_phy_simularity_compare(hw, + result, + 1, 2); + if (is23simular) + final_candidate = 1; + else + final_candidate = 3; + } + } + + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; + } + + if (final_candidate != 0xff) { + reg_e94 = result[final_candidate][0]; + rtlphy->reg_e94 = reg_e94; + reg_e9c = result[final_candidate][1]; + rtlphy->reg_e9c = reg_e9c; + reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; + reg_eb4 = result[final_candidate][4]; + rtlphy->reg_eb4 = reg_eb4; + reg_ebc = result[final_candidate][5]; + rtlphy->reg_ebc = reg_ebc; + reg_ec4 = result[final_candidate][6]; + reg_ecc = result[final_candidate][7]; + b_patha_ok = true; + b_pathb_ok = true; + } else { + rtlphy->reg_e94 = 0x100; + rtlphy->reg_eb4 = 0x100; + rtlphy->reg_e9c = 0x0; + rtlphy->reg_ebc = 0x0; + } + + if (reg_e94 != 0) + _rtl92ee_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, + final_candidate, + (reg_ea4 == 0)); + + _rtl92ee_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok, result, + final_candidate, + (reg_ec4 == 0)); + + idx = rtl92ee_get_rightchnlplace_for_iqk(rtlphy->current_channel); + + /* To Fix BSOD when final_candidate is 0xff */ + if (final_candidate < 4) { + for (i = 0; i < IQK_MATRIX_REG_NUM; i++) + rtlphy->iqk_matrix[idx].value[0][i] = + result[final_candidate][i]; + + rtlphy->iqk_matrix[idx].iqk_done = true; + } + _rtl92ee_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); +} + +void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 timeout = 2000, timecount = 0; + + while (rtlpriv->mac80211.act_scanning && timecount < timeout) { + udelay(50); + timecount += 50; + } + + rtlphy->lck_inprogress = true; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "LCK:Start!!! 
currentband %x delay %d ms\n", + rtlhal->current_bandtype, timecount); + + _rtl92ee_phy_lc_calibrate(hw, false); + + rtlphy->lck_inprogress = false; +} + +void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +{ +} + +void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +{ + _rtl92ee_phy_set_rfpath_switch(hw, bmain, false); +} + +bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + bool postprocessing = false; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress); + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Resume DM after scan.\n"); + postprocessing = true; + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Pause DM before scan.\n"); + postprocessing = true; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + } while (false); + if (postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + rtl92ee_phy_set_io(hw); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype); + return true; +} + +static void rtl92ee_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct dig_t *dm_dig = &rtlpriv->dm_digtable; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + rtl92ee_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1); + rtl92ee_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE , "no set txpower\n"); + rtl92ee_phy_set_txpower_level(hw, rtlphy->current_channel); + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + /* 8192eebt */ + rtlphy->initgain_backup.xaagccore1 = dm_dig->cur_igvalue; + rtl92ee_dm_write_dig(hw, 0x17); + rtlphy->initgain_backup.cca = dm_dig->cur_cck_cca_thres; + rtl92ee_dm_write_cck_cca_thres(hw, 0x40); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); +} + +static void rtl92ee_phy_set_rf_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + /*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);*/ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} + +static void _rtl92ee_phy_set_rf_sleep(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); +} + +static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = 
rtl_psc(rtl_priv(hw)); + bool bresult = true; + u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; + + switch (rfpwr_state) { + case ERFON: + if ((ppsc->rfpwr_state == ERFOFF) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + bool rtstatus; + u32 initializecount = 0; + + do { + initializecount++; + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic enable\n"); + rtstatus = rtl_ps_enable_nic(hw); + } while (!rtstatus && (initializecount < 10)); + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFON sleeping:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_sleep_jiffies)); + ppsc->last_awake_jiffies = jiffies; + rtl92ee_phy_set_rf_on(hw); + } + if (mac->link_state == MAC80211_LINKED) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK); + else + rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); + break; + case ERFOFF: + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (queue_id == BEACON_QUEUE || + skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } + } + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic disable\n"); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_POWER_OFF); + } + } + break; + case ERFSLEEP: + if (ppsc->rfpwr_state == ERFOFF) + break; + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } + } + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFSLEEP awaked:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies)); + ppsc->last_sleep_jiffies = jiffies; + _rtl92ee_phy_set_rf_sleep(hw); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + bresult = false; + break; + } + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + return bresult; +} + +bool rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + bool bresult = false; + + if (rfpwr_state == ppsc->rfpwr_state) + return bresult; + bresult = _rtl92ee_phy_set_rf_power_state(hw, rfpwr_state); + return bresult; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h new file mode 100644 index 0000000000000000000000000000000000000000..c6e97c8df54c1528fd63bc7458f1ae270467c208 
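The IQ-calibration entry point in phy.c above runs the full calibration up to three times and only trusts a result set when two runs agree (the similarity compare tolerates small per-register deviations up to MAX_TOLERANCE). A minimal stand-alone sketch of that selection policy follows, assuming all three runs have completed; results_agree() is a hypothetical stand-in for _rtl92ee_phy_simularity_compare, and row 3 of the result matrix holds the merged/default values the driver falls back to:

        #include <stdbool.h>

        /* Sketch only: choose which of up to three IQK runs to apply.
         * results_agree() stands in for the driver's similarity compare.
         */
        static int pick_iqk_candidate(long result[4][8],
                                      bool (*results_agree)(long r[4][8],
                                                            int a, int b))
        {
                if (results_agree(result, 0, 1))
                        return 0;       /* runs 0 and 1 match: use run 0 */
                if (results_agree(result, 0, 2))
                        return 0;       /* runs 0 and 2 match: use run 0 */
                if (results_agree(result, 1, 2))
                        return 1;       /* runs 1 and 2 match: use run 1 */
                return 3;               /* no agreement: merged/default row 3 */
        }

In the driver itself the (0,1) compare happens after the second run and the (0,2)/(1,2) compares after the third, so a match can short-circuit the remaining calibration passes.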
--- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h @@ -0,0 +1,153 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_PHY_H__ +#define __RTL92E_PHY_H__ + +/* MAX_TX_COUNT must always set to 4, otherwise read efuse table sequence + * will be wrong. + */ +#define MAX_TX_COUNT 4 +#define TX_1S 0 +#define TX_2S 1 +#define TX_3S 2 +#define TX_4S 3 + +#define MAX_POWER_INDEX 0x3f + +#define MAX_PRECMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 + +#define MAX_DOZE_WAITING_TIMES_9x 64 + +#define RT_CANNOT_IO(hw) false +#define HIGHPOWER_RADIOA_ARRAYLEN 22 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 +#define IQK_BB_REG_NUM 9 +#define MAX_TOLERANCE 5 +#define IQK_DELAY_TIME 10 +#define index_mapping_NUM 15 + +#define APK_BB_REG_NUM 5 +#define APK_AFE_REG_NUM 16 +#define APK_CURVE_REG_NUM 4 +#define PATH_NUM 2 + +#define LOOP_LIMIT 5 +#define MAX_STALL_TIME 50 +#define ANTENNADIVERSITYVALUE 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 +#define RESET_CNT_LIMIT 3 + +#define RF6052_MAX_PATH 2 + +#define CT_OFFSET_MAC_ADDR 0X16 + +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66 +#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 +#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C + +#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F +#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 + +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F + +#define RTL92C_MAX_PATH_NUM 2 + +enum swchnlcmd_id { + CMDID_END, + CMDID_SET_TXPOWEROWER_LEVEL, + CMDID_BBREGWRITE10, + CMDID_WRITEPORT_ULONG, + CMDID_WRITEPORT_USHORT, + CMDID_WRITEPORT_UCHAR, + CMDID_RF_WRITEREG, +}; + +struct swchnlcmd { + enum swchnlcmd_id cmdid; + u32 para1; + u32 para2; + u32 msdelay; +}; + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum ant_div_type { + NO_ANTDIV = 0xFF, + CG_TRX_HW_ANTDIV = 0x01, + CGCS_RX_HW_ANTDIV = 0x02, + FIXED_HW_ANTDIV = 0x03, + CG_TRX_SMART_ANTDIV = 0x04, + CGCS_RX_SW_ANTDIV = 0x05, +}; + +u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask); +void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data); +bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw); +bool 
rtl92ee_phy_bb_config(struct ieee80211_hw *hw); +bool rtl92ee_phy_rf_config(struct ieee80211_hw *hw); +void rtl92ee_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +void rtl92ee_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +void rtl92ee_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +void rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw); +u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw); +void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); +void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); +bool rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c new file mode 100644 index 0000000000000000000000000000000000000000..1a701d007f0c8e8528f2e1ef2ca84351dbc34a61 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c @@ -0,0 +1,112 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
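The channel-switch helpers earlier in phy.c fill fixed-size arrays of struct swchnlcmd (declared in phy.h above) and terminate them with a CMDID_END sentinel; the step-by-step routine then dispatches one entry at a time, honouring each entry's msdelay. A simplified stand-alone walker is sketched below; the enum and struct are a trimmed copy of the phy.h declarations, and do_cmd() is a hypothetical callback standing in for the driver's dispatch switch (no stage/step bookkeeping or delays):

        /* Sketch only: walk a CMDID_END-terminated channel-switch table. */
        enum swchnlcmd_id { CMDID_END, CMDID_SET_TXPOWEROWER_LEVEL,
                            CMDID_RF_WRITEREG };

        struct swchnlcmd {
                enum swchnlcmd_id cmdid;
                unsigned int para1;
                unsigned int para2;
                unsigned int msdelay;
        };

        static void walk_cmd_table(const struct swchnlcmd *tbl,
                                   void (*do_cmd)(const struct swchnlcmd *cmd))
        {
                for (; tbl->cmdid != CMDID_END; tbl++)
                        do_cmd(tbl);    /* driver also waits tbl->msdelay ms */
        }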
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "pwrseq.h" + +/* drivers should parse below arrays and do the corresponding actions */ + +/*3 Power on Array*/ +struct wlan_pwr_cfg rtl8192E_power_on_flow + [RTL8192E_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_CARDEMU_TO_ACT + RTL8192E_TRANS_END +}; + +/*3Radio off GPIO Array */ +struct wlan_pwr_cfg rtl8192E_radio_off_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_ACT_TO_CARDEMU + RTL8192E_TRANS_END +}; + +/*3Card Disable Array*/ +struct wlan_pwr_cfg rtl8192E_card_disable_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_ACT_TO_CARDEMU + RTL8192E_TRANS_CARDEMU_TO_CARDDIS + RTL8192E_TRANS_END +}; + +/*3 Card Enable Array*/ +struct wlan_pwr_cfg rtl8192E_card_enable_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_CARDDIS_TO_CARDEMU + RTL8192E_TRANS_CARDEMU_TO_ACT + RTL8192E_TRANS_END +}; + +/*3Suspend Array*/ +struct wlan_pwr_cfg rtl8192E_suspend_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_ACT_TO_CARDEMU + RTL8192E_TRANS_CARDEMU_TO_SUS + RTL8192E_TRANS_END +}; + +/*3 Resume Array*/ +struct wlan_pwr_cfg rtl8192E_resume_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_SUS_TO_CARDEMU + RTL8192E_TRANS_CARDEMU_TO_ACT + RTL8192E_TRANS_END +}; + +/*3HWPDN Array*/ +struct wlan_pwr_cfg rtl8192E_hwpdn_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS] = { + RTL8192E_TRANS_ACT_TO_CARDEMU + RTL8192E_TRANS_CARDEMU_TO_PDN + RTL8192E_TRANS_END +}; + +/*3 Enter LPS */ +struct wlan_pwr_cfg rtl8192E_enter_lps_flow + [RTL8192E_TRANS_ACT_TO_LPS_STEPS + + RTL8192E_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8192E_TRANS_ACT_TO_LPS + RTL8192E_TRANS_END +}; + +/*3 Leave LPS */ +struct wlan_pwr_cfg rtl8192E_leave_lps_flow + [RTL8192E_TRANS_LPS_TO_ACT_STEPS + + RTL8192E_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8192E_TRANS_LPS_TO_ACT + RTL8192E_TRANS_END +}; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h new file mode 100644 index 0000000000000000000000000000000000000000..781eeaa6af4953f9a9baa19ee4e5ae02c97358f7 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h @@ -0,0 +1,340 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
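The flow arrays in pwrseq.c above are simply concatenations of the transition step macros, each closed by RTL8192E_TRANS_END; the shared pwrseqcmd engine (outside this patch) walks the entries and performs the write/polling/delay actions selected by PWR_CMD_*. The sketch below shows that consumption pattern with a deliberately simplified step layout; the field names, PWR_CMD_* values and register-access callbacks are assumptions for illustration, not the real struct wlan_pwr_cfg:

        #include <stdbool.h>
        #include <stdint.h>

        /* Sketch only: consume a PWR_CMD_END-terminated power-sequence flow. */
        #define PWR_CMD_WRITE   0x01
        #define PWR_CMD_POLLING 0x02
        #define PWR_CMD_DELAY   0x03
        #define PWR_CMD_END     0x04

        struct pwr_step {
                uint16_t offset;        /* register offset                  */
                uint8_t  cmd;           /* one of the PWR_CMD_* actions     */
                uint8_t  msk;           /* bits to touch                    */
                uint8_t  value;         /* value to set / poll for / delay  */
        };

        static bool run_pwr_flow(const struct pwr_step *flow,
                                 uint8_t (*rd)(uint16_t),
                                 void (*wr)(uint16_t, uint8_t),
                                 void (*delay_us)(unsigned int))
        {
                for (; flow->cmd != PWR_CMD_END; flow++) {
                        if (flow->cmd == PWR_CMD_WRITE) {
                                /* read-modify-write only the masked bits */
                                wr(flow->offset,
                                   (rd(flow->offset) & ~flow->msk) |
                                   (flow->value & flow->msk));
                        } else if (flow->cmd == PWR_CMD_POLLING) {
                                int tries = 5000;

                                while ((rd(flow->offset) & flow->msk) !=
                                       (flow->value & flow->msk) && --tries)
                                        delay_us(10);
                                if (!tries)
                                        return false;   /* polling timed out */
                        } else if (flow->cmd == PWR_CMD_DELAY) {
                                /* real engine distinguishes us and ms delays */
                                delay_us(flow->value);
                        }
                }
                return true;
        }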
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_PWRSEQ_H__ +#define __RTL92E_PWRSEQ_H__ + +#include "../pwrseqcmd.h" +/** + * Check document WM-20110607-Paul-RTL8192E_Power_Architecture-R02.vsd + * There are 6 HW Power States: + * 0: POFF--Power Off + * 1: PDN--Power Down + * 2: CARDEMU--Card Emulation + * 3: ACT--Active Mode + * 4: LPS--Low Power State + * 5: SUS--Suspend + * + * The transision from different states are defined below + * TRANS_CARDEMU_TO_ACT + * TRANS_ACT_TO_CARDEMU + * TRANS_CARDEMU_TO_SUS + * TRANS_SUS_TO_CARDEMU + * TRANS_CARDEMU_TO_PDN + * TRANS_ACT_TO_LPS + * TRANS_LPS_TO_ACT + * + * TRANS_END + * PWR SEQ Version: rtl8192E_PwrSeq_V09.h + */ + +#define RTL8192E_TRANS_CARDEMU_TO_ACT_STEPS 18 +#define RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS 18 +#define RTL8192E_TRANS_CARDEMU_TO_SUS_STEPS 18 +#define RTL8192E_TRANS_SUS_TO_CARDEMU_STEPS 18 +#define RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS 18 +#define RTL8192E_TRANS_PDN_TO_CARDEMU_STEPS 18 +#define RTL8192E_TRANS_ACT_TO_LPS_STEPS 23 +#define RTL8192E_TRANS_LPS_TO_ACT_STEPS 23 +#define RTL8192E_TRANS_END_STEPS 1 + +#define RTL8192E_TRANS_CARDEMU_TO_ACT \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /* disable HWPDN 0x04[15]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(7), 0}, \ + /* disable SW LPS 0x04[10]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(2), 0}, \ + /* disable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \ + /* wait till 0x04[17] = 1 power ready*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /* release WLON reset 0x04[16]=1*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /* polling until return 0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /**/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, BIT(0), 0}, + +#define RTL8192E_TRANS_ACT_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*0x1F[7:0] = 0 turn off RF*/ \ + {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0}, \ + /*0x4C[23]=0x4E[7]=0, switch DPDT_SEL_P output from register 0x65[2] */\ + {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(7), 0}, \ + /*0x04[9] = 1 turn off MAC by HW state machine*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, BIT(1), 0}, + +#define RTL8192E_TRANS_CARDEMU_TO_SUS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(4) | BIT(3), 
(BIT(4) | BIT(3))},\ + /*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\ + /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_POLLING, BIT(1), 0}, + +#define RTL8192E_TRANS_SUS_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_WRITE, BIT(0), 0}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /*0x04[12:11] = 2b'01enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, + +#define RTL8192E_TRANS_CARDEMU_TO_CARDDIS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*0x07=0x20 , SOP option to disable BG/MB*/ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0x20}, \ + /*Unlock small LDO Register*/ \ + {0x00CC, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(2), BIT(2)}, \ + /*Disable small LDO*/ \ + {0x0011, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), 0}, \ + /*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + /*0x04[10] = 1, enable SW LPS*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(2), BIT(2)}, \ + /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_POLLING, BIT(1), 0}, + +#define RTL8192E_TRANS_CARDDIS_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_WRITE, BIT(0), 0}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /*Enable small LDO*/ \ + {0x0011, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*Lock small LDO Register*/ \ + {0x00CC, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(2), 0}, \ + /*0x04[12:11] = 2b'01enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + 
PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, + +#define RTL8192E_TRANS_CARDEMU_TO_PDN \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /* 0x04[16] = 0*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), 0}, \ + /* 0x04[15] = 1*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(7), BIT(7)}, + +#define RTL8192E_TRANS_PDN_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /* 0x04[15] = 0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(7), 0}, + +#define RTL8192E_TRANS_ACT_TO_LPS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*PCIe DMA stop*/ \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /*Tx Pause*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /*Should be zero if no packet is transmitting*/ \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, 0xFF, 0}, \ + /*CCK and OFDM are disabled,and clock are gated*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(0), 0}, \ + /*Delay 1us*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \ + /*Whole BB is reset*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(1), 0}, \ + /*Reset MAC TRX*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0x03}, \ + /*check if removed later*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(1), 0}, \ + /*When driver enter Sus/ Disable, enable LOP for BT*/ \ + {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0x00}, \ + /*Respond TxOK to scheduler*/ \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(5), BIT(5)}, + +#define RTL8192E_TRANS_LPS_TO_ACT \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*SDIO RPWM, For Repeatly In and out, Taggle bit should be changed*/\ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO , PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*USB RPWM*/ \ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*PCIe RPWM*/ \ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0x84}, \ + 
/*Delay*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \ + /*0x08[4] = 0 switch TSF to 40M*/ \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(4), 0}, \ + /*Polling 0x109[7]=0 TSF in 40M*/ \ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_POLLING, BIT(7), 0}, \ + /*0x101[1] = 1*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*0x100[7:0] = 0xFF enable WMAC TRX*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /* 0x02[1:0] = 2b'11 enable BB macro*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)},\ + /*0x522 = 0*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0}, \ + /*Clear ISR*/ \ + {0x013D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC , PWR_CMD_WRITE, 0xFF, 0xFF}, + +#define RTL8192E_TRANS_END \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + 0, PWR_CMD_END, 0, 0}, + +extern struct wlan_pwr_cfg rtl8192E_power_on_flow + [RTL8192E_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_radio_off_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_card_disable_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_card_enable_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_suspend_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_resume_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_hwpdn_flow + [RTL8192E_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8192E_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_enter_lps_flow + [RTL8192E_TRANS_ACT_TO_LPS_STEPS + + RTL8192E_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8192E_leave_lps_flow + [RTL8192E_TRANS_LPS_TO_ACT_STEPS + + RTL8192E_TRANS_END_STEPS]; + +/* RTL8192EE Power Configuration CMDs for PCIe interface */ +#define RTL8192E_NIC_PWR_ON_FLOW rtl8192E_power_on_flow +#define RTL8192E_NIC_RF_OFF_FLOW rtl8192E_radio_off_flow +#define RTL8192E_NIC_DISABLE_FLOW rtl8192E_card_disable_flow +#define RTL8192E_NIC_ENABLE_FLOW rtl8192E_card_enable_flow +#define RTL8192E_NIC_SUSPEND_FLOW rtl8192E_suspend_flow +#define RTL8192E_NIC_RESUME_FLOW rtl8192E_resume_flow +#define RTL8192E_NIC_PDN_FLOW rtl8192E_hwpdn_flow +#define RTL8192E_NIC_LPS_ENTER_FLOW rtl8192E_enter_lps_flow +#define RTL8192E_NIC_LPS_LEAVE_FLOW rtl8192E_leave_lps_flow + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h new file mode 100644 index 0000000000000000000000000000000000000000..3f2a9596e7cd89859928a797c52d7b6b13fad70d --- /dev/null +++ 
b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h @@ -0,0 +1,2231 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_REG_H__ +#define __RTL92E_REG_H__ + +#define TXPKT_BUF_SELECT 0x69 +#define RXPKT_BUF_SELECT 0xA5 +#define DISABLE_TRXPKT_BUF_ACCESS 0x0 + +#define REG_SYS_ISO_CTRL 0x0000 +#define REG_SYS_FUNC_EN 0x0002 +#define REG_APS_FSMCO 0x0004 +#define REG_SYS_CLKR 0x0008 +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C +#define REG_SYS_SWR_CTRL1 0x0010 +#define REG_SPS0_CTRL 0x0011 +#define REG_SYS_SWR_CTRL2 0x0014 +#define REG_SYS_SWR_CTRL3 0x0018 +#define REG_RSV_CTRL 0x001C +#define REG_RF_CTRL 0x001F +#define REG_LPLDO_CTRL 0x0023 +#define REG_AFE_CTRL1 0x0024 +#define REG_AFE_XTAL_CTRL 0x0024 +#define REG_AFE_CTRL2 0x0028 +#define REG_MAC_PHY_CTRL 0x002c +#define REG_AFE_CTRL3 0x002c +#define REG_EFUSE_CTRL 0x0030 +#define REG_EFUSE_TEST 0x0034 +#define REG_PWR_DATA 0x0038 +#define REG_CAL_TIMER 0x003C +#define REG_ACLK_MON 0x003E +#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_IO_SEL 0x0042 +#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_GPIO_PIN_CTRL 0x0044 +#define REG_GPIO_INTM 0x0048 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +#define REG_SDIO_CTRL 0x0070 +#define REG_OPT_CTRL 0x0074 +#define REG_GPIO_OUTPUT 0x006c +#define REG_AFE_CTRL4 0x0078 +#define REG_MCUFWDL 0x0080 + +#define REG_HIMR 0x00B0 +#define REG_HISR 0x00B4 +#define REG_HIMRE 0x00B8 +#define REG_HISRE 0x00BC + +#define REG_EFUSE_ACCESS 0x00CF +#define REG_HPON_FSM 0x00EC +#define REG_SYS_CFG1 0x00F0 +#define REG_SYS_CFG2 0x00FC + +#define REG_CR 0x0100 +#define REG_PBP 0x0104 +#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 +#define REG_TRXDMA_CTRL 0x010C +#define REG_TRXFF_BNDY 0x0114 +#define REG_TRXFF_STATUS 0x0118 +#define REG_RXFF_PTR 0x011C + +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 +#define REG_PKTBUF_DBG_CTRL 0x0140 +#define REG_RXPKTBUF_CTRL 0x0142 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 + +#define REG_TC0_CTRL 0x0150 +#define REG_TC1_CTRL 0x0154 +#define REG_TC2_CTRL 0x0158 +#define REG_TC3_CTRL 0x015C +#define REG_TC4_CTRL 0x0160 +#define REG_TCUNIT_BASE 0x0164 +#define REG_RSVD3 0x0168 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_C2HEVT_CLEAR 0x01AF +#define REG_MCUTST_1 0x01c0 +#define REG_MCUTST_WOWLAN 0x01C7 +#define REG_FMETHR 0x01C8 +#define REG_HMETFR 0x01CC +#define REG_HMEBOX_0 0x01D0 +#define REG_HMEBOX_1 0x01D4 +#define REG_HMEBOX_2 0x01D8 +#define 
REG_HMEBOX_3 0x01DC + +#define REG_LLT_INIT 0x01E0 + +#define REG_HMEBOX_EXT_0 0x01F0 +#define REG_HMEBOX_EXT_1 0x01F4 +#define REG_HMEBOX_EXT_2 0x01F8 +#define REG_HMEBOX_EXT_3 0x01FC + +/*----------------------------------------------------- + * + * 0x0200h ~ 0x027Fh TXDMA Configuration + * + *----------------------------------------------------- + */ +#define REG_RQPN 0x0200 +#define REG_FIFOPAGE 0x0204 +#define REG_DWBCN0_CTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TXDMA_STATUS 0x0210 +#define REG_RQPN_NPQ 0x0214 +#define REG_AUTO_LLT 0x0224 +#define REG_DWBCN1_CTRL 0x0228 + +/*----------------------------------------------------- + * + * 0x0280h ~ 0x02FFh RXDMA Configuration + * + *----------------------------------------------------- + */ +#define REG_RXDMA_AGG_PG_TH 0x0280 +#define REG_FW_UPD_RDPTR 0x0284 +#define REG_RXDMA_CONTROL 0x0286 +#define REG_RXPKT_NUM 0x0287 +#define REG_RXDMA_STATUS 0x0288 +#define REG_RXDMA_PRO 0x0290 +#define REG_EARLY_MODE_CONTROL 0x02BC +#define REG_RSVD5 0x02F0 +#define REG_RSVD6 0x02F4 + +/*----------------------------------------------------- + * + * 0x0300h ~ 0x03FFh PCIe + * + *----------------------------------------------------- + */ +#define REG_PCIE_CTRL_REG 0x0300 +#define REG_INT_MIG 0x0304 +#define REG_BCNQ_DESA 0x0308 +#define REG_MGQ_DESA 0x0310 +#define REG_VOQ_DESA 0x0318 +#define REG_VIQ_DESA 0x0320 +#define REG_BEQ_DESA 0x0328 +#define REG_BKQ_DESA 0x0330 +#define REG_RX_DESA 0x0338 +#define REG_HQ0_DESA 0x0340 +#define REG_HQ1_DESA 0x0348 +#define REG_HQ2_DESA 0x0350 +#define REG_HQ3_DESA 0x0358 +#define REG_HQ4_DESA 0x0360 +#define REG_HQ5_DESA 0x0368 +#define REG_HQ6_DESA 0x0370 +#define REG_HQ7_DESA 0x0378 +#define REG_MGQ_TXBD_NUM 0x0380 +#define REG_RX_RXBD_NUM 0x0382 +#define REG_VOQ_TXBD_NUM 0x0384 +#define REG_VIQ_TXBD_NUM 0x0386 +#define REG_BEQ_TXBD_NUM 0x0388 +#define REG_BKQ_TXBD_NUM 0x038A +#define REG_HI0Q_TXBD_NUM 0x038C +#define REG_HI1Q_TXBD_NUM 0x038E +#define REG_HI2Q_TXBD_NUM 0x0390 +#define REG_HI3Q_TXBD_NUM 0x0392 +#define REG_HI4Q_TXBD_NUM 0x0394 +#define REG_HI5Q_TXBD_NUM 0x0396 +#define REG_HI6Q_TXBD_NUM 0x0398 +#define REG_HI7Q_TXBD_NUM 0x039A +#define REG_TSFTIMER_HCI 0x039C +/*Read Write Point*/ +#define REG_VOQ_TXBD_IDX 0x03A0 +#define REG_VIQ_TXBD_IDX 0x03A4 +#define REG_BEQ_TXBD_IDX 0x03A8 +#define REG_BKQ_TXBD_IDX 0x03AC +#define REG_MGQ_TXBD_IDX 0x03B0 +#define REG_RXQ_TXBD_IDX 0x03B4 + +#define REG_HI0Q_TXBD_IDX 0x03B8 +#define REG_HI1Q_TXBD_IDX 0x03BC +#define REG_HI2Q_TXBD_IDX 0x03C0 +#define REG_HI3Q_TXBD_IDX 0x03C4 + +#define REG_HI4Q_TXBD_IDX 0x03C8 +#define REG_HI5Q_TXBD_IDX 0x03CC +#define REG_HI6Q_TXBD_IDX 0x03D0 +#define REG_HI7Q_TXBD_IDX 0x03D4 +#define REG_PCIE_HCPWM 0x03D8 +#define REG_PCIE_CTRL2 0x03DB +#define REG_PCIE_HRPWM 0x03DC +#define REG_H2C_MSG_DRV2FW_INFO 0x03E0 +#define REG_PCIE_C2H_MSG_REQUEST 0x03E4 +#define REG_BACKDOOR_DBI_WDATA 0x03E8 +#define REG_BACKDOOR_DBI_RDATA 0x03EC +#define REG_BACKDOOR_DBI_DATA 0x03F0 +#define REG_MDIO 0x03F4 +#define REG_MDIO_DATA 0x03F8 + +#define REG_HDAQ_DESA_NODEF 0x0000 +#define REG_CMDQ_DESA_NODEF 0x0000 +/* spec version 11 + *----------------------------------------------------- + * + * 0x0400h ~ 0x047Fh Protocol Configuration + * + *----------------------------------------------------- + */ +#define REG_VOQ_INFORMATION 0x0400 +#define REG_VIQ_INFORMATION 0x0404 +#define REG_BEQ_INFORMATION 0x0408 +#define REG_BKQ_INFORMATION 0x040C +#define REG_MGQ_INFORMATION 0x0410 +#define REG_HGQ_INFORMATION 0x0414 +#define 
REG_BCNQ_INFORMATION 0x0418 +#define REG_TXPKT_EMPTY 0x041A + +#define REG_FWHW_TXQ_CTRL 0x0420 +#define REG_HWSEQ_CTRL 0x0423 +#define REG_BCNQ_BDNY 0x0424 +#define REG_MGQ_BDNY 0x0425 +#define REG_LIFECTRL_CTRL 0x0426 +#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_SPEC_SIFS 0x0428 +#define REG_RETRY_LIMIT 0x042A +#define REG_TXBF_CTRL 0x042C +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x044C +#define REG_AMPDU_MAX_TIME 0x0456 +#define REG_BCNQ1_BDNY 0x0457 +#define REG_AGGLEN_LMT 0x0458 +#define REG_AMPDU_MIN_SPACE 0x045C +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_NDPA_OPT_CTRL 0x045F +#define REG_FAST_EDCA_CTRL 0x0460 +#define REG_RD_RESP_PKT_TH 0x0463 +#define REG_POWER_STAGE1 0x04B4 +#define REG_POWER_STAGE2 0x04B8 +#define REG_AMPDU_BURST_MODE 0x04BC +#define REG_PKT_VO_VI_LIFE_TIME 0x04C0 +#define REG_PKT_BE_BK_LIFE_TIME 0x04C2 +#define REG_STBC_SETTING 0x04C4 +#define REG_PROT_MODE_CTRL 0x04C8 +#define REG_MAX_AGGR_NUM 0x04CA +#define REG_RTS_MAX_AGGR_NUM 0x04CB +#define REG_BAR_MODE_CTRL 0x04CC +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_MACID_PKT_DROP0 0x04D0 + +/*----------------------------------------------------- + * + * 0x0500h ~ 0x05FFh EDCA Configuration + * + *----------------------------------------------------- + */ +#define REG_EDCA_VO_PARAM 0x0500 +#define REG_EDCA_VI_PARAM 0x0504 +#define REG_EDCA_BE_PARAM 0x0508 +#define REG_EDCA_BK_PARAM 0x050C +#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 +#define REG_RDG_PIFS 0x0513 +#define REG_SIFS_CTX 0x0514 +#define REG_SIFS_TRX 0x0516 +#define REG_AGGR_BREAK_TIME 0x051A +#define REG_SLOT 0x051B +#define REG_TX_PTCL_CTRL 0x0520 +#define REG_TXPAUSE 0x0522 +#define REG_DIS_TXREQ_CLR 0x0523 +#define REG_RD_CTRL 0x0524 + +#define REG_TBTT_PROHIBIT 0x0540 +#define REG_RD_NAV_NXT 0x0544 +#define REG_NAV_PROT_LEN 0x0546 +#define REG_BCN_CTRL 0x0550 +#define REG_BCN_CTRL_1 0x0551 +#define REG_MBID_NUM 0x0552 +#define REG_DUAL_TSF_RST 0x0553 +#define REG_BCN_INTERVAL 0x0554 +#define REG_DRVERLYINT 0x0558 +#define REG_BCNDMATIM 0x0559 +#define REG_ATIMWND 0x055A +#define REG_BCN_MAX_ERR 0x055D +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 +#define REG_CTWND 0x0572 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 +#define REG_BCN_PREDL_ITV 0x058F +#define REG_ACMHWCTRL 0x05C0 + +/*----------------------------------------------------- + * + * 0x0600h ~ 0x07FFh WMAC Configuration + * + *----------------------------------------------------- + */ +#define REG_MAC_CR 0x0600 +#define REG_BWOPMODE 0x0603 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 +#define REG_RX_PKT_LIMIT 0x060C +#define REG_RX_DLK_TIME 0x060D +#define REG_RX_DRVINFO_SZ 0x060F + +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 +#define REG_MBIDCAMCFG 0x0628 + +#define REG_USTIME_EDCA 0x0638 +#define REG_MAC_SPEC_SIFS 0x063A +#define REG_RESP_SIFS_CCK 0x063C +#define REG_RESP_SIFS_OFDM 0x063E +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 + +#define REG_NAV_UPPER 0x0652 + +/* Security*/ +#define REG_CAMCMD 0x0670 +#define REG_CAMWRITE 0x0674 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 + +/* Power*/ +#define REG_WOW_CTRL 0x0690 +#define REG_PS_RX_INFO 0x0692 +#define REG_UAPSD_TID 0x0693 +#define REG_WKFMCAM_NUM 0x0698 +#define REG_WKFMCAM_RWD 0x069C +#define REG_RXFLTMAP0 0x06A0 
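/*
 * A minimal usage sketch: the security CAM behind REG_CAMCMD/REG_CAMWRITE
 * is typically programmed by writing one content dword to REG_CAMWRITE and
 * then issuing a command word on REG_CAMCMD that carries the entry index
 * plus the CAM_WRITE and CAM_POLLINIG bits defined further down in this
 * header.  Assumptions: rtl_write_dword() and struct rtl_priv come from the
 * common rtlwifi wifi.h; the helper name is only illustrative.
 */
static inline void rtl92ee_example_cam_write(struct rtl_priv *rtlpriv,
					     u32 entry_idx, u32 content)
{
	/* Load the 32-bit content first, then trigger the CAM write. */
	rtl_write_dword(rtlpriv, REG_CAMWRITE, content);
	rtl_write_dword(rtlpriv, REG_CAMCMD,
			CAM_POLLINIG | CAM_WRITE | entry_idx);
}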
+#define REG_RXFLTMAP1 0x06A2 +#define REG_RXFLTMAP2 0x06A4 +#define REG_BCN_PSR_RPT 0x06A8 +#define REG_BT_COEX_TABLE 0x06C0 +#define REG_BFMER0_INFO 0x06E4 +#define REG_BFMER1_INFO 0x06EC +#define REG_CSI_RPT_PARAM_BW20 0x06F4 +#define REG_CSI_RPT_PARAM_BW40 0x06F8 +#define REG_CSI_RPT_PARAM_BW80 0x06FC +/* Hardware Port 2*/ +#define REG_MACID1 0x0700 +#define REG_BSSID1 0x0708 +#define REG_BFMEE_SEL 0x0714 +#define REG_SND_PTCL_CTRL 0x0718 + +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR + +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) + +#define PBP REG_PBP + +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 + +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER + +#define INVALID_BBRF_VALUE 0x12345678 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN + +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) + +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3) + +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 + +#define RRSR_RSC_OFFSET 21 +#define RRSR_SHORT_OFFSET 23 +#define RRSR_RSC_BW_40M 0x600000 +#define RRSR_RSC_UPSUBCHNL 0x400000 +#define RRSR_RSC_LOWSUBCHNL 0x200000 +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) +#define BRSR_ACKSHORTPMB BIT(23) + +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) 
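/*
 * A minimal usage sketch: a basic/response rate set is an OR of the
 * RRSR_nM bits above, written as one dword to REG_RRSR.  Assumptions:
 * rtl_write_dword() and struct rtl_priv come from the common rtlwifi
 * wifi.h; the helper name and the chosen rate set are only illustrative.
 */
static inline void rtl92ee_example_set_basic_rates(struct rtl_priv *rtlpriv)
{
	/* CCK 1/2/5.5/11M plus the mandatory OFDM rates 6/12/24M */
	u32 rate_cfg = RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M |
		       RRSR_6M | RRSR_12M | RRSR_24M;

	rtl_write_dword(rtlpriv, REG_RRSR, rate_cfg);
}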
+#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) + +#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) +#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ + RATR_24M | RATR_36M | RATR_48M | RATR_54M) +#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\ + RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\ + RATR_MCS6 | RATR_MCS7) +#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\ + RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\ + RATR_MCS14 | RATR_MCS15) + +#define BW_OPMODE_20MHZ BIT(2) +#define BW_OPMODE_5G BIT(1) +#define CAM_VALID BIT(15) +#define CAM_NOTVALID 0x0000 +#define CAM_USEDK BIT(5) + +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 + +#define TOTAL_CAM_ENTRY 32 +#define HALF_CAM_ENTRY 16 + +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 +#define CAM_POLLINIG BIT(31) + +#define SCR_USEDK 0x01 +#define SCR_TXSEC_ENABLE 0x02 +#define SCR_RXSEC_ENABLE 0x04 + +/********************************************* +* 8192EE IMR/ISR bits +**********************************************/ +#define IMR_DISABLED 0x0 +/* IMR DW0(0x0060-0063) Bit 0-31 */ +#define IMR_TIMER2 BIT(31) +#define IMR_TIMER1 BIT(30) +#define IMR_PSTIMEOUT BIT(29) +#define IMR_GTINT4 BIT(28) +#define IMR_GTINT3 BIT(27) +#define IMR_TBDER BIT(26) +#define IMR_TBDOK BIT(25) +#define IMR_TSF_BIT32_TOGGLE BIT(24) +#define IMR_BCNDMAINT0 BIT(20) +#define IMR_BCNDOK0 BIT(16) +#define IMR_BCNDMAINT_E BIT(14) +#define IMR_ATIMEND BIT(12) +#define IMR_HISR1_IND_INT BIT(11) +#define IMR_C2HCMD BIT(10) +#define IMR_CPWM2 BIT(9) +#define IMR_CPWM BIT(8) +#define IMR_HIGHDOK BIT(7) +#define IMR_MGNTDOK BIT(6) +#define IMR_BKDOK BIT(5) +#define IMR_BEDOK BIT(4) +#define IMR_VIDOK BIT(3) +#define IMR_VODOK BIT(2) +#define IMR_RDU BIT(1) +#define IMR_ROK BIT(0) + +/* IMR DW1(0x00B4-00B7) Bit 0-31 */ +#define IMR_MCUERR BIT(28) +#define IMR_BCNDMAINT7 BIT(27) +#define IMR_BCNDMAINT6 BIT(26) +#define IMR_BCNDMAINT5 BIT(25) +#define IMR_BCNDMAINT4 BIT(24) +#define IMR_BCNDMAINT3 BIT(23) +#define IMR_BCNDMAINT2 BIT(22) +#define IMR_BCNDMAINT1 BIT(21) +#define IMR_BCNDOK7 BIT(20) +#define IMR_BCNDOK6 BIT(19) +#define IMR_BCNDOK5 BIT(18) +#define IMR_BCNDOK4 BIT(17) +#define IMR_BCNDOK3 BIT(16) +#define IMR_BCNDOK2 BIT(15) +#define IMR_BCNDOK1 BIT(14) +#define IMR_ATIMEND_E BIT(13) +#define IMR_TXERR BIT(11) +#define IMR_RXERR BIT(10) +#define IMR_TXFOVW BIT(9) +#define IMR_RXFOVW BIT(8) + +#define HWSET_MAX_SIZE 512 +#define EFUSE_MAX_SECTION 64 +#define EFUSE_REAL_CONTENT_LEN 256 +#define EFUSE_OOB_PROTECT_BYTES 18 + +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 + +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_THERMALMETER 0x1A +#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 +#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 + +#define RF_OPTION1 0x79 +#define RF_OPTION2 0x7A +#define RF_OPTION3 0x7B +#define 
RF_OPTION4 0x7C + +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 +#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA +#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 + +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE + +#define RTL8192E_EEPROM_ID 0x8129 + +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define EEPROM_TESTR 0x08 + +#define EEPROM_TXPOWERCCK 0x10 +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT20DIFF 0x1B +#define EEPROM_TXPOWER_OFDMDIFF 0x1B + +#define EEPROM_TX_PWR_INX 0x10 + +#define EEPROM_CHANNELPLAN 0xB8 +#define EEPROM_XTAL_92E 0xB9 +#define EEPROM_THERMAL_METER_92E 0xBA +#define EEPROM_IQK_LCK_92E 0xBB + +#define EEPROM_RF_BOARD_OPTION_92E 0xC1 +#define EEPROM_RF_FEATURE_OPTION_92E 0xC2 +#define EEPROM_RF_BT_SETTING_92E 0xC3 +#define EEPROM_VERSION 0xC4 +#define EEPROM_CUSTOMER_ID 0xC5 +#define EEPROM_RF_ANTENNA_OPT_92E 0xC9 + +#define EEPROM_MAC_ADDR 0xD0 +#define EEPROM_VID 0xD6 +#define EEPROM_DID 0xD8 +#define EEPROM_SVID 0xDA +#define EEPROM_SMID 0xDC + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) +#define RCR_APP_PHYST_RXFF BIT(28) +#define RCR_APP_BA_SSN BIT(27) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) +#define RCR_HTC_LOC_CTRL BIT(14) +#define RCR_AMF BIT(13) +#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) +#define RCR_CBSSID_BCN BIT(7) +#define RCR_CBSSID_DATA BIT(6) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) +#define RCR_MXDMA_OFFSET 8 +#define RCR_FIFO_OFFSET 13 + +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 +#define REG_USB_OPTIONAL 0xFE64 +#define REG_USB_CHIRP_K 0xFE65 +#define REG_USB_PHY 0xFE66 +#define REG_USB_MAC_ADDR 0xFE70 +#define REG_USB_HRPWM 0xFE58 +#define REG_USB_HCPWM 0xFE57 + +#define SW18_FPWM BIT(3) + +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) +#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) + +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) + +#define FEN_BBRSTB BIT(0) +#define FEN_BB_GLB_RSTN BIT(1) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) +#define FEN_DIO_PCIE BIT(5) +#define FEN_PCIEA BIT(6) 
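/*
 * A minimal usage sketch: the receive filter is a single dword built from
 * the RCR_ bits above and written to REG_RCR.  Assumptions:
 * rtl_write_dword() and struct rtl_priv come from the common rtlwifi
 * wifi.h; the helper name and the particular bit selection are only
 * illustrative.
 */
static inline void rtl92ee_example_init_rcr(struct rtl_priv *rtlpriv)
{
	u32 rcr = RCR_APM | RCR_AM | RCR_AB |		/* unicast/mcast/bcast */
		  RCR_CBSSID_DATA | RCR_CBSSID_BCN |	/* BSSID filtering */
		  RCR_APP_ICV | RCR_APP_MIC |		/* keep ICV/MIC bytes */
		  RCR_APP_PHYST_RXFF |			/* append PHY status */
		  RCR_HTC_LOC_CTRL;

	rtl_write_dword(rtlpriv, REG_RCR, rcr);
}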
+#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define ENPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) +#define LOADER_CLK_EN BIT(5) +#define _80M_SSC_DIS BIT(7) +#define _80M_SSC_EN_HO BIT(8) +#define PHY_SSC_RSTB BIT(9) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) + +#define BOOT_FROM_EEPROM BIT(4) +#define EEPROM_EN BIT(5) + +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) + +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) +#define R_DIS_PRST_0 BIT(5) +#define R_DIS_PRST_1 BIT(6) +#define LOCK_ALL_EN BIT(7) + +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) + +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) +#define LDA15_OBUF BIT(2) +#define LDA15_REG_VOS BIT(3) +#define _LDA15_VOADJ(x) (((x) & 0x7) << 4) + +#define LDV12_EN BIT(0) +#define LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) +#define LPLDO_LSM_DIS BIT(3) +#define _LDV12_VADJ(x) (((x) & 0xF) << 4) + +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) +#define _XTAL_BOSC(x) (((x) & 0x3) << 2) +#define _XTAL_CADJ(x) (((x) & 0xF) << 4) +#define XTAL_GATE_USB BIT(8) +#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9) +#define XTAL_GATE_AFE BIT(11) +#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12) +#define XTAL_RF_GATE BIT(14) +#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15) +#define XTAL_GATE_DIG BIT(17) +#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18) +#define XTAL_BT_GATE BIT(20) +#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) +#define _XTAL_GPIO(x) (((x) & 0x7) << 23) + +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) + +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) +#define APLL_FREF_SEL BIT(2) +#define APLL_EDGE_SEL BIT(3) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) + +#define APLL_REF_CLK_13MHZ 0x1 +#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_20MHZ 0x3 +#define APLL_REF_CLK_25MHZ 0x4 +#define APLL_REF_CLK_26MHZ 0x5 +#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_40MHZ 0x7 + +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define TIMER_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define ENSIC BIT(12) +#define SIC_23 BIT(13) +#define ENHDP BIT(14) +#define SIC_LBK 
BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define CHIP_VER_RTL_MASK 0xF000 +#define CHIP_VER_RTL_SHIFT 12 + +#define REG_LBMODE (REG_CR + 3) + +#define HCI_TXDMA_EN BIT(0) +#define HCI_RXDMA_EN BIT(1) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) +#define MASK_NETTYPE 0x30000 +#define NT_NO_LINK 0x0 +#define NT_LINK_AD_HOC 0x1 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 + +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 +#define LOOPBACK_NORMAL 0x0 +#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_MAC_DELAY 0x3 +#define LOOPBACK_PHY 0x1 +#define LOOPBACK_DMA 0x7 + +#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) +#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 +#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) + +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 +#define PBP_1024 0x4 + +#define RXDMA_ARBBW_EN BIT(0) +#define RXSHFT_EN BIT(1) +#define RXDMA_AGG_EN BIT(2) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) +#define QS_MANAGER_QUEUE BIT(12) +#define QS_HIGH_QUEUE BIT(13) + +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) + +#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) +#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) +#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10) +#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8) +#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6) +#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4) + +#define QUEUE_LOW 1 +#define QUEUE_NORMAL 2 +#define QUEUE_HIGH 3 + +#define _LLT_NO_ACTIVE 0x0 +#define _LLT_WRITE_ACCESS 0x1 +#define _LLT_READ_ACCESS 0x2 + +#define _LLT_INIT_DATA(x) ((x) & 0xFF) +#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) +#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) + +#define BB_WRITE_READ_MASK (BIT(31) | BIT(30)) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) + +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) + +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) + +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 + +#define BLK_DESC_NUM_SHIFT 4 +#define BLK_DESC_NUM_MASK 0xF + +#define DROP_DATA_EN BIT(9) + +#define EN_AMPDU_RTY_NEW BIT(7) + +#define _INIRTSMCS_SEL(x) ((x) & 0x3F) + +#define _SPEC_SIFS_CCK(x) ((x) & 0xFF) +#define _SPEC_SIFS_OFDM(x) 
(((x) & 0xFF) << 8) + +#define RATE_REG_BITMAP_ALL 0xFFFFF + +#define _RRSC_BITMAP(x) ((x) & 0xFFFFF) + +#define _RRSR_RSC(x) (((x) & 0x3) << 21) +#define RRSR_RSC_RESERVED 0x0 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 + +#define USE_SHORT_G1 BIT(20) + +#define _AGGLMT_MCS0(x) ((x) & 0xF) +#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4) +#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8) +#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12) +#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16) +#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20) +#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) +#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) + +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 + +#define _DARF_RC1(x) ((x) & 0x1F) +#define _DARF_RC2(x) (((x) & 0x1F) << 8) +#define _DARF_RC3(x) (((x) & 0x1F) << 16) +#define _DARF_RC4(x) (((x) & 0x1F) << 24) +#define _DARF_RC5(x) ((x) & 0x1F) +#define _DARF_RC6(x) (((x) & 0x1F) << 8) +#define _DARF_RC7(x) (((x) & 0x1F) << 16) +#define _DARF_RC8(x) (((x) & 0x1F) << 24) + +#define _RARF_RC1(x) ((x) & 0x1F) +#define _RARF_RC2(x) (((x) & 0x1F) << 8) +#define _RARF_RC3(x) (((x) & 0x1F) << 16) +#define _RARF_RC4(x) (((x) & 0x1F) << 24) +#define _RARF_RC5(x) ((x) & 0x1F) +#define _RARF_RC6(x) (((x) & 0x1F) << 8) +#define _RARF_RC7(x) (((x) & 0x1F) << 16) +#define _RARF_RC8(x) (((x) & 0x1F) << 24) + +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 + +#define _AIFS(x) (x) +#define _ECW_MAX_MIN(x) ((x) << 8) +#define _TXOP_LIMIT(x) ((x) << 16) + +#define _BCNIFS(x) ((x) & 0xFF) +#define _BCNECW(x) ((((x) & 0xF)) << 8) + +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) + +#define _SIFS_CCK_CTX(x) ((x) & 0xFF) +#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8) + +#define _SIFS_OFDM_CTX(x) ((x) & 0xFF) +#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8) + +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) + +#define DIS_EDCA_CNT_DWN BIT(11) + +#define EN_MBSSID BIT(1) +#define EN_TXBCN_RPT BIT(2) +#define EN_BCN_FUNCTION BIT(3) + +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) + +#define STOP_BCNQ BIT(6) + +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) + +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) + +#define APSDOFF BIT(6) +#define APSDOFF_STATUS BIT(7) + +#define BW_20MHZ BIT(2) + +#define RATE_BITMAP_ALL 0xFFFFF + +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 + +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) +#define PWRBIT_OW_EN BIT(7) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) +#define HTC_LOC_CTRL BIT(14) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define ENMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) + +#define _MIN_SPACE(x) ((x) & 0x7) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) 
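/*
 * A minimal usage sketch: each per-AC EDCA register packs AIFS,
 * ECWmin/ECWmax and the TXOP limit into one dword with the _AIFS(),
 * _ECW_MAX_MIN() and _TXOP_LIMIT() helpers above (fields at offsets 0, 8
 * and 16).  Assumptions: rtl_write_dword() and struct rtl_priv come from
 * the common rtlwifi wifi.h; the helper name and the BE values are only
 * illustrative.
 */
static inline void rtl92ee_example_set_be_edca(struct rtl_priv *rtlpriv)
{
	/* AIFS = 0x43, ECWmin = 4, ECWmax = 0xa, TXOP limit = 0 */
	u32 be_param = _AIFS(0x43) | _ECW_MAX_MIN(0xa4) | _TXOP_LIMIT(0);

	rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, be_param);
}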
+ +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_CCK_PPDU 4 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_HT_PPDU 8 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 + +#define RXERR_COUNTER_MASK 0xFFFFF +#define RXERR_RPT_RST BIT(27) +#define _RXERR_RPT_SEL(type) ((type) << 28) + +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) +#define SCR_TXBCUSEDK BIT(6) +#define SCR_RXBCUSEDK BIT(7) + +#define USB_IS_HIGH_SPEED 0 +#define USB_IS_FULL_SPEED 1 +#define USB_SPEED_MASK BIT(5) + +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 + +#define USB_TEST_EP_MASK 0x30 +#define USB_TEST_EP_SHIFT 4 + +#define USB_AGG_EN BIT(3) + +#define MAC_ADDR_LEN 6 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 175 + +#define POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 3000 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define EPROM_CMD_OPERATING_MODE_MASK ((1 << 7) | (1 << 6)) +#define EPROM_CMD_CONFIG 0x3 +#define EPROM_CMD_LOAD 1 + +#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE + +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) + +#define RPMAC_RESET 0x100 +#define RPMAC_TXSTART 0x104 +#define RPMAC_TXLEGACYSIG 0x108 +#define RPMAC_TXHTSIG1 0x10c +#define RPMAC_TXHTSIG2 0x110 +#define RPMAC_PHYDEBUG 0x114 +#define RPMAC_TXPACKETNUM 0x118 +#define RPMAC_TXIDLE 0x11c +#define RPMAC_TXMACHEADER0 0x120 +#define RPMAC_TXMACHEADER1 0x124 +#define RPMAC_TXMACHEADER2 0x128 +#define RPMAC_TXMACHEADER3 0x12c +#define RPMAC_TXMACHEADER4 0x130 +#define RPMAC_TXMACHEADER5 0x134 +#define RPMAC_TXDADATYPE 0x138 +#define RPMAC_TXRANDOMSEED 0x13c +#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPHEADER 0x144 +#define RPMAC_CCKCRC16 0x148 +#define RPMAC_OFDMRXCRC32OK 0x170 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC8ER 0x17c +#define RPMAC_CCKCRXRC16ER 0x180 +#define RPMAC_CCKCRXRC32ER 0x184 +#define RPMAC_CCKCRXRC32OK 0x188 +#define RPMAC_TXSTATUS 0x18c + +#define RFPGA0_RFMOD 0x800 + +#define RFPGA0_TXINFO 0x804 +#define RFPGA0_PSDFUNCTION 0x808 + +#define RFPGA0_TXGAINSTAGE 0x80c + +#define RFPGA0_RFTIMING1 0x810 +#define RFPGA0_RFTIMING2 0x814 + +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c + +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 + +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 + +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c + +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 + +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 + +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c + +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c + +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +#define 
RFPGA0_XD_LSSIREADBACK 0x8ac + +#define RFPGA0_PSDREPORT 0x8b4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +#define REG_SC_CNT 0x8c4 +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define RFPGA0_XCD_RFINTERFACERB 0x8e4 + +#define RFPGA1_RFMOD 0x900 + +#define RFPGA1_TXBLOCK 0x904 +#define RFPGA1_DEBUGSELECT 0x908 +#define RFPGA1_TXINFO 0x90c + +#define RCCK0_SYSTEM 0xa00 + +#define RCCK0_AFESETTING 0xa04 +#define RCCK0_CCA 0xa08 + +#define RCCK0_RXAGC1 0xa0c +#define RCCK0_RXAGC2 0xa10 + +#define RCCK0_RXHP 0xa14 + +#define RCCK0_DSPPARAMETER1 0xa18 +#define RCCK0_DSPPARAMETER2 0xa1c + +#define RCCK0_TXFILTER1 0xa20 +#define RCCK0_TXFILTER2 0xa24 +#define RCCK0_DEBUGPORT 0xa28 +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 +#define RCCK0_CCA_CNT 0xa60 + +/* PageB(0xB00) */ +#define RPDP_ANTA 0xb00 +#define RPDP_ANTA_4 0xb04 +#define RPDP_ANTA_8 0xb08 +#define RPDP_ANTA_C 0xb0c +#define RPDP_ANTA_10 0xb10 +#define RPDP_ANTA_14 0xb14 +#define RPDP_ANTA_18 0xb18 +#define RPDP_ANTA_1C 0xb1c +#define RPDP_ANTA_20 0xb20 +#define RPDP_ANTA_24 0xb24 + +#define RCONFIG_PMPD_ANTA 0xb28 +#define RCONFIG_RAM64x16 0xb2c + +#define RBNDA 0xb30 +#define RHSSIPAR 0xb34 + +#define RCONFIG_ANTA 0xb68 +#define RCONFIG_ANTB 0xb6c + +#define RPDP_ANTB 0xb70 +#define RPDP_ANTB_4 0xb74 +#define RPDP_ANTB_8 0xb78 +#define RPDP_ANTB_C 0xb7c +#define RPDP_ANTB_10 0xb80 +#define RPDP_ANTB_14 0xb84 +#define RPDP_ANTB_18 0xb88 +#define RPDP_ANTB_1C 0xb8c +#define RPDP_ANTB_20 0xb90 +#define RPDP_ANTB_24 0xb94 + +#define RCONFIG_PMPD_ANTB 0xb98 + +#define RBNDB 0xba0 + +#define RAPK 0xbd8 +#define RPM_RX0_ANTA 0xbdc +#define RPM_RX1_ANTA 0xbe0 +#define RPM_RX2_ANTA 0xbe4 +#define RPM_RX3_ANTA 0xbe8 +#define RPM_RX0_ANTB 0xbec +#define RPM_RX1_ANTB 0xbf0 +#define RPM_RX2_ANTB 0xbf4 +#define RPM_RX3_ANTB 0xbf8 + +/*Page C*/ +#define ROFDM0_LSTF 0xc00 + +#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRMUXPAR 0xc08 +#define ROFDM0_TRSWISOLATION 0xc0c + +#define ROFDM0_XARXAFE 0xc10 +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c + +#define ROFDM0_RXDETECTOR1 0xc30 +#define ROFDM0_RXDETECTOR2 0xc34 +#define ROFDM0_RXDETECTOR3 0xc38 +#define ROFDM0_RXDETECTOR4 0xc3c + +#define ROFDM0_RXDSP 0xc40 +#define ROFDM0_CFOANDDAGC 0xc44 +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c + +#define ROFDM0_XAAGCCORE1 0xc50 +#define ROFDM0_XAAGCCORE2 0xc54 +#define ROFDM0_XBAGCCORE1 0xc58 +#define ROFDM0_XBAGCCORE2 0xc5c +#define ROFDM0_XCAGCCORE1 0xc60 +#define ROFDM0_XCAGCCORE2 0xc64 +#define ROFDM0_XDAGCCORE1 0xc68 +#define ROFDM0_XDAGCCORE2 0xc6c + +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCRSSITABLE 0xc78 +#define ROFDM0_HTSTFAGC 0xc7c + +#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXAFE 0xc84 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXAFE 0xc8c +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XDTXAFE 0xc9c + +#define ROFDM0_RXIQEXTANTA 0xca0 +#define ROFDM0_TXCOEFF1 0xca4 +#define ROFDM0_TXCOEFF2 0xca8 +#define ROFDM0_TXCOEFF3 0xcac +#define ROFDM0_TXCOEFF4 0xcb0 +#define ROFDM0_TXCOEFF5 0xcb4 +#define 
ROFDM0_TXCOEFF6 0xcb8 + +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_FRAMESYNC 0xcf0 +#define ROFDM0_DFSREPORT 0xcf4 + +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 + +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CFOTRACKING 0xd2c +#define ROFDM1_TRXMESAURE1 0xd34 +#define ROFDM1_INTFDET 0xd3c +#define ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 + +#define ROFDM_PHYCOUNTER1 0xda0 +#define ROFDM_PHYCOUNTER2 0xda4 +#define ROFDM_PHYCOUNTER3 0xda8 + +#define ROFDM_SHORTCFOAB 0xdac +#define ROFDM_SHORTCFOCD 0xdb0 +#define ROFDM_LONGCFOAB 0xdb4 +#define ROFDM_LONGCFOCD 0xdb8 +#define ROFDM_TAILCF0AB 0xdbc +#define ROFDM_TAILCF0CD 0xdc0 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_BWREPORT 0xdcc +#define ROFDM_AGCREPORT 0xdd0 +#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXEVMCSI 0xdd8 +#define ROFDM_SIGREPORT 0xddc + +#define RTXAGC_A_RATE18_06 0xe00 +#define RTXAGC_A_RATE54_24 0xe04 +#define RTXAGC_A_CCK1_MCS32 0xe08 +#define RTXAGC_A_MCS03_MCS00 0xe10 +#define RTXAGC_A_MCS07_MCS04 0xe14 +#define RTXAGC_A_MCS11_MCS08 0xe18 +#define RTXAGC_A_MCS15_MCS12 0xe1c + +#define RTXAGC_B_RATE18_06 0x830 +#define RTXAGC_B_RATE54_24 0x834 +#define RTXAGC_B_CCK1_55_MCS32 0x838 +#define RTXAGC_B_MCS03_MCS00 0x83c +#define RTXAGC_B_MCS07_MCS04 0x848 +#define RTXAGC_B_MCS11_MCS08 0x84c +#define RTXAGC_B_MCS15_MCS12 0x868 +#define RTXAGC_B_CCK11_A_CCK2_11 0x86c + +#define RFPGA0_IQK 0xe28 +#define RTX_IQK_TONE_A 0xe30 +#define RRX_IQK_TONE_A 0xe34 +#define RTX_IQK_PI_A 0xe38 +#define RRX_IQK_PI_A 0xe3c + +#define RTX_IQK 0xe40 +#define RRX_IQK 0xe44 +#define RIQK_AGC_PTS 0xe48 +#define RIQK_AGC_RSP 0xe4c +#define RTX_IQK_TONE_B 0xe50 +#define RRX_IQK_TONE_B 0xe54 +#define RTX_IQK_PI_B 0xe58 +#define RRX_IQK_PI_B 0xe5c +#define RIQK_AGC_CONT 0xe60 + +#define RBLUE_TOOTH 0xe6c +#define RRX_WAIT_CCA 0xe70 +#define RTX_CCK_RFON 0xe74 +#define RTX_CCK_BBON 0xe78 +#define RTX_OFDM_RFON 0xe7c +#define RTX_OFDM_BBON 0xe80 +#define RTX_TO_RX 0xe84 +#define RTX_TO_TX 0xe88 +#define RRX_CCK 0xe8c + +#define RTX_POWER_BEFORE_IQK_A 0xe94 +#define RTX_POWER_AFTER_IQK_A 0xe9c + +#define RRX_POWER_BEFORE_IQK_A 0xea0 +#define RRX_POWER_BEFORE_IQK_A_2 0xea4 +#define RRX_POWER_AFTER_IQK_A 0xea8 +#define RRX_POWER_AFTER_IQK_A_2 0xeac + +#define RTX_POWER_BEFORE_IQK_B 0xeb4 +#define RTX_POWER_AFTER_IQK_B 0xebc + +#define RRX_POWER_BEFORE_IQK_B 0xec0 +#define RRX_POWER_BEFORE_IQK_B_2 0xec4 +#define RRX_POWER_AFTER_IQK_B 0xec8 +#define RRX_POWER_AFTER_IQK_B_2 0xecc + +#define RRX_OFDM 0xed0 +#define RRX_WAIT_RIFS 0xed4 +#define RRX_TO_RX 0xed8 +#define RSTANDBY 0xedc +#define RSLEEP 0xee0 +#define RPMPD_ANAEN 0xeec + +#define RZEBRA1_HSSIENABLE 0x0 +#define RZEBRA1_TRXENABLE1 0x1 +#define RZEBRA1_TRXENABLE2 0x2 +#define RZEBRA1_AGC 0x4 +#define RZEBRA1_CHARGEPUMP 0x5 +#define RZEBRA1_CHANNEL 0x7 + +#define RZEBRA1_TXGAIN 0x8 +#define RZEBRA1_TXLPF 0x9 +#define RZEBRA1_RXLPF 0xb +#define RZEBRA1_RXHPFCORNER 0xc + +#define RGLOBALCTRL 0 +#define RRTL8256_TXLPF 19 +#define RRTL8256_RXLPF 11 +#define RRTL8258_TXLPF 0x11 +#define RRTL8258_RXLPF 0x13 +#define RRTL8258_RSSILPF 0xa + +#define RF_AC 0x00 + +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 + +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 + +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F + +#define RF_MODE1 
0x10 +#define RF_MODE2 0x11 + +#define RF_RX_AGC_HP 0x12 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 +#define RF_POW_ABILITY 0x17 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 +#define BGLOBALRESETB 0x200 +#define BOFDMTXSTART 0x4 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 +#define BPMACLOOPBACK 0x10 +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf +#define BOFDMTXRESERVED 0x10 +#define BOFDMTXLENGTH 0x1ffe0 +#define BOFDMTXPARITY 0x20000 +#define BTXHTSIG1 0xffffff +#define BTXHTMCSRATE 0x7f +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff +#define BTXHTSMOOTHING 0x1 +#define BTXHTSOUNDING 0x2 +#define BTXHTRESERVED 0x4 +#define BTXHTAGGREATION 0x8 +#define BTXHTSTBC 0x30 +#define BTXHTADVANCECODING 0x40 +#define BTXHTSHORTGI 0x80 +#define BTXHTNUMBERHT_LTF 0x300 +#define BTXHTCRC8 0x3fc00 +#define BCOUNTERRESET 0x10000 +#define BNUMOFOFDMTX 0xffff +#define BNUMOFCCKTX 0xffff0000 +#define BTXIDLEINTERVAL 0xffff +#define BOFDMSERVICE 0xffff0000 +#define BTXMACHEADER 0xffffffff +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 +#define BTXRANDOMSEED 0xffffffff +#define BCCKTXPREAMBLE 0x1 +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff +#define BCCKTXSERVICE 0xff00 +#define BCCKLENGTHEXT 0x8000 +#define BCCKTXLENGHT 0xffff0000 +#define BCCKTXCRC16 0xffff +#define BCCKTXSTATUS 0x1 +#define BOFDMTXSTATUS 0x2 +#define IS_BB_REG_OFFSET_92S(_offset) \ + ((_offset >= 0x800) && (_offset <= 0xfff)) + +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +#define BCCKEN 0x1000000 +#define BOFDMEN 0x2000000 + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 +#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define 
BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 + +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 +#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define 
BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define BCCKRXRFSETTLE 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 
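/*
 * A minimal usage sketch: the CCK false-alarm count is reported in two
 * halves, masked by BCCK_RXFA_COUNTER_LOWER and BCCK_RXFA_COUNTER_UPPER on
 * the RCCK0_FACOUNTERLOWER and RCCK0_FACOUNTERUPPER PHY registers defined
 * earlier.  Assumptions: rtl_get_bbreg() (which returns the field already
 * right-shifted by its mask) and struct ieee80211_hw come from the common
 * rtlwifi code; the helper name is only illustrative.
 */
static inline u32 rtl92ee_example_cck_false_alarms(struct ieee80211_hw *hw)
{
	u32 lower = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER,
				  BCCK_RXFA_COUNTER_LOWER);
	u32 upper = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER,
				  BCCK_RXFA_COUNTER_UPPER);

	return (upper << 8) | lower;
}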
+#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 
0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 
0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define TUPDATE_RXCOUNTER 100 + +#define REG_UN_used_register 0x01bf + +/* WOL bit information */ +#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0) +#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1) +#define HAL92C_WOL_DISASSOC_EVENT BIT(2) +#define HAL92C_WOL_DEAUTH_EVENT BIT(3) +#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4) + +#define WOL_REASON_PTK_UPDATE BIT(0) +#define WOL_REASON_GTK_UPDATE BIT(1) +#define WOL_REASON_DISASSOC BIT(2) +#define WOL_REASON_DEAUTH BIT(3) +#define WOL_REASON_FW_DISCONNECT BIT(4) +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c new file mode 100644 index 0000000000000000000000000000000000000000..c9bc33cd109002b151a6bab65aea74959a10e924 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" + +static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff) | BIT(10) | BIT(11)); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + case HT_CHANNEL_WIDTH_20_40: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff) | BIT(10)); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", bandwidth); + break; + } +} + +bool rtl92ee_phy_rf6052_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl92ee_phy_rf6052_config_parafile(hw); +} + +static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 u4_regvalue = 0; + u8 rfpath; + bool rtstatus = true; + struct bb_reg_def *pphyreg; + + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + pphyreg = &rtlphy->phyreg_def[rfpath]; + + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV); + break; + case RF90_PATH_B: + case RF90_PATH_D: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16); + break; + } + + rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, + B3WIREADDREAALENGTH, 0x0); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); + udelay(1); + + switch (rfpath) { + case RF90_PATH_A: + rtstatus = rtl92ee_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + case RF90_PATH_B: + rtstatus = rtl92ee_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + case RF90_PATH_C: + break; + case RF90_PATH_D: + break; + } + + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV, u4_regvalue); + break; + case RF90_PATH_B: + case RF90_PATH_D: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16, u4_regvalue); + break; + } + + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Radio[%d] Fail!!", rfpath); + return false; + } + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); + return rtstatus; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h new file mode 100644 index 0000000000000000000000000000000000000000..8bdeed3c064ebb43e3141bb4305d4832c7f4e3c3 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h @@ -0,0 +1,36 @@ 
+/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_RF_H__ +#define __RTL92E_RF_H__ + +#define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F + +void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +bool rtl92ee_phy_rf6052_config(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c new file mode 100644 index 0000000000000000000000000000000000000000..9b5a7d5be1215626929e5d8673d62dfd50743b26 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c @@ -0,0 +1,399 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../core.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "hw.h" +#include "sw.h" +#include "fw.h" +#include "trx.h" +#include "led.h" +#include "table.h" + +#include "../btcoexist/rtl_btc.h" + +#include +#include + +static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + /*close ASPM for AMD defaultly */ + rtlpci->const_amdpci_aspm = 0; + + /** + * ASPM PS mode. + * 0 - Disable ASPM, + * 1 - Enable ASPM without Clock Req, + * 2 - Enable ASPM with Clock Req, + * 3 - Alwyas Enable ASPM with Clock Req, + * 4 - Always Enable ASPM without Clock Req. + * set defult to RTL8192CE:3 RTL8192E:2 + */ + rtlpci->const_pci_aspm = 3; + + /*Setting for PCI-E device */ + rtlpci->const_devicepci_aspm_setting = 0x03; + + /*Setting for PCI-E bridge */ + rtlpci->const_hostpci_aspm_setting = 0x02; + + /** + * In Hw/Sw Radio Off situation. 
+ * 0 - Default, + * 1 - From ASPM setting without low Mac Pwr, + * 2 - From ASPM setting with low Mac Pwr, + * 3 - Bus D3 + * set default to RTL8192CE:0 RTL8192SE:2 + */ + rtlpci->const_hwsw_rfoff_d3 = 0; + + /** + * This setting works for those device with + * backdoor ASPM setting such as EPHY setting. + * 0 - Not support ASPM, + * 1 - Support ASPM, + * 2 - According to chipset. + */ + rtlpci->const_support_pciaspm = 1; +} + +int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int err = 0; + + rtl92ee_bt_reg_init(hw); + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); + + rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_flag = 0; + rtlpriv->dm.disable_framebursting = 0; + rtlpci->transmit_config = CFENDFORM | BIT(15); + + /*just 2.4G band*/ + rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; + rtlpriv->rtlhal.bandset = BAND_ON_2_4G; + rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; + + rtlpci->receive_config = (RCR_APPFCS | + RCR_APP_MIC | + RCR_APP_ICV | + RCR_APP_PHYST_RXFF | + RCR_HTC_LOC_CTRL | + RCR_AMF | + RCR_ACF | + RCR_ADF | + RCR_AICV | + RCR_ACRC32 | + RCR_AB | + RCR_AM | + RCR_APM | + 0); + + rtlpci->irq_mask[0] = (u32)(IMR_PSTIMEOUT | + IMR_C2HCMD | + IMR_HIGHDOK | + IMR_MGNTDOK | + IMR_BKDOK | + IMR_BEDOK | + IMR_VIDOK | + IMR_VODOK | + IMR_RDU | + IMR_ROK | + 0); + rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); + + /* for debug level */ + rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; + /* for LPS & IPS */ + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + if (rtlpriv->cfg->mod_params->disable_watchdog) + pr_info("watchdog disabled\n"); + rtlpriv->psc.reg_fwctrl_lps = 3; + rtlpriv->psc.reg_max_lps_awakeintvl = 5; + /* for ASPM, you can close aspm through + * set const_support_pciaspm = 0 + */ + rtl92ee_init_aspm_vars(hw); + + if (rtlpriv->psc.reg_fwctrl_lps == 1) + rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; + else if (rtlpriv->psc.reg_fwctrl_lps == 2) + rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; + else if (rtlpriv->psc.reg_fwctrl_lps == 3) + rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; + + /* for early mode */ + rtlpriv->rtlhal.earlymode_enable = false; + + /*low power */ + rtlpriv->psc.low_power_enable = false; + + /* for firmware buf */ + rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); + if (!rtlpriv->rtlhal.pfirmware) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't alloc buffer for fw\n"); + return 1; + } + + /* request fw */ + rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin"; + + rtlpriv->max_fw_size = 0x8000; + pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to request firmware!\n"); + return 1; + } + + return 0; +} + +void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->rtlhal.pfirmware) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } +} + +/* get bt coexist status */ +bool rtl92ee_get_btc_status(void) +{ + return true; +} + +static struct rtl_hal_ops rtl8192ee_hal_ops = { + 
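+	/* Chip-specific callbacks handed to the generic rtlwifi core; the
+	 * core only invokes them indirectly through rtlpriv->cfg->ops, so
+	 * this table is what binds the 8192EE code into the common driver.
+	 */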
.init_sw_vars = rtl92ee_init_sw_vars, + .deinit_sw_vars = rtl92ee_deinit_sw_vars, + .read_eeprom_info = rtl92ee_read_eeprom_info, + .interrupt_recognized = rtl92ee_interrupt_recognized,/*need check*/ + .hw_init = rtl92ee_hw_init, + .hw_disable = rtl92ee_card_disable, + .hw_suspend = rtl92ee_suspend, + .hw_resume = rtl92ee_resume, + .enable_interrupt = rtl92ee_enable_interrupt, + .disable_interrupt = rtl92ee_disable_interrupt, + .set_network_type = rtl92ee_set_network_type, + .set_chk_bssid = rtl92ee_set_check_bssid, + .set_qos = rtl92ee_set_qos, + .set_bcn_reg = rtl92ee_set_beacon_related_registers, + .set_bcn_intv = rtl92ee_set_beacon_interval, + .update_interrupt_mask = rtl92ee_update_interrupt_mask, + .get_hw_reg = rtl92ee_get_hw_reg, + .set_hw_reg = rtl92ee_set_hw_reg, + .update_rate_tbl = rtl92ee_update_hal_rate_tbl, + .pre_fill_tx_bd_desc = rtl92ee_pre_fill_tx_bd_desc, + .rx_desc_buff_remained_cnt = rtl92ee_rx_desc_buff_remained_cnt, + .rx_check_dma_ok = rtl92ee_rx_check_dma_ok, + .fill_tx_desc = rtl92ee_tx_fill_desc, + .fill_tx_cmddesc = rtl92ee_tx_fill_cmddesc, + .query_rx_desc = rtl92ee_rx_query_desc, + .set_channel_access = rtl92ee_update_channel_access_setting, + .radio_onoff_checking = rtl92ee_gpio_radio_on_off_checking, + .set_bw_mode = rtl92ee_phy_set_bw_mode, + .switch_channel = rtl92ee_phy_sw_chnl, + .dm_watchdog = rtl92ee_dm_watchdog, + .scan_operation_backup = rtl92ee_phy_scan_operation_backup, + .set_rf_power_state = rtl92ee_phy_set_rf_power_state, + .led_control = rtl92ee_led_control, + .set_desc = rtl92ee_set_desc, + .get_desc = rtl92ee_get_desc, + .is_tx_desc_closed = rtl92ee_is_tx_desc_closed, + .tx_polling = rtl92ee_tx_polling, + .enable_hw_sec = rtl92ee_enable_hw_security_config, + .set_key = rtl92ee_set_key, + .init_sw_leds = rtl92ee_init_sw_leds, + .get_bbreg = rtl92ee_phy_query_bb_reg, + .set_bbreg = rtl92ee_phy_set_bb_reg, + .get_rfreg = rtl92ee_phy_query_rf_reg, + .set_rfreg = rtl92ee_phy_set_rf_reg, + .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, + .get_btc_status = rtl92ee_get_btc_status, + .rx_command_packet = rtl92ee_rx_command_packet, +}; + +static struct rtl_mod_params rtl92ee_mod_params = { + .sw_crypto = false, + .inactiveps = false, + .swctrl_lps = false, + .fwctrl_lps = true, + .msi_support = true, + .debug = DBG_EMERG, +}; + +static struct rtl_hal_cfg rtl92ee_hal_cfg = { + .bar_id = 2, + .write_readback = true, + .name = "rtl92ee_pci", + .fw_name = "rtlwifi/rtl8192eefw.bin", + .ops = &rtl8192ee_hal_ops, + .mod_params = &rtl92ee_mod_params, + + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = AM, + .maps[MAC_RCR_AB] = AB, + .maps[MAC_RCR_ACRC32] = ACRC32, + .maps[MAC_RCR_ACF] = ACF, + .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, + + .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, + .maps[EFUSE_ANA8M] = ANA8M, + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + 
.maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, + .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, + + .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, +}; + +static struct pci_device_id rtl92ee_pci_ids[] = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x818B, rtl92ee_hal_cfg)}, + {}, +}; + +MODULE_DEVICE_TABLE(pci, rtl92ee_pci_ids); + +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_AUTHOR("Larry Finger "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8192EE 802.11n PCI wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8192eefw.bin"); + +module_param_named(swenc, rtl92ee_mod_params.sw_crypto, bool, 0444); +module_param_named(debug, rtl92ee_mod_params.debug, int, 0444); +module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444); +module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444); +module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl92ee_mod_params.msi_support, bool, 0444); +module_param_named(disable_watchdog, rtl92ee_mod_params.disable_watchdog, + bool, 0444); +MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); +MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); +MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); + +static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, 
rtl_pci_suspend, rtl_pci_resume); + +static struct pci_driver rtl92ee_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl92ee_pci_ids, + .probe = rtl_pci_probe, + .remove = rtl_pci_disconnect, + .driver.pm = &rtlwifi_pm_ops, +}; + +module_pci_driver(rtl92ee_driver); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h new file mode 100644 index 0000000000000000000000000000000000000000..21433d0332d035066d3c1b42ac82e6f744caaefc --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h @@ -0,0 +1,33 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_SW_H__ +#define __RTL92E_SW_H__ + +int rtl92ee_init_sw_vars(struct ieee80211_hw *hw); +void rtl92ee_deinit_sw_vars(struct ieee80211_hw *hw); +bool rtl92ee_get_btc_status(void); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/rtlwifi/rtl8192ee/table.c new file mode 100644 index 0000000000000000000000000000000000000000..abcdd0670fd823807e3db1c0a4dad93cdf90129e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/table.c @@ -0,0 +1,882 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#include "table.h" +u32 RTL8192EE_PHY_REG_ARRAY[] = { + 0x800, 0x80040000, + 0x804, 0x00000003, + 0x808, 0x0000FC00, + 0x80C, 0x0000000A, + 0x810, 0x10001331, + 0x814, 0x020C3D10, + 0x818, 0x02220385, + 0x81C, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390204, + 0x828, 0x01000100, + 0x82C, 0x00390204, + 0x830, 0x32323232, + 0x834, 0x30303030, + 0x838, 0x30303030, + 0x83C, 0x30303030, + 0x840, 0x00010000, + 0x844, 0x00010000, + 0x848, 0x28282828, + 0x84C, 0x28282828, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x009A009A, + 0x85C, 0x01000014, + 0x860, 0x66F60000, + 0x864, 0x061F0000, + 0x868, 0x30303030, + 0x86C, 0x30303030, + 0x870, 0x00000000, + 0x874, 0x55004200, + 0x878, 0x08080808, + 0x87C, 0x00000000, + 0x880, 0xB0000C1C, + 0x884, 0x00000001, + 0x888, 0x00000000, + 0x88C, 0xCC0000C0, + 0x890, 0x00000800, + 0x894, 0xFFFFFFFE, + 0x898, 0x40302010, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90C, 0x81121313, + 0x910, 0x806C0001, + 0x914, 0x00000001, + 0x918, 0x00000000, + 0x91C, 0x00010000, + 0x924, 0x00000001, + 0x928, 0x00000000, + 0x92C, 0x00000000, + 0x930, 0x00000000, + 0x934, 0x00000000, + 0x938, 0x00000000, + 0x93C, 0x00000000, + 0x940, 0x00000000, + 0x944, 0x00000000, + 0x94C, 0x00000008, + 0xA00, 0x00D0C7C8, + 0xA04, 0x81FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E68120F, + 0xA10, 0x95009B78, + 0xA14, 0x1114D028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00D30000, + 0xA70, 0x101FBF00, + 0xA74, 0x00000007, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x218075B1, + 0xB38, 0x00000000, + 0xC00, 0x48071D40, + 0xC04, 0x03A05633, + 0xC08, 0x000000E4, + 0xC0C, 0x6C6C6C6C, + 0xC10, 0x08800000, + 0xC14, 0x40000100, + 0xC18, 0x08800000, + 0xC1C, 0x40000100, + 0xC20, 0x00000000, + 0xC24, 0x00000000, + 0xC28, 0x00000000, + 0xC2C, 0x00000000, + 0xC30, 0x69E9AC47, + 0xC34, 0x469652AF, + 0xC38, 0x49795994, + 0xC3C, 0x0A97971C, + 0xC40, 0x1F7C403F, + 0xC44, 0x000100B7, + 0xC48, 0xEC020107, + 0xC4C, 0x007F037F, + 0xFF010718, 0xABCD, + 0xC50, 0x00340220, + 0xCDCDCDCD, 0xCDCD, + 0xC50, 0x00340020, + 0xFF010718, 0xDEAD, + 0xC54, 0x0080801F, + 0xFF010718, 0xABCD, + 0xC58, 0x00000220, + 0xCDCDCDCD, 0xCDCD, + 0xC58, 0x00000020, + 0xFF010718, 0xDEAD, + 0xC5C, 0x00248492, + 0xC60, 0x00000000, + 0xC64, 0x7112848B, + 0xC68, 0x47C00BFF, + 0xC6C, 0x00000036, + 0xC70, 0x00000600, + 0xC74, 0x02013169, + 0xC78, 0x0000001F, + 0xC7C, 0x00B91612, + 0xFF010718, 0xABCD, + 0xC80, 0x2D4000B5, + 0xCDCDCDCD, 0xCDCD, + 0xC80, 0x40000100, + 0xFF010718, 0xDEAD, + 0xC84, 0x21F60000, + 0xFF010718, 0xABCD, + 0xC88, 0x2D4000B5, + 0xCDCDCDCD, 0xCDCD, + 0xC88, 0x40000100, + 0xFF010718, 0xDEAD, + 0xC8C, 0xA0E40000, + 0xC90, 0x00121820, + 0xC94, 0x00000000, + 0xC98, 0x00121820, + 0xC9C, 0x00007F7F, + 0xCA0, 0x00000000, + 0xCA4, 0x000300A0, + 0xCA8, 0x00000000, + 0xCAC, 0x00000000, + 0xCB0, 0x00000000, + 0xCB4, 0x00000000, + 0xCB8, 0x00000000, + 0xCBC, 0x28000000, + 0xCC0, 0x00000000, + 0xCC4, 0x00000000, + 0xCC8, 0x00000000, + 0xCCC, 0x00000000, + 0xCD0, 0x00000000, + 0xCD4, 0x00000000, + 0xCD8, 0x64B22427, + 0xCDC, 0x00766932, + 0xCE0, 0x00222222, + 0xCE4, 0x00040000, + 0xCE8, 0x77644302, + 0xCEC, 0x2F97D40C, + 0xD00, 0x00080740, + 0xD04, 0x00020403, + 0xD08, 0x0000907F, + 0xD0C, 0x20010201, + 0xD10, 0xA0633333, + 0xD14, 0x3333BC43, + 0xD18, 0x7A8F5B6B, + 0xD1C, 0x0000007F, + 
0xD2C, 0xCC979975, + 0xD30, 0x00000000, + 0xD34, 0x80608000, + 0xD38, 0x00000000, + 0xD3C, 0x00127353, + 0xD40, 0x00000000, + 0xD44, 0x00000000, + 0xD48, 0x00000000, + 0xD4C, 0x00000000, + 0xD50, 0x6437140A, + 0xD54, 0x00000000, + 0xD58, 0x00000282, + 0xD5C, 0x30032064, + 0xD60, 0x4653DE68, + 0xD64, 0x04518A3C, + 0xD68, 0x00002101, + 0xD6C, 0x2A201C16, + 0xD70, 0x1812362E, + 0xD74, 0x322C2220, + 0xD78, 0x000E3C24, + 0xD80, 0x01081008, + 0xD84, 0x00000800, + 0xD88, 0xF0B50000, + 0xE00, 0x30303030, + 0xE04, 0x30303030, + 0xE08, 0x03903030, + 0xE10, 0x30303030, + 0xE14, 0x30303030, + 0xE18, 0x30303030, + 0xE1C, 0x30303030, + 0xE28, 0x00000000, + 0xE30, 0x1000DC1F, + 0xE34, 0x10008C1F, + 0xE38, 0x02140102, + 0xE3C, 0x681604C2, + 0xE40, 0x01007C00, + 0xE44, 0x01004800, + 0xE48, 0xFB000000, + 0xE4C, 0x000028D1, + 0xE50, 0x1000DC1F, + 0xE54, 0x10008C1F, + 0xE58, 0x02140102, + 0xE5C, 0x28160D05, + 0xE60, 0x00000008, + 0xE68, 0x0FC05656, + 0xE6C, 0x03C09696, + 0xE70, 0x03C09696, + 0xE74, 0x0C005656, + 0xE78, 0x0C005656, + 0xE7C, 0x0C005656, + 0xE80, 0x0C005656, + 0xE84, 0x03C09696, + 0xE88, 0x0C005656, + 0xE8C, 0x03C09696, + 0xED0, 0x03C09696, + 0xED4, 0x03C09696, + 0xED8, 0x03C09696, + 0xEDC, 0x0000D6D6, + 0xEE0, 0x0000D6D6, + 0xEEC, 0x0FC01616, + 0xEE4, 0xB0000C1C, + 0xEE8, 0x00000001, + 0xF14, 0x00000003, + 0xF4C, 0x00000000, + 0xF00, 0x00000300, +}; + +u32 RTL8192EE_PHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, + 0, 0, 1, 0x00000e08, 0x0000ff00, 0x00003200, + 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, + 0, 0, 1, 0x0000086c, 0xffffff00, 0x32323200, + 0, 0, 0, 0x00000e00, 0xffffffff, 0x34343636, + 0, 0, 1, 0x00000e00, 0xffffffff, 0x34343636, + 0, 0, 0, 0x00000e04, 0xffffffff, 0x28283032, + 0, 0, 1, 0x00000e04, 0xffffffff, 0x28283032, + 0, 0, 0, 0x00000e10, 0xffffffff, 0x34363840, + 0, 0, 1, 0x00000e10, 0xffffffff, 0x34363840, + 0, 0, 0, 0x00000e14, 0xffffffff, 0x26283032, + 0, 0, 1, 0x00000e14, 0xffffffff, 0x26283032, + 0, 0, 1, 0x00000e18, 0xffffffff, 0x36384040, + 0, 0, 1, 0x00000e1c, 0xffffffff, 0x24262832, + 0, 1, 0, 0x00000838, 0xffffff00, 0x32323200, + 0, 1, 1, 0x00000838, 0xffffff00, 0x32323200, + 0, 1, 0, 0x0000086c, 0x000000ff, 0x00000032, + 0, 1, 1, 0x0000086c, 0x000000ff, 0x00000032, + 0, 1, 0, 0x00000830, 0xffffffff, 0x34343636, + 0, 1, 1, 0x00000830, 0xffffffff, 0x34343636, + 0, 1, 0, 0x00000834, 0xffffffff, 0x28283032, + 0, 1, 1, 0x00000834, 0xffffffff, 0x28283032, + 0, 1, 0, 0x0000083c, 0xffffffff, 0x34363840, + 0, 1, 1, 0x0000083c, 0xffffffff, 0x34363840, + 0, 1, 0, 0x00000848, 0xffffffff, 0x26283032, + 0, 1, 1, 0x00000848, 0xffffffff, 0x26283032, + 0, 1, 1, 0x0000084c, 0xffffffff, 0x36384040, + 0, 1, 1, 0x00000868, 0xffffffff, 0x24262832 +}; + +u32 RTL8192EE_RADIOA_ARRAY[] = { + 0x07F, 0x00000082, + 0x081, 0x0003FC00, + 0x000, 0x00030000, + 0x008, 0x00008400, + 0x018, 0x00000407, + 0x019, 0x00000012, + 0x01B, 0x00000064, + 0x01E, 0x00080009, + 0x01F, 0x00000880, + 0x02F, 0x0001A060, + 0x03F, 0x00000000, + 0x042, 0x000060C0, + 0x057, 0x000D0000, + 0x058, 0x000BE180, + 0x067, 0x00001552, + 0x083, 0x00000000, + 0x0B0, 0x000FF9F1, + 0x0B1, 0x00055418, + 0x0B2, 0x0008CC00, + 0x0B4, 0x00043083, + 0x0B5, 0x00008166, + 0x0B6, 0x0000803E, + 0x0B7, 0x0001C69F, + 0x0B8, 0x0000407F, + 0x0B9, 0x00080001, + 0x0BA, 0x00040001, + 0x0BB, 0x00000400, + 0x0BF, 0x000C0000, + 0x0C2, 0x00002400, + 0x0C3, 0x00000009, + 0x0C4, 0x00040C91, + 0x0C5, 0x00099999, + 0x0C6, 0x000000A3, + 0x0C7, 0x00088820, + 0x0C8, 0x00076C06, + 0x0C9, 0x00000000, + 0x0CA, 0x00080000, + 0x0DF, 
0x00000180, + 0x0EF, 0x000001A0, + 0x051, 0x00069545, + 0x052, 0x0007E45E, + 0x053, 0x00000071, + 0x056, 0x00051FF3, + 0x035, 0x000000A8, + 0x035, 0x000001E2, + 0x035, 0x000002A8, + 0x036, 0x00001C24, + 0x036, 0x00009C24, + 0x036, 0x00011C24, + 0x036, 0x00019C24, + 0x018, 0x00000C07, + 0x05A, 0x00048000, + 0x019, 0x000739D0, + 0xFF010718, 0xABCD, + 0x034, 0x0000A093, + 0x034, 0x0000908F, + 0x034, 0x0000808C, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000000, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADD7, + 0x034, 0x00009DD4, + 0x034, 0x00008DD1, + 0x034, 0x00007DCE, + 0x034, 0x00006DCB, + 0x034, 0x00005DC8, + 0x034, 0x00004DC5, + 0x034, 0x000034CC, + 0x034, 0x0000244F, + 0x034, 0x0000144C, + 0x034, 0x00000014, + 0xFF010718, 0xDEAD, + 0x000, 0x00030159, + 0x084, 0x00068180, + 0x086, 0x0000014E, + 0x087, 0x00048E00, + 0x08E, 0x00065540, + 0x08F, 0x00088000, + 0x0EF, 0x000020A0, + 0xFF010718, 0xABCD, + 0x03B, 0x000F07B0, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x000F02B0, + 0xFF010718, 0xDEAD, + 0x03B, 0x000EF7B0, + 0x03B, 0x000D4FB0, + 0x03B, 0x000CF060, + 0x03B, 0x000B0090, + 0x03B, 0x000A0080, + 0x03B, 0x00090080, + 0x03B, 0x0008F780, + 0xFF010718, 0xABCD, + 0x03B, 0x000787B0, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00078730, + 0xFF010718, 0xDEAD, + 0x03B, 0x00060FB0, + 0x03B, 0x0005FFA0, + 0x03B, 0x00040620, + 0x03B, 0x00037090, + 0x03B, 0x00020080, + 0x03B, 0x0001F060, + 0x03B, 0x0000FFB0, + 0x0EF, 0x000000A0, + 0x0FE, 0x00000000, + 0x018, 0x0000FC07, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x01E, 0x00000001, + 0x01F, 0x00080000, + 0x000, 0x00033E70, +}; + +u32 RTL8192EE_RADIOB_ARRAY[] = { + 0x07F, 0x00000082, + 0x081, 0x0003FC00, + 0x000, 0x00030000, + 0x008, 0x00008400, + 0x018, 0x00000407, + 0x019, 0x00000012, + 0x01B, 0x00000064, + 0x01E, 0x00080009, + 0x01F, 0x00000880, + 0x02F, 0x0001A060, + 0x03F, 0x00000000, + 0x042, 0x000060C0, + 0x057, 0x000D0000, + 0x058, 0x000BE180, + 0x067, 0x00001552, + 0x07F, 0x00000082, + 0x081, 0x0003F000, + 0x083, 0x00000000, + 0x0DF, 0x00000180, + 0x0EF, 0x000001A0, + 0x051, 0x00069545, + 0x052, 0x0007E42E, + 0x053, 0x00000071, + 0x056, 0x00051FF3, + 0x035, 0x000000A8, + 0x035, 0x000001E0, + 0x035, 0x000002A8, + 0x036, 0x00001CA8, + 0x036, 0x00009C24, + 0x036, 0x00011C24, + 0x036, 0x00019C24, + 0x018, 0x00000C07, + 0x05A, 0x00048000, + 0x019, 0x000739D0, + 0xFF010718, 0xABCD, + 0x034, 0x0000A093, + 0x034, 0x0000908F, + 0x034, 0x0000808C, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000000, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADD7, + 0x034, 0x00009DD4, + 0x034, 0x00008DD1, + 0x034, 0x00007DCE, + 0x034, 0x00006DCB, + 0x034, 0x00005DC8, + 0x034, 0x00004DC5, + 0x034, 0x000034CC, + 0x034, 0x0000244F, + 0x034, 0x0000144C, + 0x034, 0x00000014, + 0xFF010718, 0xDEAD, + 0x000, 0x00030159, + 0x084, 0x00068180, + 0x086, 0x000000CE, + 0x087, 0x00048A00, + 0x08E, 0x00065540, + 0x08F, 0x00088000, + 0x0EF, 0x000020A0, + 0xFF010718, 0xABCD, + 0x03B, 0x000F07B0, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x000F02B0, + 0xFF010718, 0xDEAD, + 0x03B, 0x000EF7B0, + 0x03B, 0x000D4FB0, + 0x03B, 0x000CF060, + 0x03B, 0x000B0090, + 0x03B, 0x000A0080, + 0x03B, 0x00090080, + 0x03B, 0x0008F780, + 0xFF010718, 0xABCD, + 0x03B, 0x000787B0, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00078730, + 0xFF010718, 0xDEAD, + 0x03B, 0x00060FB0, + 0x03B, 
0x0005FFA0, + 0x03B, 0x00040620, + 0x03B, 0x00037090, + 0x03B, 0x00020080, + 0x03B, 0x0001F060, + 0x03B, 0x0000FFB0, + 0x0EF, 0x000000A0, + 0x000, 0x00010159, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x01E, 0x00000001, + 0x01F, 0x00080000, + 0x000, 0x00033E70, +}; + +u32 RTL8192EE_MAC_ARRAY[] = { + 0x011, 0x000000EB, + 0x012, 0x00000007, + 0x014, 0x00000075, + 0x303, 0x000000A7, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x540, 0x00000012, + 0x541, 0x00000064, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, +}; + +u32 RTL8192EE_AGC_TAB_ARRAY[] = { + 0xFF010718, 0xABCD, + 0xC78, 0xFA000001, + 0xC78, 0xF9010001, + 0xC78, 0xF8020001, + 0xC78, 0xF7030001, + 0xC78, 0xF6040001, + 0xC78, 0xF5050001, + 0xC78, 0xF4060001, + 0xC78, 0xF3070001, + 0xC78, 0xF2080001, + 0xC78, 0xF1090001, + 0xC78, 0xF00A0001, + 0xC78, 0xEF0B0001, + 0xC78, 0xEE0C0001, + 0xC78, 0xED0D0001, + 0xC78, 0xEC0E0001, + 0xC78, 0xEB0F0001, + 0xC78, 0xEA100001, + 0xC78, 0xE9110001, + 0xC78, 0xE8120001, + 0xC78, 0xE7130001, + 0xC78, 0xE6140001, + 0xC78, 0xE5150001, + 0xC78, 0xE4160001, + 0xC78, 0xE3170001, + 0xC78, 0xE2180001, + 0xC78, 0xE1190001, + 0xC78, 0x8A1A0001, + 0xC78, 0x891B0001, + 0xC78, 0x881C0001, + 0xC78, 0x871D0001, + 0xC78, 0x861E0001, + 0xC78, 0x851F0001, + 0xC78, 0x84200001, + 0xC78, 0x83210001, + 0xC78, 0x82220001, + 0xC78, 0x6A230001, + 0xC78, 0x69240001, + 0xC78, 0x68250001, + 0xC78, 0x67260001, + 0xC78, 0x66270001, + 0xC78, 0x65280001, + 0xC78, 0x64290001, + 0xC78, 0x632A0001, + 0xC78, 0x622B0001, + 0xC78, 0x612C0001, + 0xC78, 0x602D0001, + 0xC78, 0x472E0001, + 0xC78, 0x462F0001, + 0xC78, 
0x45300001, + 0xC78, 0x44310001, + 0xC78, 0x43320001, + 0xC78, 0x42330001, + 0xC78, 0x41340001, + 0xC78, 0x40350001, + 0xC78, 0x40360001, + 0xC78, 0x40370001, + 0xC78, 0x40380001, + 0xC78, 0x40390001, + 0xC78, 0x403A0001, + 0xC78, 0x403B0001, + 0xC78, 0x403C0001, + 0xC78, 0x403D0001, + 0xC78, 0x403E0001, + 0xC78, 0x403F0001, + 0xCDCDCDCD, 0xCDCD, + 0xC78, 0xFB000001, + 0xC78, 0xFB010001, + 0xC78, 0xFB020001, + 0xC78, 0xFB030001, + 0xC78, 0xFB040001, + 0xC78, 0xFB050001, + 0xC78, 0xFA060001, + 0xC78, 0xF9070001, + 0xC78, 0xF8080001, + 0xC78, 0xF7090001, + 0xC78, 0xF60A0001, + 0xC78, 0xF50B0001, + 0xC78, 0xF40C0001, + 0xC78, 0xF30D0001, + 0xC78, 0xF20E0001, + 0xC78, 0xF10F0001, + 0xC78, 0xF0100001, + 0xC78, 0xEF110001, + 0xC78, 0xEE120001, + 0xC78, 0xED130001, + 0xC78, 0xEC140001, + 0xC78, 0xEB150001, + 0xC78, 0xEA160001, + 0xC78, 0xE9170001, + 0xC78, 0xE8180001, + 0xC78, 0xE7190001, + 0xC78, 0xC81A0001, + 0xC78, 0xC71B0001, + 0xC78, 0xC61C0001, + 0xC78, 0x071D0001, + 0xC78, 0x061E0001, + 0xC78, 0x051F0001, + 0xC78, 0x04200001, + 0xC78, 0x03210001, + 0xC78, 0xAA220001, + 0xC78, 0xA9230001, + 0xC78, 0xA8240001, + 0xC78, 0xA7250001, + 0xC78, 0xA6260001, + 0xC78, 0x85270001, + 0xC78, 0x84280001, + 0xC78, 0x83290001, + 0xC78, 0x252A0001, + 0xC78, 0x242B0001, + 0xC78, 0x232C0001, + 0xC78, 0x222D0001, + 0xC78, 0x672E0001, + 0xC78, 0x662F0001, + 0xC78, 0x65300001, + 0xC78, 0x64310001, + 0xC78, 0x63320001, + 0xC78, 0x62330001, + 0xC78, 0x61340001, + 0xC78, 0x45350001, + 0xC78, 0x44360001, + 0xC78, 0x43370001, + 0xC78, 0x42380001, + 0xC78, 0x41390001, + 0xC78, 0x403A0001, + 0xC78, 0x403B0001, + 0xC78, 0x403C0001, + 0xC78, 0x403D0001, + 0xC78, 0x403E0001, + 0xC78, 0x403F0001, + 0xFF010718, 0xDEAD, + 0xFF010718, 0xABCD, + 0xC78, 0xFA400001, + 0xC78, 0xF9410001, + 0xC78, 0xF8420001, + 0xC78, 0xF7430001, + 0xC78, 0xF6440001, + 0xC78, 0xF5450001, + 0xC78, 0xF4460001, + 0xC78, 0xF3470001, + 0xC78, 0xF2480001, + 0xC78, 0xF1490001, + 0xC78, 0xF04A0001, + 0xC78, 0xEF4B0001, + 0xC78, 0xEE4C0001, + 0xC78, 0xED4D0001, + 0xC78, 0xEC4E0001, + 0xC78, 0xEB4F0001, + 0xC78, 0xEA500001, + 0xC78, 0xE9510001, + 0xC78, 0xE8520001, + 0xC78, 0xE7530001, + 0xC78, 0xE6540001, + 0xC78, 0xE5550001, + 0xC78, 0xE4560001, + 0xC78, 0xE3570001, + 0xC78, 0xE2580001, + 0xC78, 0xE1590001, + 0xC78, 0x8A5A0001, + 0xC78, 0x895B0001, + 0xC78, 0x885C0001, + 0xC78, 0x875D0001, + 0xC78, 0x865E0001, + 0xC78, 0x855F0001, + 0xC78, 0x84600001, + 0xC78, 0x83610001, + 0xC78, 0x82620001, + 0xC78, 0x6A630001, + 0xC78, 0x69640001, + 0xC78, 0x68650001, + 0xC78, 0x67660001, + 0xC78, 0x66670001, + 0xC78, 0x65680001, + 0xC78, 0x64690001, + 0xC78, 0x636A0001, + 0xC78, 0x626B0001, + 0xC78, 0x616C0001, + 0xC78, 0x606D0001, + 0xC78, 0x476E0001, + 0xC78, 0x466F0001, + 0xC78, 0x45700001, + 0xC78, 0x44710001, + 0xC78, 0x43720001, + 0xC78, 0x42730001, + 0xC78, 0x41740001, + 0xC78, 0x40750001, + 0xC78, 0x40760001, + 0xC78, 0x40770001, + 0xC78, 0x40780001, + 0xC78, 0x40790001, + 0xC78, 0x407A0001, + 0xC78, 0x407B0001, + 0xC78, 0x407C0001, + 0xC78, 0x407D0001, + 0xC78, 0x407E0001, + 0xC78, 0x407F0001, + 0xC50, 0x00040222, + 0xC50, 0x00040220, + 0xCDCDCDCD, 0xCDCD, + 0xC78, 0xFB400001, + 0xC78, 0xFB410001, + 0xC78, 0xFB420001, + 0xC78, 0xFB430001, + 0xC78, 0xFB440001, + 0xC78, 0xFB450001, + 0xC78, 0xFA460001, + 0xC78, 0xF9470001, + 0xC78, 0xF8480001, + 0xC78, 0xF7490001, + 0xC78, 0xF64A0001, + 0xC78, 0xF54B0001, + 0xC78, 0xF44C0001, + 0xC78, 0xF34D0001, + 0xC78, 0xF24E0001, + 0xC78, 0xF14F0001, + 0xC78, 0xF0500001, + 0xC78, 0xEF510001, + 0xC78, 0xEE520001, + 0xC78, 
0xED530001, + 0xC78, 0xEC540001, + 0xC78, 0xEB550001, + 0xC78, 0xEA560001, + 0xC78, 0xE9570001, + 0xC78, 0xE8580001, + 0xC78, 0xE7590001, + 0xC78, 0xE65A0001, + 0xC78, 0xE55B0001, + 0xC78, 0xE45C0001, + 0xC78, 0xE35D0001, + 0xC78, 0xE25E0001, + 0xC78, 0xE15F0001, + 0xC78, 0x8A600001, + 0xC78, 0x89610001, + 0xC78, 0x88620001, + 0xC78, 0x87630001, + 0xC78, 0x86640001, + 0xC78, 0x85650001, + 0xC78, 0x84660001, + 0xC78, 0x83670001, + 0xC78, 0x82680001, + 0xC78, 0x6B690001, + 0xC78, 0x6A6A0001, + 0xC78, 0x696B0001, + 0xC78, 0x686C0001, + 0xC78, 0x676D0001, + 0xC78, 0x666E0001, + 0xC78, 0x656F0001, + 0xC78, 0x64700001, + 0xC78, 0x63710001, + 0xC78, 0x62720001, + 0xC78, 0x61730001, + 0xC78, 0x49740001, + 0xC78, 0x48750001, + 0xC78, 0x47760001, + 0xC78, 0x46770001, + 0xC78, 0x45780001, + 0xC78, 0x44790001, + 0xC78, 0x437A0001, + 0xC78, 0x427B0001, + 0xC78, 0x417C0001, + 0xC78, 0x407D0001, + 0xC78, 0x407E0001, + 0xC78, 0x407F0001, + 0xC50, 0x00040022, + 0xC50, 0x00040020, + 0xFF010718, 0xDEAD, +}; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/rtlwifi/rtl8192ee/table.h new file mode 100644 index 0000000000000000000000000000000000000000..bff9df88815de193d0f28074cd7f3020127ebbdf --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/table.h @@ -0,0 +1,45 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_TABLE__H_ +#define __RTL92E_TABLE__H_ + +#include +#define RTL8192EE_PHY_REG_ARRAY_LEN 448 +extern u32 RTL8192EE_PHY_REG_ARRAY[]; +#define RTL8192EE_PHY_REG_ARRAY_PG_LEN 168 +extern u32 RTL8192EE_PHY_REG_ARRAY_PG[]; +#define RTL8192EE_RADIOA_ARRAY_LEN 238 +extern u32 RTL8192EE_RADIOA_ARRAY[]; +#define RTL8192EE_RADIOB_ARRAY_LEN 198 +extern u32 RTL8192EE_RADIOB_ARRAY[]; +#define RTL8192EE_MAC_ARRAY_LEN 202 +extern u32 RTL8192EE_MAC_ARRAY[]; +#define RTL8192EE_AGC_TAB_ARRAY_LEN 532 +extern u32 RTL8192EE_AGC_TAB_ARRAY[]; + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c new file mode 100644 index 0000000000000000000000000000000000000000..2fcbef1d029fa5e9c337926a7cb3005ffe979a63 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c @@ -0,0 +1,1293 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../stats.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "trx.h" +#include "led.h" +#include "dm.h" +#include "fw.h" + +static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) +{ + __le16 fc = rtl_get_fc(skb); + + if (unlikely(ieee80211_is_beacon(fc))) + return QSLT_BEACON; + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) + return QSLT_MGNT; + + return skb->priority; +} + +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl92ee_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate) +{ + int rate_idx; + + if (!isht) { + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC92C_RATE6M: + rate_idx = 0; + break; + case DESC92C_RATE9M: + rate_idx = 1; + break; + case DESC92C_RATE12M: + rate_idx = 2; + break; + case DESC92C_RATE18M: + rate_idx = 3; + break; + case DESC92C_RATE24M: + rate_idx = 4; + break; + case DESC92C_RATE36M: + rate_idx = 5; + break; + case DESC92C_RATE48M: + rate_idx = 6; + break; + case DESC92C_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch (desc_rate) { + case DESC92C_RATEMCS0: + rate_idx = 0; + break; + case DESC92C_RATEMCS1: + rate_idx = 1; + break; + case DESC92C_RATEMCS2: + rate_idx = 2; + break; + case DESC92C_RATEMCS3: + rate_idx = 3; + break; + case DESC92C_RATEMCS4: + rate_idx = 4; + break; + case DESC92C_RATEMCS5: + rate_idx = 5; + break; + case DESC92C_RATEMCS6: + rate_idx = 6; + break; + case DESC92C_RATEMCS7: + rate_idx = 7; + break; + case DESC92C_RATEMCS8: + rate_idx = 8; + break; + case DESC92C_RATEMCS9: + rate_idx = 9; + break; + case 
DESC92C_RATEMCS10: + rate_idx = 10; + break; + case DESC92C_RATEMCS11: + rate_idx = 11; + break; + case DESC92C_RATEMCS12: + rate_idx = 12; + break; + case DESC92C_RATEMCS13: + rate_idx = 13; + break; + case DESC92C_RATEMCS14: + rate_idx = 14; + break; + case DESC92C_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + +static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo *p_drvinfo, + bool bpacket_match_bssid, + bool bpacket_toself, + bool packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; + char rx_pwr_all = 0, rx_pwr[4]; + u8 rf_rx_num = 0, evm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool is_cck = pstatus->is_cck; + u8 lan_idx, vga_idx; + + /* Record it for next packet processing */ + pstatus->packet_matchbssid = bpacket_match_bssid; + pstatus->packet_toself = bpacket_toself; + pstatus->packet_beacon = packet_beacon; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; + + if (is_cck) { + u8 cck_highpwr; + u8 cck_agc_rpt; + /* CCK Driver info Structure is not the same as OFDM packet. */ + cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a; + + /* (1)Hardware does not provide RSSI for CCK + * (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, + BIT(9)); + + lan_idx = ((cck_agc_rpt & 0xE0) >> 5); + vga_idx = (cck_agc_rpt & 0x1f); + switch (lan_idx) { + case 7: /*VGA_idx = 27~2*/ + if (vga_idx <= 27) + rx_pwr_all = -100 + 2 * (27 - vga_idx); + else + rx_pwr_all = -100; + break; + case 6: /*VGA_idx = 2~0*/ + rx_pwr_all = -48 + 2 * (2 - vga_idx); + break; + case 5: /*VGA_idx = 7~5*/ + rx_pwr_all = -42 + 2 * (7 - vga_idx); + break; + case 4: /*VGA_idx = 7~4*/ + rx_pwr_all = -36 + 2 * (7 - vga_idx); + break; + case 3: /*VGA_idx = 7~0*/ + rx_pwr_all = -24 + 2 * (7 - vga_idx); + break; + case 2: /*VGA_idx = 5~0*/ + if (cck_highpwr) + rx_pwr_all = -12 + 2 * (5 - vga_idx); + else + rx_pwr_all = -6 + 2 * (5 - vga_idx); + break; + case 1: + rx_pwr_all = 8 - 2 * vga_idx; + break; + case 0: + rx_pwr_all = 14 - 2 * vga_idx; + break; + default: + break; + } + rx_pwr_all += 16; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + + if (!cck_highpwr) { + if (pwdb_all >= 80) + pwdb_all = ((pwdb_all - 80) << 1) + + ((pwdb_all - 80) >> 1) + 80; + else if ((pwdb_all <= 78) && (pwdb_all >= 20)) + pwdb_all += 3; + if (pwdb_all > 100) + pwdb_all = 100; + } + + pstatus->rx_pwdb_all = pwdb_all; + pstatus->bt_rx_rssi_percentage = pwdb_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3) Get Signal Quality (EVM) */ + if (bpacket_match_bssid) { + u8 sq, sq_rpt; + + if (pstatus->rx_pwdb_all > 40) { + sq = 100; + } else { + sq_rpt = p_phystrpt->cck_sig_qual_ofdm_pwdb_all; + if (sq_rpt > 64) + sq = 0; + else if (sq_rpt < 20) + sq = 100; + else + sq = ((64 - sq_rpt) * 100) / 44; + } + + pstatus->signalquality = sq; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; + } + } else { + /* (1)Get RSSI for HT rate */ + for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { + /* we will judge RF RX path now. */ + if (rtlpriv->dm.rfpath_rxenable[i]) + rf_rx_num++; + + rx_pwr[i] = ((p_phystrpt->path_agc[i].gain & 0x3f) * 2) + - 110; + + pstatus->rx_pwr[i] = rx_pwr[i]; + /* Translate DBM to percentage. 
*/ + rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + + pstatus->rx_mimo_signalstrength[i] = (u8)rssi; + } + + /* (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) + & 0x7f) - 110; + + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pstatus->rx_pwdb_all = pwdb_all; + pstatus->bt_rx_rssi_percentage = pwdb_all; + pstatus->rxpower = rx_pwr_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3)EVM of HT rate */ + if (pstatus->rate >= DESC92C_RATEMCS8 && + pstatus->rate <= DESC92C_RATEMCS15) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + + for (i = 0; i < max_spatial_stream; i++) { + evm = rtl_evm_db_to_percentage( + p_phystrpt->stream_rxevm[i]); + + if (bpacket_match_bssid) { + /* Fill value in RFD, Get the first + * spatial stream only + */ + if (i == 0) + pstatus->signalquality = (u8)(evm & + 0xff); + pstatus->rx_mimo_signalquality[i] = (u8)(evm & + 0xff); + } + } + + if (bpacket_match_bssid) { + for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) + rtl_priv(hw)->dm.cfo_tail[i] = + (int)p_phystrpt->path_cfotail[i]; + + if (rtl_priv(hw)->dm.packet_count == 0xffffffff) + rtl_priv(hw)->dm.packet_count = 0; + else + rtl_priv(hw)->dm.packet_count++; + } + } + + /* UI BSS List signal strength(in percentage), + * make it good looking, from 0~100. + */ + if (is_cck) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + pwdb_all)); + else if (rf_rx_num != 0) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + total_rssi /= rf_rx_num)); +} + +static void _rtl92ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstatus, + u8 *pdesc, + struct rx_fwinfo *p_drvinfo) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + u8 *tmp_buf; + u8 *praddr; + u8 *psaddr; + __le16 fc; + bool packet_matchbssid, packet_toself, packet_beacon; + + tmp_buf = skb->data + pstatus->rx_drvinfo_size + + pstatus->rx_bufshift + 24; + + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = hdr->frame_control; + praddr = hdr->addr1; + psaddr = ieee80211_get_SA(hdr); + ether_addr_copy(pstatus->psaddr, psaddr); + + packet_matchbssid = (!ieee80211_is_ctl(fc) && + (ether_addr_equal(mac->bssid, + ieee80211_has_tods(fc) ? + hdr->addr1 : + ieee80211_has_fromds(fc) ? 
+ hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && (!pstatus->crc) && + (!pstatus->icv)); + + packet_toself = packet_matchbssid && + (ether_addr_equal(praddr, rtlefuse->dev_addr)); + + if (ieee80211_is_beacon(fc)) + packet_beacon = true; + else + packet_beacon = false; + + if (packet_beacon && packet_matchbssid) + rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++; + + if (packet_matchbssid && ieee80211_is_data_qos(hdr->frame_control) && + !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { + struct ieee80211_qos_hdr *hdr_qos = + (struct ieee80211_qos_hdr *)tmp_buf; + u16 tid = le16_to_cpu(hdr_qos->qos_ctrl) & 0xf; + + if (tid != 0 && tid != 3) + rtl_priv(hw)->dm.dbginfo.num_non_be_pkt++; + } + + _rtl92ee_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); + rtl_process_phyinfo(hw, tmp_buf, pstatus); +} + +static void _rtl92ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + u8 *virtualaddress) +{ + u32 dwtmp = 0; + + memset(virtualaddress, 0, 8); + + SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + if (ptcb_desc->empkt_num == 1) { + dwtmp = ptcb_desc->empkt_len[0]; + } else { + dwtmp = ptcb_desc->empkt_len[0]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[1]; + } + SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + + if (ptcb_desc->empkt_num <= 3) { + dwtmp = ptcb_desc->empkt_len[2]; + } else { + dwtmp = ptcb_desc->empkt_len[2]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[3]; + } + SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 5) { + dwtmp = ptcb_desc->empkt_len[4]; + } else { + dwtmp = ptcb_desc->empkt_len[4]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[5]; + } + SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); + SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + if (ptcb_desc->empkt_num <= 7) { + dwtmp = ptcb_desc->empkt_len[6]; + } else { + dwtmp = ptcb_desc->empkt_len[6]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[7]; + } + SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 9) { + dwtmp = ptcb_desc->empkt_len[8]; + } else { + dwtmp = ptcb_desc->empkt_len[8]; + dwtmp += ((dwtmp % 4) ? 
(4 - dwtmp % 4) : 0) + 4;
+		dwtmp += ptcb_desc->empkt_len[9];
+	}
+	SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
+
+bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
+			   struct rtl_stats *status,
+			   struct ieee80211_rx_status *rx_status,
+			   u8 *pdesc, struct sk_buff *skb)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rx_fwinfo *p_drvinfo;
+	struct ieee80211_hdr *hdr;
+	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+	status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
+	status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+				  RX_DRV_INFO_SIZE_UNIT;
+	status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
+	status->icv = (u16)GET_RX_DESC_ICV(pdesc);
+	status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
+	status->hwerror = (status->crc | status->icv);
+	status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+	status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
+	status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
+	status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+	status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
+
+	status->macid = GET_RX_DESC_MACID(pdesc);
+	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+		status->wake_match = BIT(2);
+	else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+		status->wake_match = BIT(1);
+	else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+		status->wake_match = BIT(0);
+	else
+		status->wake_match = 0;
+	if (status->wake_match)
+		RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
+			 "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+			 status->wake_match);
+	rx_status->freq = hw->conf.chandef.chan->center_freq;
+	rx_status->band = hw->conf.chandef.chan->band;
+
+	hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
+				       status->rx_bufshift + 24);
+
+	if (status->crc)
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (status->rx_is40Mhzpacket)
+		rx_status->flag |= RX_FLAG_40MHZ;
+
+	if (status->is_ht)
+		rx_status->flag |= RX_FLAG_HT;
+
+	rx_status->flag |= RX_FLAG_MACTIME_START;
+
+	/* hw will set status->decrypted true, if it finds the
+	 * frame is open data frame or mgmt frame.
+ * So hw will not decryption robust managment frame + * for IEEE80211w but still set status->decrypted + * true, so here we should set it back to undecrypted + * for IEEE80211w frame, and mac80211 sw will help + * to decrypt it + */ + if (status->decrypted) { + if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && + (ieee80211_has_protected(hdr->frame_control))) + rx_status->flag |= RX_FLAG_DECRYPTED; + else + rx_status->flag &= ~RX_FLAG_DECRYPTED; + } + + /* rate_idx: index of data rate into band's + * supported rates or MCS index if HT rates + * are use (RX_FLAG_HT) + * Notice: this is diff with windows define + */ + rx_status->rate_idx = _rtl92ee_rate_mapping(hw, + status->is_ht, + status->rate); + + rx_status->mactime = status->timestamp_low; + if (phystatus) { + p_drvinfo = (struct rx_fwinfo *)(skb->data + + status->rx_bufshift + 24); + + _rtl92ee_translate_rx_signal_stuff(hw, skb, status, pdesc, + p_drvinfo); + } + rx_status->signal = status->recvsignalpower + 10; + if (status->packet_report_type == TX_REPORT2) { + status->macid_valid_entry[0] = + GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + status->macid_valid_entry[1] = + GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + } + return true; +} + +/*in Windows, this == Rx_92EE_Interrupt*/ +void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc, + u8 queue_index) +{ + u8 first_seg = 0; + u8 last_seg = 0; + u16 total_len = 0; + u16 read_cnt = 0; + + if (header_desc == NULL) + return; + + total_len = (u16)GET_RX_BUFFER_DESC_TOTAL_LENGTH(header_desc); + + first_seg = (u8)GET_RX_BUFFER_DESC_FS(header_desc); + + last_seg = (u8)GET_RX_BUFFER_DESC_LS(header_desc); + + while (total_len == 0 && first_seg == 0 && last_seg == 0) { + read_cnt++; + total_len = (u16)GET_RX_BUFFER_DESC_TOTAL_LENGTH(header_desc); + first_seg = (u8)GET_RX_BUFFER_DESC_FS(header_desc); + last_seg = (u8)GET_RX_BUFFER_DESC_LS(header_desc); + + if (read_cnt > 20) + break; + } +} + +u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 read_point = 0, write_point = 0, remind_cnt = 0; + u32 tmp_4byte = 0; + static u16 last_read_point; + static bool start_rx; + + tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX); + read_point = (u16)((tmp_4byte>>16) & 0x7ff); + write_point = (u16)(tmp_4byte & 0x7ff); + + if (write_point != rtlpci->rx_ring[queue_index].next_rx_rp) { + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_DMESG, + "!!!write point is 0x%x, reg 0x3B4 value is 0x%x\n", + write_point, tmp_4byte); + tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX); + read_point = (u16)((tmp_4byte>>16) & 0x7ff); + write_point = (u16)(tmp_4byte & 0x7ff); + } + + if (read_point > 0) + start_rx = true; + if (!start_rx) + return 0; + + if ((last_read_point > (RX_DESC_NUM_92E / 2)) && + (read_point <= (RX_DESC_NUM_92E / 2))) { + remind_cnt = RX_DESC_NUM_92E - write_point; + } else { + remind_cnt = (read_point >= write_point) ? 
+ (read_point - write_point) : + (RX_DESC_NUM_92E - write_point + read_point); + } + + if (remind_cnt == 0) + return 0; + + rtlpci->rx_ring[queue_index].next_rx_rp = write_point; + + last_read_point = read_point; + return remind_cnt; +} + +static u16 get_desc_addr_fr_q_idx(u16 queue_index) +{ + u16 desc_address = REG_BEQ_TXBD_IDX; + + switch (queue_index) { + case BK_QUEUE: + desc_address = REG_BKQ_TXBD_IDX; + break; + case BE_QUEUE: + desc_address = REG_BEQ_TXBD_IDX; + break; + case VI_QUEUE: + desc_address = REG_VIQ_TXBD_IDX; + break; + case VO_QUEUE: + desc_address = REG_VOQ_TXBD_IDX; + break; + case BEACON_QUEUE: + desc_address = REG_BEQ_TXBD_IDX; + break; + case TXCMD_QUEUE: + desc_address = REG_BEQ_TXBD_IDX; + break; + case MGNT_QUEUE: + desc_address = REG_MGQ_TXBD_IDX; + break; + case HIGH_QUEUE: + desc_address = REG_HI0Q_TXBD_IDX; + break; + case HCCA_QUEUE: + desc_address = REG_BEQ_TXBD_IDX; + break; + default: + break; + } + return desc_address; +} + +void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 point_diff = 0; + u16 current_tx_read_point = 0, current_tx_write_point = 0; + u32 tmp_4byte; + + tmp_4byte = rtl_read_dword(rtlpriv, + get_desc_addr_fr_q_idx(q_idx)); + current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff); + current_tx_write_point = (u16)((tmp_4byte) & 0x0fff); + + point_diff = ((current_tx_read_point > current_tx_write_point) ? + (current_tx_read_point - current_tx_write_point) : + (TX_DESC_NUM_92E - current_tx_write_point + + current_tx_read_point)); + + rtlpci->tx_ring[q_idx].avl_desc = point_diff; +} + +void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, + u8 *tx_bd_desc, u8 *desc, u8 queue_index, + struct sk_buff *skb, dma_addr_t addr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 pkt_len = skb->len; + u16 desc_size = 40; /*tx desc size*/ + u32 psblen = 0; + u16 tx_page_size = 0; + u32 total_packet_size = 0; + u16 current_bd_desc; + u8 i = 0; + u16 real_desc_size = 0x28; + u16 append_early_mode_size = 0; +#if (RTL8192EE_SEG_NUM == 0) + u8 segmentnum = 2; +#elif (RTL8192EE_SEG_NUM == 1) + u8 segmentnum = 4; +#elif (RTL8192EE_SEG_NUM == 2) + u8 segmentnum = 8; +#endif + + tx_page_size = 2; + current_bd_desc = rtlpci->tx_ring[queue_index].cur_tx_wp; + + total_packet_size = desc_size+pkt_len; + + if (rtlpriv->rtlhal.earlymode_enable) { + if (queue_index < BEACON_QUEUE) { + append_early_mode_size = 8; + total_packet_size += append_early_mode_size; + } + } + + if (tx_page_size > 0) { + psblen = (pkt_len + real_desc_size + append_early_mode_size) / + (tx_page_size * 128); + + if (psblen * (tx_page_size * 128) < total_packet_size) + psblen += 1; + } + + /* Reset */ + SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, 0); + SET_TX_BUFF_DESC_PSB(tx_bd_desc, 0); + SET_TX_BUFF_DESC_OWN(tx_bd_desc, 0); + + for (i = 1; i < segmentnum; i++) { + SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, i, 0); + SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, i, 0); + SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, i, 0); +#if (DMA_IS_64BIT == 1) + SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(tx_bd_desc, i, 0); +#endif + } + SET_TX_BUFF_DESC_LEN_1(tx_bd_desc, 0); + SET_TX_BUFF_DESC_AMSDU_1(tx_bd_desc, 0); + + SET_TX_BUFF_DESC_LEN_2(tx_bd_desc, 0); + SET_TX_BUFF_DESC_AMSDU_2(tx_bd_desc, 0); + SET_TX_BUFF_DESC_LEN_3(tx_bd_desc, 0); + SET_TX_BUFF_DESC_AMSDU_3(tx_bd_desc, 0); + /* Clear all status */ + 
CLEAR_PCI_TX_DESC_CONTENT(desc, TX_DESC_SIZE); + + if (rtlpriv->rtlhal.earlymode_enable) { + if (queue_index < BEACON_QUEUE) { + /* This if needs braces */ + SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size + 8); + } else { + SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size); + } + } else { + SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size); + } + SET_TX_BUFF_DESC_PSB(tx_bd_desc, psblen); + SET_TX_BUFF_DESC_ADDR_LOW_0(tx_bd_desc, + rtlpci->tx_ring[queue_index].dma + + (current_bd_desc * TX_DESC_SIZE)); + + SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, 1, pkt_len); + /* don't using extendsion mode. */ + SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, 1, 0); + SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, 1, addr); + + SET_TX_DESC_PKT_SIZE(desc, (u16)(pkt_len)); + SET_TX_DESC_TX_BUFFER_SIZE(desc, (u16)(pkt_len)); +} + +void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + u8 *pbd_desc_tx, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 *pdesc = (u8 *)pdesc_tx; + u16 seq_number; + __le16 fc = hdr->frame_control; + unsigned int buf_len = 0; + u8 fw_qsel = _rtl92ee_map_hwqueue_to_fwqueue(skb, hw_queue); + bool firstseg = ((hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool lastseg = ((hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + dma_addr_t mapping; + u8 bw_40 = 0; + u8 short_gi = 0; + + if (mac->opmode == NL80211_IFTYPE_STATION) { + bw_40 = mac->bw_40; + } else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta) + bw_40 = sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); + /* reserve 8 byte for AMPDU early mode */ + if (rtlhal->earlymode_enable) { + skb_push(skb, EM_HDR_LEN); + memset(skb->data, 0, EM_HDR_LEN); + } + buf_len = skb->len; + mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "DMA mapping error"); + return; + } + + if (pbd_desc_tx != NULL) + rtl92ee_pre_fill_tx_bd_desc(hw, pbd_desc_tx, pdesc, hw_queue, + skb, mapping); + + if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { + firstseg = true; + lastseg = true; + } + if (firstseg) { + if (rtlhal->earlymode_enable) { + SET_TX_DESC_PKT_OFFSET(pdesc, 1); + SET_TX_DESC_OFFSET(pdesc, + USB_HWDESC_HEADER_LEN + EM_HDR_LEN); + if (ptcb_desc->empkt_num) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Insert 8 byte.pTcb->EMPktNum:%d\n", + ptcb_desc->empkt_num); + _rtl92ee_insert_emcontent(ptcb_desc, + (u8 *)(skb->data)); + } + } else { + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + } + + SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + + if (ieee80211_is_mgmt(fc)) { + ptcb_desc->use_driver_rate = true; + } else { + if (rtlpriv->ra.is_special_data) { + ptcb_desc->use_driver_rate = true; + SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE11M); + } else { + ptcb_desc->use_driver_rate = false; + } + } + + if (ptcb_desc->hw_rate > DESC92C_RATEMCS0) + short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; + else + short_gi = (ptcb_desc->use_shortpreamble) ? 
1 : 0; + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + SET_TX_DESC_AGG_ENABLE(pdesc, 1); + SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + } + SET_TX_DESC_SEQ(pdesc, seq_number); + SET_TX_DESC_RTS_ENABLE(pdesc, + ((ptcb_desc->rts_enable && + !ptcb_desc->cts_enable) ? 1 : 0)); + SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); + SET_TX_DESC_CTS2SELF(pdesc, + ((ptcb_desc->cts_enable) ? 1 : 0)); + + SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); + SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); + SET_TX_DESC_RTS_SHORT(pdesc, + ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? + (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : + (ptcb_desc->rts_use_shortgi ? 1 : 0))); + + if (ptcb_desc->tx_enable_sw_calc_duration) + SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + + if (bw_40) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { + SET_TX_DESC_DATA_BW(pdesc, 1); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, + mac->cur_40_prime_sc); + } + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + } + + SET_TX_DESC_LINIP(pdesc, 0); + if (sta) { + u8 ampdu_density = sta->ht_cap.ampdu_density; + + SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + } + if (info->control.hw_key) { + struct ieee80211_key_conf *key = info->control.hw_key; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + break; + } + } + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); + SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); + SET_TX_DESC_DISABLE_FB(pdesc, + ptcb_desc->disable_ratefallback ? 1 : 0); + SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + + /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/ + /* Set TxRate and RTSRate in TxDesc */ + /* This prevent Tx initial rate of new-coming packets */ + /* from being overwritten by retried packet rate.*/ + if (!ptcb_desc->use_driver_rate) { + /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */ + /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */ + } + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Enable RDG function.\n"); + SET_TX_DESC_RDG_ENABLE(pdesc, 1); + SET_TX_DESC_HTC(pdesc, 1); + } + } + } + + SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); + SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + if (rtlpriv->dm.useramask) { + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } else { + SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index); + } + + SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { + SET_TX_DESC_BMC(pdesc, 1); + } + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); +} + +void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, + u8 *pdesc, bool firstseg, + bool lastseg, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 fw_queue = QSLT_BEACON; + dma_addr_t mapping = pci_map_single(rtlpci->pdev, + skb->data, skb->len, + PCI_DMA_TODEVICE); + u8 txdesc_len = 40; + + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "DMA mapping error"); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len); + + if (firstseg) + SET_TX_DESC_OFFSET(pdesc, txdesc_len); + + SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + + SET_TX_DESC_SEQ(pdesc, 0); + + SET_TX_DESC_LINIP(pdesc, 0); + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); + + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + + SET_TX_DESC_RATE_ID(pdesc, 7); + SET_TX_DESC_MACID(pdesc, 0); + + SET_TX_DESC_OWN(pdesc, 1); + + SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_OFFSET(pdesc, 40); + + SET_TX_DESC_USE_RATE(pdesc, 1); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C Tx Cmd Content\n", pdesc, txdesc_len); +} + +void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, + u8 desc_name, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 cur_tx_rp = 0; + u16 cur_tx_wp = 0; + static u16 last_txw_point; + static bool over_run; + u32 tmp = 0; + u8 q_idx = *val; + + if (istx) { + switch (desc_name) { + case HW_DESC_TX_NEXTDESC_ADDR: + SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); + break; + case HW_DESC_OWN:{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx]; + u16 max_tx_desc = ring->entries; + + if (q_idx == BEACON_QUEUE) { + ring->cur_tx_wp = 0; + ring->cur_tx_rp = 0; + SET_TX_BUFF_DESC_OWN(pdesc, 1); + return; + } + + ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc); + + if (over_run) { + ring->cur_tx_wp = 0; + over_run = false; + } + if (ring->avl_desc > 1) { + ring->avl_desc--; + + rtl_write_word(rtlpriv, + get_desc_addr_fr_q_idx(q_idx), + ring->cur_tx_wp); + + if (q_idx == 1) + last_txw_point = cur_tx_wp; + } + + if (ring->avl_desc < (max_tx_desc - 15)) { + u16 point_diff = 0; + + tmp = + rtl_read_dword(rtlpriv, + get_desc_addr_fr_q_idx(q_idx)); + cur_tx_rp = (u16)((tmp >> 16) & 0x0fff); + cur_tx_wp = (u16)(tmp & 0x0fff); + + ring->cur_tx_wp = cur_tx_wp; + ring->cur_tx_rp = cur_tx_rp; + point_diff = ((cur_tx_rp > cur_tx_wp) ? 
+ (cur_tx_rp - cur_tx_wp) : + (TX_DESC_NUM_92E - 1 - + cur_tx_wp + cur_tx_rp)); + + ring->avl_desc = point_diff; + } + } + break; + } + } else { + switch (desc_name) { + case HW_DESC_RX_PREPARE: + SET_RX_BUFFER_DESC_LS(pdesc, 0); + SET_RX_BUFFER_DESC_FS(pdesc, 0); + SET_RX_BUFFER_DESC_TOTAL_LENGTH(pdesc, 0); + + SET_RX_BUFFER_DESC_DATA_LENGTH(pdesc, + MAX_RECEIVE_BUFFER_SIZE + + RX_DESC_SIZE); + + SET_RX_BUFFER_PHYSICAL_LOW(pdesc, *(u32 *)val); + break; + case HW_DESC_RXERO: + SET_RX_DESC_EOR(pdesc, 1); + break; + default: + RT_ASSERT(false, + "ERR rxdesc :%d not process\n", desc_name); + break; + } + } +} + +u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name) +{ + u32 ret = 0; + + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_TX_DESC_OWN(pdesc); + break; + case HW_DESC_TXBUFF_ADDR: + ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1); + break; + default: + RT_ASSERT(false, + "ERR txdesc :%d not process\n", desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_RX_DESC_OWN(pdesc); + break; + case HW_DESC_RXPKT_LEN: + ret = GET_RX_DESC_PKT_LEN(pdesc); + break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(pdesc); + break; + default: + RT_ASSERT(false, + "ERR rxdesc :%d not process\n", desc_name); + break; + } + } + return ret; +} + +bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 read_point, write_point, available_desc_num; + bool ret = false; + static u8 stop_report_cnt; + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + + /*checking Read/Write Point each interrupt wastes CPU */ + if (stop_report_cnt > 15 || !rtlpriv->link_info.busytraffic) { + u16 point_diff = 0; + u16 cur_tx_rp, cur_tx_wp; + u32 tmpu32 = 0; + + tmpu32 = + rtl_read_dword(rtlpriv, + get_desc_addr_fr_q_idx(hw_queue)); + cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff); + cur_tx_wp = (u16)(tmpu32 & 0x0fff); + + ring->cur_tx_wp = cur_tx_wp; + ring->cur_tx_rp = cur_tx_rp; + point_diff = ((cur_tx_rp > cur_tx_wp) ? 
+ (cur_tx_rp - cur_tx_wp) : + (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp)); + + ring->avl_desc = point_diff; + } + + read_point = ring->cur_tx_rp; + write_point = ring->cur_tx_wp; + available_desc_num = ring->avl_desc; + + if (write_point > read_point) { + if (index < write_point && index >= read_point) + ret = false; + else + ret = true; + } else if (write_point < read_point) { + if (index > write_point && index < read_point) + ret = true; + else + ret = false; + } else { + if (index != read_point) + ret = true; + } + + if (hw_queue == BEACON_QUEUE) + ret = true; + + if (rtlpriv->rtlhal.driver_is_goingto_unload || + rtlpriv->psc.rfoff_reason > RF_CHANGE_BY_PS) + ret = true; + + if (hw_queue < BEACON_QUEUE) { + if (!ret) + stop_report_cnt++; + else + stop_report_cnt = 0; + } + + return ret; +} + +void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) +{ +} + +u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb) +{ + u32 result = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (status.packet_report_type) { + case NORMAL_RX: + result = 0; + break; + case C2H_PACKET: + rtl92ee_c2h_packet_handler(hw, skb->data, (u8)skb->len); + result = 1; + break; + default: + RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, + "Unknown packet type %d\n", status.packet_report_type); + break; + } + + return result; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h new file mode 100644 index 0000000000000000000000000000000000000000..6f9be1c7515c1c8e5d0c9221a88edfd7992e4690 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h @@ -0,0 +1,860 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL92E_TRX_H__ +#define __RTL92E_TRX_H__ + +#if (DMA_IS_64BIT == 1) +#if (RTL8192EE_SEG_NUM == 2) +#define TX_BD_DESC_SIZE 128 +#elif (RTL8192EE_SEG_NUM == 1) +#define TX_BD_DESC_SIZE 64 +#elif (RTL8192EE_SEG_NUM == 0) +#define TX_BD_DESC_SIZE 32 +#endif +#else +#if (RTL8192EE_SEG_NUM == 2) +#define TX_BD_DESC_SIZE 64 +#elif (RTL8192EE_SEG_NUM == 1) +#define TX_BD_DESC_SIZE 32 +#elif (RTL8192EE_SEG_NUM == 0) +#define TX_BD_DESC_SIZE 16 +#endif +#endif + +#define TX_DESC_SIZE 64 + +#define RX_DRV_INFO_SIZE_UNIT 8 + +#define TX_DESC_NEXT_DESC_OFFSET 40 +#define USB_HWDESC_HEADER_LEN 40 + +#define RX_DESC_SIZE 24 +#define MAX_RECEIVE_BUFFER_SIZE 8192 + +#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) +#define SET_TX_DESC_BMC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) +#define SET_TX_DESC_HTC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) +#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) +#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) +#define SET_TX_DESC_LINIP(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) +#define SET_TX_DESC_GF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_TX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 16) +#define GET_TX_DESC_OFFSET(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 8) +#define GET_TX_DESC_BMC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 1) +#define GET_TX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 25, 1) +#define GET_TX_DESC_LAST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_TX_DESC_LINIP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_TX_DESC_NO_ACM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_TX_DESC_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_TX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_TX_DESC_MACID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val) +#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) +#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) +#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) +#define SET_TX_DESC_PIFS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) +#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val) +#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) +#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) +#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val) +#define SET_TX_DESC_MORE_DATA(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 29, 1, __val) +#define SET_TX_DESC_TXOP_PS_CAP(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 30, 1, __val) +#define SET_TX_DESC_TXOP_PS_MODE(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+4, 31, 1, __val) + +#define GET_TX_DESC_MACID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) +#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) +#define GET_TX_DESC_AGG_BREAK(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) +#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) +#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) +#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) +#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) +#define GET_TX_DESC_PIFS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) +#define GET_TX_DESC_RATE_ID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) +#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) +#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) +#define GET_TX_DESC_SEC_TYPE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) +#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 24, 5) + +#define SET_TX_DESC_PAID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val) +#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val) +#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) +#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) +#define SET_TX_DESC_NULL_0(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 1, __val) +#define SET_TX_DESC_NULL_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 15, 1, __val) +#define SET_TX_DESC_BK(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) +#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) +#define SET_TX_DESC_RAW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) +#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) +#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) +#define SET_TX_DESC_BT_NULL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) +#define SET_TX_DESC_GID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val) + +#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val) +#define SET_TX_DESC_CHK_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val) +#define SET_TX_DESC_EARLY_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val) +#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val) +#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val) +#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val) +#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val) +#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val) +#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val) +#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val) +#define SET_TX_DESC_HW_PORT_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 14, 1, __val) +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val) +#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val) +#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val) +#define SET_TX_DESC_NDPA(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val) +#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val) + +/* Dword 4 */ +#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val) +#define SET_TX_DESC_TRY_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) +#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val) +#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val) +#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val) +#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val) +#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val) +#define SET_TX_DESC_PCTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 29, 1, __val) +#define SET_TX_DESC_PCTS_MASK_IDX(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) + +/* Dword 5 */ +#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val) +#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val) +#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val) +#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) +#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val) +#define SET_TX_DESC_VCS_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val) +#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val) +#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) +#define SET_TX_DESC_TX_ANT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val) +#define SET_TX_DESC_TX_POWER_0_PSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 28, 3, __val) + +/* Dword 6 */ +#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 12, __val) +#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 3, __val) +#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 19, 3, __val) +#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 22, 3, __val) +#define SET_TX_DESC_ANTSEL_D(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 25, 3, __val) + +/* Dword 7 */ +#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) +#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 8, __val) + +/* Dword 8 */ +#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 6, __val) +#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 6, 2, __val) +#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 8, 6, __val) +#define SET_TX_DESC_ENABLE_HW_SELECT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val) +#define SET_TX_DESC_NEXT_HEAD_PAGE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 16, 8, __val) +#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ 
+ SET_BITS_TO_LE_4BYTE(__pdesc+32, 24, 8, __val) + +/* Dword 9 */ +#define SET_TX_DESC_PADDING_LENGTH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 11, __val) +#define SET_TX_DESC_TXBF_PATH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 11, 1, __val) +#define SET_TX_DESC_SEQ(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val) +#define SET_TX_DESC_FINAL_DATA_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 24, 8, __val) + +/* Dword 10 */ +#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) + +/* Dword 11*/ +#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val) + +#define SET_EARLYMODE_PKTNUM(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __val) +#define SET_EARLYMODE_LEN0(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr, 4, 15, __val) +#define SET_EARLYMODE_LEN1(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr, 16, 2, __val) +#define SET_EARLYMODE_LEN1_1(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr, 19, 13, __val) +#define SET_EARLYMODE_LEN1_2(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 2, __val) +#define SET_EARLYMODE_LEN2(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 2, 15, __val) +#define SET_EARLYMODE_LEN2_1(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr, 2, 4, __val) +#define SET_EARLYMODE_LEN2_2(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __val) +#define SET_EARLYMODE_LEN3(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 17, 15, __val) +#define SET_EARLYMODE_LEN4(__paddr, __val) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __val) + +/* TX/RX buffer descriptor */ + +#define SET_TX_EXTBUFF_DESC_LEN(__pdesc, __val, __set) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16), 0, 16, __val) +#define SET_TX_EXTBUFF_DESC_ADDR_LOW(__pdesc, __val, __set)\ + SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16)+4, 0, 32, __val) +#define SET_TX_EXTBUFF_DESC_ADDR_HIGH(__pdesc, __val, __set)\ + SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16)+8, 0, 32, __val) + +/* for Txfilldescroptor92ee, fill the desc content. 
*/ +#if (DMA_IS_64BIT == 1) +#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16), 0, 16, __val) +#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16), 31, 1, __val) +#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16)+4, 0, 32, __val) +#define SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(__pdesc, __offset, __val)\ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16)+8, 0, 32, __val) +#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \ + LE_BITS_TO_4BYTE(__pdesc+(__offset*16)+4, 0, 32) +#else +#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8), 0, 16, __val) +#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8), 31, 1, __val) +#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8)+4, 0, 32, __val) +#define SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(__pdesc, __offset, __val) +#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \ + LE_BITS_TO_4BYTE(__pdesc+(__offset*8)+4, 0, 32) +#endif + +/* Dword 0 */ +#define SET_TX_BUFF_DESC_LEN_0(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) +#define SET_TX_BUFF_DESC_PSB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 16, 15, __val) +#define SET_TX_BUFF_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +/* Dword 1 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_0(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 32, __val) +#if (DMA_IS_64BIT == 1) +/* Dword 2 */ +#define SET_TX_BUFF_DESC_ADDR_HIGH_0(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 32, __val) +/* Dword 3 / RESERVED 0 */ +/* Dword 4 */ +#define SET_TX_BUFF_DESC_LEN_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 31, 1, __val) +/* Dword 5 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 32, __val) +/* Dword 6 */ +#define SET_TX_BUFF_DESC_ADDR_HIGH_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) +/* Dword 7 / RESERVED 0 */ +/* Dword 8 */ +#define SET_TX_BUFF_DESC_LEN_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 31, 1, __val) +/* Dword 9 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) +/* Dword 10 */ +#define SET_TX_BUFF_DESC_ADDR_HIGH_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) +/* Dword 11 / RESERVED 0 */ +/* Dword 12 */ +#define SET_TX_BUFF_DESC_LEN_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 31, 1, __val) +/* Dword 13 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+52, 0, 32, __val) +/* Dword 14 */ +#define SET_TX_BUFF_DESC_ADDR_HIGH_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+56, 0, 32, __val) +/* Dword 15 / RESERVED 0 */ +#else +#define SET_TX_BUFF_DESC_ADDR_HIGH_0(__pdesc, __val) +/* Dword 2 */ +#define SET_TX_BUFF_DESC_LEN_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_1(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+8, 31, 1, __val) +/* Dword 3 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_1(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 32, __val) +#define SET_TX_BUFF_DESC_ADDR_HIGH_1(__pdesc, __val) +/* Dword 4 */ +#define SET_TX_BUFF_DESC_LEN_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 31, 1, __val) +/* Dword 5 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_2(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 32, __val) +#define SET_TX_BUFF_DESC_ADDR_HIGH_2(__pdesc, __val) +/* Dword 6 */ +#define SET_TX_BUFF_DESC_LEN_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 16, __val) +#define SET_TX_BUFF_DESC_AMSDU_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 31, 1, __val) +/* Dword 7 */ +#define SET_TX_BUFF_DESC_ADDR_LOW_3(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) +#define SET_TX_BUFF_DESC_ADDR_HIGH_3(__pdesc, __val) +#endif + +/* RX buffer */ + +/* DWORD 0 */ +#define SET_RX_BUFFER_DESC_DATA_LENGTH(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status, 0, 14, __val) +#define SET_RX_BUFFER_DESC_LS(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status, 15, 1, __val) +#define SET_RX_BUFFER_DESC_FS(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status, 16, 1, __val) +#define SET_RX_BUFFER_DESC_TOTAL_LENGTH(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status, 16, 15, __val) + +#define GET_RX_BUFFER_DESC_OWN(__status) \ + LE_BITS_TO_4BYTE(__status, 31, 1) +#define GET_RX_BUFFER_DESC_LS(__status) \ + LE_BITS_TO_4BYTE(__status, 15, 1) +#define GET_RX_BUFFER_DESC_FS(__status) \ + LE_BITS_TO_4BYTE(__status, 16, 1) +#define GET_RX_BUFFER_DESC_TOTAL_LENGTH(__status) \ + LE_BITS_TO_4BYTE(__status, 16, 15) + +/* DWORD 1 */ +#define SET_RX_BUFFER_PHYSICAL_LOW(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status+4, 0, 32, __val) + +/* DWORD 2 */ +#define SET_RX_BUFFER_PHYSICAL_HIGH(__status, __val) \ + SET_BITS_TO_LE_4BYTE(__status+8, 0, 32, __val) + +#define GET_RX_DESC_PKT_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 14) +#define GET_RX_DESC_CRC32(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 14, 1) +#define GET_RX_DESC_ICV(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 15, 1) +#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 4) +#define GET_RX_DESC_SECURITY(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 20, 3) +#define GET_RX_DESC_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 23, 1) +#define GET_RX_DESC_SHIFT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 2) +#define GET_RX_DESC_PHYST(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_RX_DESC_SWDEC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_RX_DESC_LS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_RX_DESC_FS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_RX_DESC_EOR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_RX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) +#define SET_RX_DESC_EOR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_RX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_RX_DESC_MACID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 0, 7) +#define GET_RX_DESC_TID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 8, 4) +#define GET_RX_DESC_MACID_VLD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 12, 1) +#define GET_RX_DESC_AMSDU(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) +#define 
GET_RX_DESC_RXID_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) +#define GET_RX_DESC_PAGGR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) +#define GET_RX_DESC_A1_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) +#define GET_RX_DESC_TCPOFFLOAD_CHKERR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) +#define GET_RX_DESC_TCPOFFLOAD_IPVER(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) +#define GET_RX_DESC_TCPOFFLOAD_IS_TCPUDP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) +#define GET_RX_DESC_TCPOFFLOAD_CHK_VLD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) +#define GET_RX_DESC_PAM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) +#define GET_RX_DESC_PWR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) +#define GET_RX_DESC_MD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) +#define GET_RX_DESC_MF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) +#define GET_RX_DESC_TYPE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) +#define GET_RX_DESC_MC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) +#define GET_RX_DESC_BC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) +#define GET_RX_DESC_SEQ(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) +#define GET_RX_DESC_FRAG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) +#define GET_RX_DESC_RX_IS_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) + +#define GET_RX_DESC_RXMCS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) +#define GET_RX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) +#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) +#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) +#define GET_RX_STATUS_DESC_DMA_AGG_NUM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 16, 8) +#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) +#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) +#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) + +#define GET_RX_DESC_TSFL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) + +#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) +#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) + +#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) +#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) + +/* TX report 2 format in Rx desc*/ + +#define GET_RX_RPT2_DESC_PKT_LEN(__status) \ + LE_BITS_TO_4BYTE(__status, 0, 9) +#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \ + LE_BITS_TO_4BYTE(__status+16, 0, 32) +#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \ + LE_BITS_TO_4BYTE(__status+20, 0, 32) + +#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ +do { \ + if (_size > TX_DESC_NEXT_DESC_OFFSET) \ + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ + else \ + memset(__pdesc, 0, _size); \ +} while (0) + +#define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\ + (rxmcs == DESC92C_RATE1M ||\ + rxmcs == DESC92C_RATE2M ||\ + rxmcs == DESC92C_RATE5_5M ||\ + rxmcs == DESC92C_RATE11M) + +#define IS_LITTLE_ENDIAN 1 + +struct phy_rx_agc_info_t { + #if IS_LITTLE_ENDIAN + u8 gain:7, trsw:1; + #else + u8 trsw:1, gain:7; + #endif +}; + +struct phy_status_rpt { + struct phy_rx_agc_info_t path_agc[2]; + u8 ch_corr[2]; + u8 cck_sig_qual_ofdm_pwdb_all; + u8 cck_agc_rpt_ofdm_cfosho_a; + u8 cck_rpt_b_ofdm_cfosho_b; + u8 rsvd_1; + u8 noise_power_db_msb; + u8 path_cfotail[2]; + u8 pcts_mask[2]; + u8 stream_rxevm[2]; + u8 path_rxsnr[2]; + 
u8 noise_power_db_lsb; + u8 rsvd_2[3]; + u8 stream_csi[2]; + u8 stream_target_csi[2]; + u8 sig_evm; + u8 rsvd_3; +#if IS_LITTLE_ENDIAN + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ + u8 sgi_en:1; + u8 rxsc:2; + u8 idle_long:1; + u8 r_ant_train_en:1; + u8 ant_sel_b:1; + u8 ant_sel:1; +#else /* _BIG_ENDIAN_ */ + u8 ant_sel:1; + u8 ant_sel_b:1; + u8 r_ant_train_en:1; + u8 idle_long:1; + u8 rxsc:2; + u8 sgi_en:1; + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ +#endif +} __packed; + +struct rx_fwinfo { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + char rxevm[2]; + char rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +struct tx_desc { + u32 pktsize:16; + u32 offset:8; + u32 bmc:1; + u32 htc:1; + u32 lastseg:1; + u32 firstseg:1; + u32 linip:1; + u32 noacm:1; + u32 gf:1; + u32 own:1; + + u32 macid:6; + u32 rsvd0:2; + u32 queuesel:5; + u32 rd_nav_ext:1; + u32 lsig_txop_en:1; + u32 pifs:1; + u32 rateid:4; + u32 nav_usehdr:1; + u32 en_descid:1; + u32 sectype:2; + u32 pktoffset:8; + + u32 rts_rc:6; + u32 data_rc:6; + u32 agg_en:1; + u32 rdg_en:1; + u32 bar_retryht:2; + u32 agg_break:1; + u32 morefrag:1; + u32 raw:1; + u32 ccx:1; + u32 ampdudensity:3; + u32 bt_int:1; + u32 ant_sela:1; + u32 ant_selb:1; + u32 txant_cck:2; + u32 txant_l:2; + u32 txant_ht:2; + + u32 nextheadpage:8; + u32 tailpage:8; + u32 seq:12; + u32 cpu_handle:1; + u32 tag1:1; + u32 trigger_int:1; + u32 hwseq_en:1; + + u32 rtsrate:5; + u32 apdcfe:1; + u32 qos:1; + u32 hwseq_ssn:1; + u32 userrate:1; + u32 dis_rtsfb:1; + u32 dis_datafb:1; + u32 cts2self:1; + u32 rts_en:1; + u32 hwrts_en:1; + u32 portid:1; + u32 pwr_status:3; + u32 waitdcts:1; + u32 cts2ap_en:1; + u32 txsc:2; + u32 stbc:2; + u32 txshort:1; + u32 txbw:1; + u32 rtsshort:1; + u32 rtsbw:1; + u32 rtssc:2; + u32 rtsstbc:2; + + u32 txrate:6; + u32 shortgi:1; + u32 ccxt:1; + u32 txrate_fb_lmt:5; + u32 rtsrate_fb_lmt:4; + u32 retrylmt_en:1; + u32 txretrylmt:6; + u32 usb_txaggnum:8; + + u32 txagca:5; + u32 txagcb:5; + u32 usemaxlen:1; + u32 maxaggnum:5; + u32 mcsg1maxlen:4; + u32 mcsg2maxlen:4; + u32 mcsg3maxlen:4; + u32 mcs7sgimaxlen:4; + + u32 txbuffersize:16; + u32 sw_offset30:8; + u32 sw_offset31:4; + u32 rsvd1:1; + u32 antsel_c:1; + u32 null_0:1; + u32 null_1:1; + + u32 txbuffaddr; + u32 txbufferaddr64; + u32 nextdescaddress; + u32 nextdescaddress64; + + u32 reserve_pass_pcie_mm_limit[4]; +} __packed; + +struct rx_desc { + u32 length:14; + u32 crc32:1; + u32 icverror:1; + u32 drv_infosize:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phystatus:1; + u32 swdec:1; + u32 lastseg:1; + u32 firstseg:1; + u32 eor:1; + u32 own:1; + + u32 macid:6; + u32 tid:4; + u32 hwrsvd:5; + u32 paggr:1; + u32 faggr:1; + u32 a1_fit:4; + u32 a2_fit:4; + u32 pam:1; + u32 pwr:1; + u32 moredata:1; + u32 morefrag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 nextpktlen:14; + u32 nextind:1; + u32 rsvd:1; + + u32 rxmcs:6; + u32 rxht:1; + u32 amsdu:1; + u32 splcp:1; + u32 bandwidth:1; + u32 htc:1; + u32 tcpchk_rpt:1; + u32 ipcchk_rpt:1; + u32 tcpchk_valid:1; + u32 hwpcerr:1; + u32 hwpcind:1; + u32 iv0:16; + + u32 iv1; + + u32 tsfl; + + u32 bufferaddress; + u32 bufferaddress64; + +} __packed; + +void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc, + u8 queue_index); +u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, + u8 queue_index); +void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 
queue_index); +void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, + u8 *tx_bd_desc, u8 *desc, u8 queue_index, + struct sk_buff *skb, dma_addr_t addr); + +void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + u8 *pbd_desc_tx, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); +bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, + u8 desc_name, u8 *val); + +u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); +void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); +void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool firstseg, bool lastseg, + struct sk_buff *skb); +u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb); +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h index d53f4332464daa820ac2b909ba93e5f847e3b420..b1e44b86e8ed3617d0d11e627ef30f31d311b552 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h @@ -336,7 +336,6 @@ enum fw_h2c_cmd { H2C_TMP3, H2C_WOWLAN_UPDATE_IV_CMD, /*50*/ H2C_TMP4, - MAX_H2CCMD /*52*/ }; /* The following macros are used for FW diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 2b3c78baa9f8b3742020aa377b0449785eeb8ab7..b358ebce8942127413105b39e3799006d559eed8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -312,10 +312,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, hdr = (struct ieee80211_hdr *)(skb->data + stats->rx_drvinfo_size + stats->rx_bufshift); - if (!hdr) { - /* during testing, hdr was NULL here */ - return false; - } if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h index 417afeed36af16d02ca13500935c7af7569ef9a9..06c448c010fdbba3fa19c7b9267c7962d29df004 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h @@ -11,10 +11,6 @@ ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ** more details. ** - ** You should have received a copy of the GNU General Public License along with - ** this program; if not, write to the Free Software Foundation, Inc., - ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - ** ** The full GNU General Public License is included in this distribution in the ** file called LICENSE. ** @@ -24,8 +20,7 @@ ** Hsinchu 300, Taiwan. 
** Larry Finger ** - ***************************************************************************** - */ + ******************************************************************************/ #ifndef __RTL8723E_BTC_H__ #define __RTL8723E_BTC_H__ diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h index debe261a7eeb9026f2940db194ff25f1b1d0fe8b..94bdd4bbca5dfcfb6aa537fd70365180df2dd8bf 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -25,55 +21,145 @@ * * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #ifndef __RTL8723E_DEF_H__ #define __RTL8723E_DEF_H__ +#define HAL_RETRY_LIMIT_INFRA 48 +#define HAL_RETRY_LIMIT_AP_ADHOC 7 + +#define RESET_DELAY_8185 20 + +#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) +#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) + +#define NUM_OF_FIRMWARE_QUEUE 10 +#define NUM_OF_PAGES_IN_FW 0x100 +#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 + +#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 + +#define MAX_LINES_HWCONFIG_TXT 1000 +#define MAX_BYTES_LINE_HWCONFIG_TXT 256 + +#define SW_THREE_WIRE 0 +#define HW_THREE_WIRE 2 + +#define BT_DEMO_BOARD 0 +#define BT_QA_BOARD 1 +#define BT_FPGA 2 + +#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 +#define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define RX_MPDU_QUEUE 0 +#define MAX_H2C_QUEUE_NUM 10 -#define CHIP_8723 BIT(0) -#define NORMAL_CHIP BIT(3) -#define RF_TYPE_1T2R BIT(4) -#define RF_TYPE_2T2R BIT(5) -#define CHIP_VENDOR_UMC BIT(7) -#define B_CUT_VERSION BIT(12) -#define C_CUT_VERSION BIT(13) -#define D_CUT_VERSION ((BIT(12)|BIT(13))) -#define E_CUT_VERSION BIT(14) -#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) +#define RX_MPDU_QUEUE 0 +#define RX_CMD_QUEUE 1 +#define RX_MAX_QUEUE 2 +#define AC2QUEUEID(_AC) (_AC) +#define C2H_RX_CMD_HDR_LEN 8 +#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 0, 16) +#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 16, 8) +#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 24, 7) +#define GET_C2H_CMD_CONTINUE(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 31, 1) +#define GET_C2H_CMD_CONTENT(__prxhdr) \ + ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) + +#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) +#define 
GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) +#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) +#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) + +#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) +#define CHIP_BONDING_92C_1T2R 0x1 + +#define CHIP_8723 BIT(0) +#define NORMAL_CHIP BIT(3) +#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6))) +#define RF_TYPE_1T2R BIT(4) +#define RF_TYPE_2T2R BIT(5) +#define CHIP_VENDOR_UMC BIT(7) +#define B_CUT_VERSION BIT(12) +#define C_CUT_VERSION BIT(13) +#define D_CUT_VERSION ((BIT(12)|BIT(13))) +#define E_CUT_VERSION BIT(14) +#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) /* MASK */ -#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) -#define CHIP_TYPE_MASK BIT(3) -#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6)) -#define MANUFACTUER_MASK BIT(7) -#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8)) -#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12)) +#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) +#define CHIP_TYPE_MASK BIT(3) +#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6)) +#define MANUFACTUER_MASK BIT(7) +#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8)) +#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12)) /* Get element */ #define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK) +#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK) +#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK) #define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK) +#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) #define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) -#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\ - true : false) -#define IS_8723_SERIES(version) \ - ((GET_CVID_IC_TYPE(version) == CHIP_8723) ? true : false) -#define IS_CHIP_VENDOR_UMC(version) \ - ((GET_CVID_MANUFACTUER(version)) ? true : false) - -#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \ - ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) -#define IS_VENDOR_8723_A_CUT(version) ((IS_8723_SERIES(version)) ? \ - ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) -#define IS_81xxC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) \ - ? ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \ - true : false) : false) +#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\ + true : false) +#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ? \ + true : false) +#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true) +#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\ + ? true : false) +#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\ + ? true : false) +#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version)) ? \ + true : false) + +#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\ + ? 
((GET_CVID_CUT_VERSION(version)) ? \ + false : true) : false) +#define IS_VENDOR_8723_A_CUT(version) ((IS_8723_SERIES(version))\ + ? ((GET_CVID_CUT_VERSION(version)) ? \ + false : true) : false) +#define IS_VENDOR_8723A_B_CUT(version) ((IS_8723_SERIES(version))\ + ? ((GET_CVID_CUT_VERSION(version) == \ + B_CUT_VERSION) ? true : false) : false) +#define IS_81xxC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\ + ? ((GET_CVID_CUT_VERSION(version) == \ + B_CUT_VERSION) ? true : false) : false) enum rf_optype { RF_OP_BY_SW_3WIRE = 0, @@ -93,7 +179,7 @@ enum power_save_mode { POWER_SAVE_MODE_SAVE, }; -enum power_polocy_config { +enum power_policy_config { POWERCFG_MAX_POWER_SAVINGS, POWERCFG_GLOBAL_POWER_SAVINGS, POWERCFG_LOCAL_POWER_SAVINGS, @@ -143,6 +229,41 @@ enum rtl_desc_qsel { QSLT_CMD = 0x13, }; +enum rtl_desc8723e_rate { + DESC92C_RATE1M = 0x00, + DESC92C_RATE2M = 0x01, + DESC92C_RATE5_5M = 0x02, + DESC92C_RATE11M = 0x03, + + DESC92C_RATE6M = 0x04, + DESC92C_RATE9M = 0x05, + DESC92C_RATE12M = 0x06, + DESC92C_RATE18M = 0x07, + DESC92C_RATE24M = 0x08, + DESC92C_RATE36M = 0x09, + DESC92C_RATE48M = 0x0a, + DESC92C_RATE54M = 0x0b, + + DESC92C_RATEMCS0 = 0x0c, + DESC92C_RATEMCS1 = 0x0d, + DESC92C_RATEMCS2 = 0x0e, + DESC92C_RATEMCS3 = 0x0f, + DESC92C_RATEMCS4 = 0x10, + DESC92C_RATEMCS5 = 0x11, + DESC92C_RATEMCS6 = 0x12, + DESC92C_RATEMCS7 = 0x13, + DESC92C_RATEMCS8 = 0x14, + DESC92C_RATEMCS9 = 0x15, + DESC92C_RATEMCS10 = 0x16, + DESC92C_RATEMCS11 = 0x17, + DESC92C_RATEMCS12 = 0x18, + DESC92C_RATEMCS13 = 0x19, + DESC92C_RATEMCS14 = 0x1a, + DESC92C_RATEMCS15 = 0x1b, + DESC92C_RATEMCS15_SG = 0x1c, + DESC92C_RATEMCS32 = 0x20, +}; + struct phy_sts_cck_8723e_t { u8 adc_pwdb_X[4]; u8 sq_rpt; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c index 25cc83058b01a25be6a9ea78d44ce744a22a89a0..a0e86922780a6702d14957454ef7e0f9899496b1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -25,8 +21,7 @@ * * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #include "../wifi.h" #include "../base.h" @@ -151,7 +146,7 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} }; -static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw) +static void rtl8723e_dm_diginit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; @@ -176,7 +171,7 @@ static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw) dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; } -static u8 rtl_init_gain_min_pwdb(struct ieee80211_hw *hw) +static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; @@ -195,14 +190,15 @@ static u8 rtl_init_gain_min_pwdb(struct ieee80211_hw *hw) } else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT || dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undec_sm_pwdb; - } else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) { + } else if (dm_digtable->curmultista_cstate == + DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } return (u8) rssi_val_min; } -static void rtl8723ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +static void rtl8723e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -239,8 +235,7 @@ static void rtl8723ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "cnt_parity_fail = %d, cnt_rate_illegal = %d, " - "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", falsealm_cnt->cnt_parity_fail, falsealm_cnt->cnt_rate_illegal, falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); @@ -263,52 +258,60 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) value_igi += 0; else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2) value_igi++; - else + else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2) value_igi += 2; - - value_igi = clamp(value_igi, (u8)DM_DIG_FA_LOWER, (u8)DM_DIG_FA_UPPER); + if (value_igi > DM_DIG_FA_UPPER) + value_igi = DM_DIG_FA_UPPER; + else if (value_igi < DM_DIG_FA_LOWER) + value_igi = DM_DIG_FA_LOWER; if (rtlpriv->falsealm_cnt.cnt_all > 10000) value_igi = 0x32; dm_digtable->cur_igvalue = value_igi; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *dgtbl = &rtlpriv->dm_digtable; + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (rtlpriv->falsealm_cnt.cnt_all > dgtbl->fa_highthresh) { - if ((dgtbl->back_val - 2) < dgtbl->back_range_min) - dgtbl->back_val = dgtbl->back_range_min; + if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) { + if ((dm_digtable->back_val - 2) < + dm_digtable->back_range_min) + dm_digtable->back_val = + dm_digtable->back_range_min; else - dgtbl->back_val -= 2; - } else if (rtlpriv->falsealm_cnt.cnt_all < dgtbl->fa_lowthresh) { - if ((dgtbl->back_val + 2) > dgtbl->back_range_max) - dgtbl->back_val = dgtbl->back_range_max; + dm_digtable->back_val -= 2; + } else 
if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) { + if ((dm_digtable->back_val + 2) > + dm_digtable->back_range_max) + dm_digtable->back_val = + dm_digtable->back_range_max; else - dgtbl->back_val += 2; + dm_digtable->back_val += 2; } - if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) > - dgtbl->rx_gain_max) - dgtbl->cur_igvalue = dgtbl->rx_gain_max; - else if ((dgtbl->rssi_val_min + 10 - - dgtbl->back_val) < dgtbl->rx_gain_min) - dgtbl->cur_igvalue = dgtbl->rx_gain_min; + if ((dm_digtable->rssi_val_min + 10 - dm_digtable->back_val) > + dm_digtable->rx_gain_max) + dm_digtable->cur_igvalue = dm_digtable->rx_gain_max; + else if ((dm_digtable->rssi_val_min + 10 - + dm_digtable->back_val) < dm_digtable->rx_gain_min) + dm_digtable->cur_igvalue = dm_digtable->rx_gain_min; else - dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val; + dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 - + dm_digtable->back_val; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "rssi_val_min = %x back_val %x\n", - dgtbl->rssi_val_min, dgtbl->back_val); + dm_digtable->rssi_val_min, dm_digtable->back_val); - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } -static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) +static void rtl8723e_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) { + static u8 binitialized; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; @@ -318,16 +321,15 @@ static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) if (mac->opmode == NL80211_IFTYPE_ADHOC) multi_sta = true; - if ((!multi_sta) || - (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT)) { - rtlpriv->initialized = false; + if (!multi_sta || (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT)) { + binitialized = false; dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; return; - } else if (!rtlpriv->initialized) { - rtlpriv->initialized = true; + } else if (!binitialized) { + binitialized = true; dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; dm_digtable->cur_igvalue = 0x20; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) { @@ -337,7 +339,7 @@ static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) if (dm_digtable->dig_ext_port_stage == DIG_EXT_PORT_STAGE_2) { dm_digtable->cur_igvalue = 0x20; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; @@ -348,7 +350,7 @@ static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) } else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; dm_digtable->cur_igvalue = 0x20; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, @@ -357,22 +359,22 @@ static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) dm_digtable->dig_ext_port_stage); } -static void rtl8723ae_dm_initial_gain_sta(struct ieee80211_hw *hw) +static void rtl8723e_dm_initial_gain_sta(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "presta_cstate = %x, cursta_cstate = %x\n", - dm_digtable->presta_cstate, - dm_digtable->cursta_cstate); + dm_digtable->presta_cstate, + dm_digtable->cursta_cstate); if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate || 
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT || dm_digtable->cursta_cstate == DIG_STA_CONNECT) { - if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) { - dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw); + dm_digtable->rssi_val_min = + rtl8723e_dm_initial_gain_min_pwdb(hw); rtl92c_dm_ctrl_initgain_by_rssi(hw); } } else { @@ -381,16 +383,17 @@ static void rtl8723ae_dm_initial_gain_sta(struct ieee80211_hw *hw) dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; dm_digtable->cur_igvalue = 0x20; dm_digtable->pre_igvalue = 0; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); } } -static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) + +static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) { - dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw); + dm_digtable->rssi_val_min = rtl8723e_dm_initial_gain_min_pwdb(hw); if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { if (dm_digtable->rssi_val_min <= 25) @@ -418,12 +421,11 @@ static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) CCK_FA_STAGE_High; else dm_digtable->cur_cck_fa_state = - CCK_FA_STAGE_Low; - + CCK_FA_STAGE_LOW; if (dm_digtable->pre_cck_fa_state != dm_digtable->cur_cck_fa_state) { if (dm_digtable->cur_cck_fa_state == - CCK_FA_STAGE_Low) + CCK_FA_STAGE_LOW) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); else @@ -449,13 +451,13 @@ static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) } -static void rtl8723ae_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) +static void rtl8723e_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (mac->act_scanning == true) + if (mac->act_scanning) return; if (mac->link_state >= MAC80211_LINKED) @@ -463,28 +465,29 @@ static void rtl8723ae_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) else dm_digtable->cursta_cstate = DIG_STA_DISCONNECT; - rtl8723ae_dm_initial_gain_sta(hw); - rtl8723ae_dm_initial_gain_multi_sta(hw); - rtl8723ae_dm_cck_packet_detection_thresh(hw); + rtl8723e_dm_initial_gain_sta(hw); + rtl8723e_dm_initial_gain_multi_sta(hw); + rtl8723e_dm_cck_packet_detection_thresh(hw); dm_digtable->presta_cstate = dm_digtable->cursta_cstate; } -static void rtl8723ae_dm_dig(struct ieee80211_hw *hw) +static void rtl8723e_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (rtlpriv->dm.dm_initialgain_enable == false) + if (!rtlpriv->dm.dm_initialgain_enable) return; - if (dm_digtable->dig_enable_flag == false) + if (!dm_digtable->dig_enable_flag) return; - rtl8723ae_dm_ctrl_initgain_by_twoport(hw); + rtl8723e_dm_ctrl_initgain_by_twoport(hw); + } -static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) +static void rtl8723e_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -502,7 +505,7 @@ static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) if ((mac->link_state < MAC80211_LINKED) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - "Not connected\n"); + "Not connected to any\n"); rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; @@ -512,18 +515,21 @@ static 
void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) if (mac->link_state >= MAC80211_LINKED) { if (mac->opmode == NL80211_IFTYPE_ADHOC) { - undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb; + undec_sm_pwdb = + rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "AP Client PWDB = 0x%lx\n", - undec_sm_pwdb); + undec_sm_pwdb); } else { - undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; + undec_sm_pwdb = + rtlpriv->dm.undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "STA Default Port PWDB = 0x%lx\n", - undec_sm_pwdb); + undec_sm_pwdb); } } else { - undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb; + undec_sm_pwdb = + rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "AP Ext Port PWDB = 0x%lx\n", @@ -534,37 +540,39 @@ static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); - } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && - (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) { + } else if ((undec_sm_pwdb < + (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && + (undec_sm_pwdb >= + TX_POWER_NEAR_FIELD_THRESH_LVL1)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"); - } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { + } else if (undec_sm_pwdb < + (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_NORMAL\n"); } - if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) { + if (rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "PHY_SetTxPowerLevel8192S() Channel = %d\n", rtlphy->current_channel); - rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl8723e_phy_set_txpower_level(hw, rtlphy->current_channel); } rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } -void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw) +void rtl8723e_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - "cur_igvalue = 0x%x, " - "pre_igvalue = 0x%x, back_val = %d\n", - dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, - dm_digtable->back_val); + "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n", + dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, + dm_digtable->back_val); if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, @@ -576,32 +584,39 @@ void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw) } } -static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw) +static void rtl8723e_dm_pwdb_monitor(struct ieee80211_hw *hw) +{ +} + +static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + static u64 last_txok_cnt; + static u64 last_rxok_cnt; + static u32 last_bt_edca_ul; + static u32 last_bt_edca_dl; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; bool bt_change_edca = false; - if ((mac->last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || - (mac->last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { 
+ if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) || + (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; - mac->last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; - mac->last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; + last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul; + last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl; } - if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; + if (rtlpriv->btcoexist.bt_edca_ul != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_ul; bt_change_edca = true; } - if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { - edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl; + if (rtlpriv->btcoexist.bt_edca_dl != 0) { + edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } @@ -609,22 +624,11 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw) rtlpriv->dm.current_turbo_edca = false; return; } - - if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { - if (!(edca_be_ul & 0xffff0000)) - edca_be_ul |= 0x005e0000; - - if (!(edca_be_dl & 0xffff0000)) - edca_be_dl |= 0x005e0000; - } - if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { - cur_txok_cnt = rtlpriv->stats.txbytesunicast - - mac->last_txok_cnt; - cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - - mac->last_rxok_cnt; + cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || @@ -647,18 +651,20 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw) } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, - &tmp); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *)(&tmp)); rtlpriv->dm.current_turbo_edca = false; } } rtlpriv->dm.is_any_nonbepkts = false; - mac->last_txok_cnt = rtlpriv->stats.txbytesunicast; - mac->last_rxok_cnt = rtlpriv->stats.rxbytesunicast; + last_txok_cnt = rtlpriv->stats.txbytesunicast; + last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } -static void rtl8723ae_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) +static void rtl8723e_dm_initialize_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -667,10 +673,20 @@ static void rtl8723ae_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "pMgntInfo->txpower_tracking = %d\n", - rtlpriv->dm.txpower_tracking); + rtlpriv->dm.txpower_tracking); } -void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +static void rtl8723e_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) +{ + rtl8723e_dm_initialize_txpower_tracking_thermalmeter(hw); +} + +void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw) +{ + return; +} + +void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rate_adaptive *p_ra = &(rtlpriv->ra); @@ -682,101 +698,32 @@ void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; -} -static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rate_adaptive *p_ra = &(rtlpriv->ra); - u32 low_rssithresh_for_ra, 
high_rssithresh_for_ra; - struct ieee80211_sta *sta = NULL; - - if (is_hal_stop(rtlhal)) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - " driver is going to unload\n"); - return; - } - - if (!rtlpriv->dm.useramask) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - " driver does not control rate adaptive mask\n"); - return; - } - - if (mac->link_state == MAC80211_LINKED && - mac->opmode == NL80211_IFTYPE_STATION) { - switch (p_ra->pre_ratr_state) { - case DM_RATR_STA_HIGH: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 20; - break; - case DM_RATR_STA_MIDDLE: - high_rssithresh_for_ra = 55; - low_rssithresh_for_ra = 20; - break; - case DM_RATR_STA_LOW: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 25; - break; - default: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 20; - break; - } - - if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra) - p_ra->ratr_state = DM_RATR_STA_HIGH; - else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra) - p_ra->ratr_state = DM_RATR_STA_MIDDLE; - else - p_ra->ratr_state = DM_RATR_STA_LOW; - - if (p_ra->pre_ratr_state != p_ra->ratr_state) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - "RSSI = %ld\n", - rtlpriv->dm.undec_sm_pwdb); - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - "RSSI_LEVEL = %d\n", p_ra->ratr_state); - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - "PreState = %d, CurState = %d\n", - p_ra->pre_ratr_state, p_ra->ratr_state); - - rcu_read_lock(); - sta = rtl_find_sta(hw, mac->bssid); - if (sta) - rtlpriv->cfg->ops->update_rate_tbl(hw, sta, - p_ra->ratr_state); - rcu_read_unlock(); - - p_ra->pre_ratr_state = p_ra->ratr_state; - } - } } -void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) +void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ps_t *dm_pstable = &rtlpriv->dm_pstable; + static u8 initialize; + static u32 reg_874, reg_c70, reg_85c, reg_a74; - if (!rtlpriv->reg_init) { - rtlpriv->reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - MASKDWORD) & 0x1CC000) >> 14; + if (initialize == 0) { + reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, + MASKDWORD) & 0x1CC000) >> 14; - rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, - MASKDWORD) & BIT(3)) >> 3; + reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, + MASKDWORD) & BIT(3)) >> 3; - rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, - MASKDWORD) & 0xFF000000) >> 24; + reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, + MASKDWORD) & 0xFF000000) >> 24; - rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & - 0xF000) >> 12; + reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12; - rtlpriv->reg_init = true; + initialize = 1; } - if (!force_in_normal) { + if (!bforce_in_normal) { if (dm_pstable->rssi_val_min != 0) { if (dm_pstable->pre_rfstate == RF_NORMAL) { if (dm_pstable->rssi_val_min >= 30) @@ -798,7 +745,6 @@ void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) { if (dm_pstable->cur_rfstate == RF_SAVE) { - rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BIT(5), 0x1); rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, @@ -813,12 +759,12 @@ void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) rtl_set_bbreg(hw, 0x818, BIT(28), 0x1); } else { rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - 0x1CC000, rtlpriv->reg_874); + 0x1CC000, reg_874); rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), - rtlpriv->reg_c70); + reg_c70); rtl_set_bbreg(hw, 
RFPGA0_XCD_SWITCHCONTROL, 0xFF000000, - rtlpriv->reg_85c); - rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74); + reg_85c); + rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74); rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BIT(5), 0x0); @@ -828,7 +774,7 @@ void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) } } -static void rtl8723ae_dm_dynamic_bpowersaving(struct ieee80211_hw *hw) +static void rtl8723e_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -847,48 +793,49 @@ static void rtl8723ae_dm_dynamic_bpowersaving(struct ieee80211_hw *hw) rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "AP Client PWDB = 0x%lx\n", - dm_pstable->rssi_val_min); + dm_pstable->rssi_val_min); } else { - dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb; + dm_pstable->rssi_val_min = + rtlpriv->dm.undec_sm_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "STA Default Port PWDB = 0x%lx\n", - dm_pstable->rssi_val_min); + dm_pstable->rssi_val_min); } } else { - dm_pstable->rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; + dm_pstable->rssi_val_min = + rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "AP Ext Port PWDB = 0x%lx\n", - dm_pstable->rssi_val_min); + dm_pstable->rssi_val_min); } - rtl8723ae_dm_rf_saving(hw, false); + rtl8723e_dm_rf_saving(hw, false); } -void rtl8723ae_dm_init(struct ieee80211_hw *hw) +void rtl8723e_dm_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; - rtl8723ae_dm_diginit(hw); + rtl8723e_dm_diginit(hw); rtl8723_dm_init_dynamic_txpower(hw); rtl8723_dm_init_edca_turbo(hw); - rtl8723ae_dm_init_rate_adaptive_mask(hw); - rtl8723ae_dm_initialize_txpower_tracking(hw); + rtl8723e_dm_init_rate_adaptive_mask(hw); + rtl8723e_dm_initialize_txpower_tracking(hw); rtl8723_dm_init_dynamic_bb_powersaving(hw); } -void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw) +void rtl8723e_dm_watchdog(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); bool fw_current_inpsmode = false; bool fw_ps_awake = true; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, - (u8 *) (&fw_current_inpsmode)); + (u8 *)(&fw_current_inpsmode)); rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, - (u8 *) (&fw_ps_awake)); + (u8 *)(&fw_ps_awake)); if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; @@ -896,58 +843,57 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw) if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { - rtl8723ae_dm_dig(hw); - rtl8723ae_dm_false_alarm_counter_statistics(hw); - rtl8723ae_dm_dynamic_bpowersaving(hw); - rtl8723ae_dm_dynamic_txpower(hw); - rtl8723ae_dm_refresh_rate_adaptive_mask(hw); - rtl8723ae_dm_bt_coexist(hw); - rtl8723ae_dm_check_edca_turbo(hw); + rtl8723e_dm_pwdb_monitor(hw); + rtl8723e_dm_dig(hw); + rtl8723e_dm_false_alarm_counter_statistics(hw); + rtl8723e_dm_dynamic_bb_powersaving(hw); + rtl8723e_dm_dynamic_txpower(hw); + rtl8723e_dm_check_txpower_tracking(hw); + /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */ + rtl8723e_dm_bt_coexist(hw); + rtl8723e_dm_check_edca_turbo(hw); } - if (rtlpcipriv->bt_coexist.init_set) + if (rtlpriv->btcoexist.init_set) rtl_write_byte(rtlpriv, 0x76e, 0xc); } -static void rtl8723ae_dm_init_bt_coexist(struct ieee80211_hw 
*hw) +static void rtl8723e_dm_init_bt_coexist(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - rtlpcipriv->bt_coexist.bt_rfreg_origin_1e + rtlpriv->btcoexist.bt_rfreg_origin_1e = rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK1, 0xfffff); - rtlpcipriv->bt_coexist.bt_rfreg_origin_1f + rtlpriv->btcoexist.bt_rfreg_origin_1f = rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK2, 0xf0); - rtlpcipriv->bt_coexist.cstate = 0; - rtlpcipriv->bt_coexist.previous_state = 0; - rtlpcipriv->bt_coexist.cstate_h = 0; - rtlpcipriv->bt_coexist.previous_state_h = 0; - rtlpcipriv->bt_coexist.lps_counter = 0; + rtlpriv->btcoexist.cstate = 0; + rtlpriv->btcoexist.previous_state = 0; + rtlpriv->btcoexist.cstate_h = 0; + rtlpriv->btcoexist.previous_state_h = 0; + rtlpriv->btcoexist.lps_counter = 0; /* Enable counter statistics */ rtl_write_byte(rtlpriv, 0x76e, 0x4); rtl_write_byte(rtlpriv, 0x778, 0x3); rtl_write_byte(rtlpriv, 0x40, 0x20); - rtlpcipriv->bt_coexist.init_set = true; + rtlpriv->btcoexist.init_set = true; } -void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 tmp_byte = 0; - if (!rtlpcipriv->bt_coexist.bt_coexistence) { + if (!rtlpriv->btcoexist.bt_coexistence) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[DM]{BT], BT not exist!!\n"); return; } - if (!rtlpcipriv->bt_coexist.init_set) { + if (!rtlpriv->btcoexist.init_set) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[DM][BT], rtl8723ae_dm_bt_coexist()\n"); - - rtl8723ae_dm_init_bt_coexist(hw); + "[DM][BT], rtl8723e_dm_bt_coexist()\n"); + rtl8723e_dm_init_bt_coexist(hw); } tmp_byte = rtl_read_byte(rtlpriv, 0x40); @@ -955,5 +901,5 @@ void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw) "[DM][BT], 0x40 is 0x%x", tmp_byte); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[DM][BT], bt_dm_coexist start"); - rtl8723ae_dm_bt_coexist_8723(hw); + rtl8723e_dm_bt_coexist_8723(hw); } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h index d253bb53d03e2ccde5b4f646e4fff6a154158836..6fa0feb05f6d11a4fe8f254a5d6622a76838c255 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h @@ -25,17 +25,23 @@ * * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #ifndef __RTL8723E_DM_H__ #define __RTL8723E_DM_H__ +#define HAL_DM_DIG_DISABLE BIT(0) #define HAL_DM_HIPWR_DISABLE BIT(1) +#define OFDM_TABLE_LENGTH 37 +#define CCK_TABLE_LENGTH 33 + #define OFDM_TABLE_SIZE 37 #define CCK_TABLE_SIZE 33 +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + #define DM_DIG_THRESH_HIGH 40 #define DM_DIG_THRESH_LOW 35 @@ -63,12 +69,18 @@ #define DM_RATR_STA_MIDDLE 2 #define DM_RATR_STA_LOW 3 +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVAL 25 + #define TXHIGHPWRLEVEL_NORMAL 0 #define TXHIGHPWRLEVEL_LEVEL1 1 #define TXHIGHPWRLEVEL_LEVEL2 2 #define TXHIGHPWRLEVEL_BT1 3 #define TXHIGHPWRLEVEL_BT2 4 +#define DM_TYPE_BYFW 0 #define DM_TYPE_BYDRIVER 1 #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 @@ -82,6 +94,7 @@ struct swat_t { long trying_threshold; u8 cur_antenna; u8 pre_antenna; + }; enum tag_dynamic_init_gain_operation_type_definition { @@ -98,7 +111,7 @@ enum 
tag_dynamic_init_gain_operation_type_definition { enum tag_cck_packet_detection_threshold_type_definition { CCK_PD_STAGE_LowRssi = 0, CCK_PD_STAGE_HighRssi = 1, - CCK_FA_STAGE_Low = 2, + CCK_FA_STAGE_LOW = 2, CCK_FA_STAGE_High = 3, CCK_PD_STAGE_MAX = 4, }; @@ -138,17 +151,24 @@ enum dm_dig_connect_e { DIG_CONNECT_MAX }; +#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) +#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) +#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) +#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) +#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) #define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ - ((((struct rtl_priv *)(_priv))->mac80211.opmode == \ - NL80211_IFTYPE_ADHOC) ? \ - (((struct rtl_priv *)(_priv))->dm.entry_min_undec_sm_pwdb) \ - : (((struct rtl_priv *)(_priv))->dm.undec_sm_pwdb)) - -void rtl8723ae_dm_init(struct ieee80211_hw *hw); -void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw); -void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw); -void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); -void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); -void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw); - + ( \ + (((struct rtl_priv *)(_priv))->mac80211.opmode == \ + NL80211_IFTYPE_ADHOC) ? \ + (((struct rtl_priv *)(_priv))->dm.entry_min_undec_sm_pwdb) : \ + (((struct rtl_priv *)(_priv))->dm.undec_sm_pwdb) \ + ) + +void rtl8723e_dm_init(struct ieee80211_hw *hw); +void rtl8723e_dm_watchdog(struct ieee80211_hw *hw); +void rtl8723e_dm_write_dig(struct ieee80211_hw *hw); +void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw); +void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); +void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c index 728b7563ad36a32eb5b989a3ddcf5738088dfbf6..b7c0d38ee5b5850623ce2f49554ed9e82e66c0e0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -25,18 +21,19 @@ * * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../core.h" #include "reg.h" #include "def.h" #include "fw.h" #include "../rtl8723com/fw_common.h" -static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) +static bool _rtl8723e_check_fw_read_last_h2c(struct ieee80211_hw *hw, + u8 boxnum) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 val_hmetfr, val_mcutst_1; @@ -50,17 +47,17 @@ static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) return result; } -static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, - u8 *p_cmdbuffer) +static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *cmdbuffer) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 boxnum; u16 box_reg = 0, box_extreg = 0; - u8 u1tmp; - bool isfw_rd = false; - bool bwrite_success = false; + u8 u1b_tmp; + bool isfw_read = false; + u8 buf_index = 0; + bool bwrite_sucess = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; @@ -83,7 +80,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, h2c_waitcounter++; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Wait 100 us (%d times)...\n", - h2c_waitcounter); + h2c_waitcounter); udelay(100); if (h2c_waitcounter > 1000) @@ -99,12 +96,11 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, } } - while (!bwrite_success) { + while (!bwrite_sucess) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger " - "for FW INT!\n"); + "Write H2C fail because no trigger for FW INT!\n"); break; } @@ -128,34 +124,35 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + "switch case not process\n"); break; } - isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum); - while (!isfw_rd) { + isfw_read = _rtl8723e_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { wait_h2c_limmit--; if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting too long for FW read clear HMEBox(%d)!\n", + "Wating too long for FW read clear HMEBox(%d)!\n", boxnum); break; } udelay(10); - isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum); - u1tmp = rtl_read_byte(rtlpriv, 0x1BF); + isfw_read = _rtl8723e_check_fw_read_last_h2c(hw, + boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting for FW read clear HMEBox(%d)!!! " - "0x1BF = %2x\n", boxnum, u1tmp); + "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n", + boxnum, u1b_tmp); } - if (!isfw_rd) { + if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Write H2C register BOX[%d] fail!!!!! " - "Fw do not read.\n", boxnum); + "Write H2C register BOX[%d] fail!!!!! 
Fw do not read.\n", + boxnum); break; } @@ -169,8 +166,8 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, switch (cmd_len) { case 1: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 1); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 1); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -179,8 +176,8 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; case 2: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 2); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -189,8 +186,8 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; case 3: boxcontent[0] &= ~(BIT(7)); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer, 3); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 3); for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, @@ -199,10 +196,10 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; case 4: boxcontent[0] |= (BIT(7)); - memcpy((u8 *) (boxextcontent), - p_cmdbuffer, 2); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer + 2, 2); + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index + 2, 2); for (idx = 0; idx < 2; idx++) { rtl_write_byte(rtlpriv, box_extreg + idx, @@ -216,10 +213,10 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; case 5: boxcontent[0] |= (BIT(7)); - memcpy((u8 *) (boxextcontent), - p_cmdbuffer, 2); - memcpy((u8 *) (boxcontent) + 1, - p_cmdbuffer + 2, 3); + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index, 2); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index + 2, 3); for (idx = 0; idx < 2; idx++) { rtl_write_byte(rtlpriv, box_extreg + idx, @@ -237,7 +234,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, break; } - bwrite_success = true; + bwrite_sucess = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) @@ -245,7 +242,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "pHalData->last_hmeboxnum = %d\n", - rtlhal->last_hmeboxnum); + rtlhal->last_hmeboxnum); } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); @@ -255,52 +252,49 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); } -void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *p_cmdbuffer) +void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *cmdbuffer) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 tmp_cmdbuf[2]; - if (rtlhal->fw_ready == false) { + if (!rtlhal->fw_ready) { RT_ASSERT(false, - "return H2C cmd because of Fw download fail!!!\n"); + "return H2C cmd because of Fw download fail!!!\n"); return; } - - _rtl8723ae_fill_h2c_command(hw, element_id, cmd_len, p_cmdbuffer); - return; + memset(tmp_cmdbuf, 0, 8); + memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); + _rtl8723e_fill_h2c_command(hw, element_id, cmd_len, + (u8 *)&tmp_cmdbuf); } -static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) +void rtl8723e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct 
rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - unsigned long flags; - struct sk_buff *pskb = NULL; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); - - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; + u8 u1_h2c_set_pwrmode[3] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); - __skb_queue_tail(&ring->queue, skb); + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); + SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, + (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, + ppsc->reg_max_lps_awakeintvl); - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl8723e_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", + u1_h2c_set_pwrmode, 3); + rtl8723e_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); +} - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); +#define BEACON_PG 0 /* ->1 */ +#define PSPOLL_PG 2 +#define NULL_PG 3 +#define PROBERSP_PG 4 /* ->5 */ - return true; -} +#define TOTAL_RESERVED_PKT_LEN 768 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ @@ -412,111 +406,111 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; -void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) +void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - u32 totalpacketlen; bool rtstatus; - u8 u1RsvdPageLoc[3] = { 0 }; - bool dlok = false; - + u8 u1rsvdpageloc[3] = { 0 }; + bool b_dlok = false; u8 *beacon; u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; + /*--------------------------------------------------------- - (1) beacon - --------------------------------------------------------- - */ + * (1) beacon + *--------------------------------------------------------- + */ beacon = &reserved_page_packet[BEACON_PG * 128]; SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); SET_80211_HDR_ADDRESS3(beacon, mac->bssid); /*------------------------------------------------------- - (2) ps-poll - -------------------------------------------------------- - */ + * (2) ps-poll + *-------------------------------------------------------- + */ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- - (3) null data - ---------------------------------------------------------i - */ + * (3) null data + *--------------------------------------------------------- + */ nullfunc = &reserved_page_packet[NULL_PG * 128]; SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- - (4) probe response - 
---------------------------------------------------------- - */ + * (4) probe response + *---------------------------------------------------------- + */ p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, - "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + "rtl8723e_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", - u1RsvdPageLoc, 3); + "rtl8723e_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); - memcpy((u8 *) skb_put(skb, totalpacketlen), + memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - rtstatus = _rtl8723ae_cmd_send_packet(hw, skb); + rtstatus = rtl_cmd_send_packet(hw, skb); if (rtstatus) - dlok = true; + b_dlok = true; - if (dlok) { + if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE:\n", - u1RsvdPageLoc, 3); - rtl8723ae_fill_h2c_cmd(hw, H2C_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + "H2C_RSVDPAGE:\n", + u1rsvdpageloc, 3); + rtl8723e_fill_h2c_cmd(hw, H2C_RSVDPAGE, + sizeof(u1rsvdpageloc), u1rsvdpageloc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!.\n"); } -void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) +void rtl8723e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { u8 u1_joinbssrpt_parm[1] = { 0 }; SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); - rtl8723ae_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); + rtl8723e_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); } static void rtl8723e_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) { - u8 u1_ctwindow_period[1] = {ctwindow}; + u8 u1_ctwindow_period[1] = { ctwindow}; + + rtl8723e_fill_h2c_cmd(hw, H2C_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); - rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); } -void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) +void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); @@ -530,7 +524,7 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) switch (p2p_ps_state) { case P2P_PS_DISABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); - memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t)); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); break; case P2P_PS_ENABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); @@ -542,7 +536,7 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) } /* hw only support 2 set of NoA */ - for (i = 0; i < p2pinfo->noa_num; i++) { + for (i = 0 ; i < p2pinfo->noa_num ; i++) { /* To control the register setting for which NOA*/ rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); if (i == 0) @@ -561,27 +555,33 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) 
start_time = p2pinfo->noa_start_time[i]; if (p2pinfo->noa_count_type[i] != 1) { - while (start_time <= (tsf_low+(50*1024))) { - start_time += p2pinfo->noa_interval[i]; + while (start_time <= + (tsf_low+(50*1024))) { + start_time += + p2pinfo->noa_interval[i]; if (p2pinfo->noa_count_type[i] != 255) p2pinfo->noa_count_type[i]--; } } rtl_write_dword(rtlpriv, 0x5E8, start_time); rtl_write_dword(rtlpriv, 0x5EC, - p2pinfo->noa_count_type[i]); + p2pinfo->noa_count_type[i]); + } + if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { /* rst p2p circuit */ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); p2p_ps_offload->offload_en = 1; + if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { p2p_ps_offload->role = 1; p2p_ps_offload->allstasleep = 0; } else { p2p_ps_offload->role = 0; } + p2p_ps_offload->discovery = 0; } break; @@ -597,26 +597,7 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) default: break; } - rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); -} - -void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 u1_h2c_set_pwrmode[3] = { 0 }; - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - - RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); - SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); - SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode, - (rtlpriv->mac80211.p2p) ? - ppsc->smart_ps : 1); - SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, - ppsc->reg_max_lps_awakeintvl); + rtl8723e_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); - RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", - u1_h2c_set_pwrmode, 3); - rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h index d355b85dd9fe72aee073ec44c42908339b904e80..9d1fe25db9538e7bd521ef1e6cd4af3abb25a856 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h @@ -24,50 +24,27 @@ * Hsinchu 300, Taiwan. * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #ifndef __RTL92C__FW__H__ #define __RTL92C__FW__H__ +#define FW_8192C_SIZE 0x3000 #define FW_8192C_START_ADDRESS 0x1000 #define FW_8192C_END_ADDRESS 0x3FFF -#define FW_8192C_PAGE_SIZE 4096 +#define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 -#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 -#define BEACON_PG 0 -#define PSPOLL_PG 2 -#define NULL_PG 3 -#define PROBERSP_PG 4 /* ->5 */ +#define IS_FW_HEADER_EXIST(_pfwhdr) \ + ((_pfwhdr->signature&0xFFFF) == 0x2300 ||\ + (_pfwhdr->signature&0xFFFF) == 0x2301 ||\ + (_pfwhdr->signature&0xFFFF) == 0x2302) -#define TOTAL_RESERVED_PKT_LEN 768 - -#define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFF00) == 0x2300) - -struct rtl8723ae_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodeSize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; +#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) -#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val) \ +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) #define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) @@ -80,11 +57,10 @@ struct rtl8723ae_firmware_header { #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, - u32 cmd_len, u8 *p_cmdbuffer); -void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); -void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); -void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); -void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); - +void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); +void rtl8723e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl8723e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c index 5b4a714f3c8ce9905d762e08068a568827af1458..5aac45d5a9740dc338e6f95c0bb9d14a95cbd2e3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -31,96 +27,102 @@ #include "../pci.h" #include "dm.h" #include "fw.h" -#include "../rtl8723com/fw_common.h" #include "phy.h" #include "reg.h" #include "hal_btc.h" -void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, - bool reject) +static bool bt_operation_on; + +void rtl8723e_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, + bool b_reject) { } void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); if (rtlpriv->link_info.busytraffic) { - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_IDLE; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_IDLE; if (rtlpriv->link_info.tx_busy_traffic) - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_UPLINK; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_UPLINK; else - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_UPLINK; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_UPLINK; if (rtlpriv->link_info.rx_busy_traffic) - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_DOWNLINK; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_DOWNLINK; else - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_DOWNLINK; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_DOWNLINK; } else { - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_IDLE; - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_UPLINK; - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_DOWNLINK; + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_IDLE; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_UPLINK; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_DOWNLINK; } if (rtlpriv->mac80211.mode == WIRELESS_MODE_G || rtlpriv->mac80211.mode == WIRELESS_MODE_B) { - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_LEGACY; - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT20; - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT40; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_LEGACY; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_HT20; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_HT40; } else { - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_LEGACY; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_LEGACY; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_HT40; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_HT20; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_HT40; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_HT20; } else { - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_HT20; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_HT40; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_HT20; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_HT40; } } - if (rtlpriv->bt_operation_on) - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT30; + if (bt_operation_on) + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BT30; else - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT30; + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_BT30; } -u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) +u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, + u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - long smooth; + long undecoratedsmoothed_pwdb; u8 bt_rssi_state = 0; - smooth = 
rtl8723ae_dm_bt_get_rx_ss(hw); + undecoratedsmoothed_pwdb = rtl8723e_dm_bt_get_rx_ss(hw); if (level_num == 2) { - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; - - if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_LOW) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_LOW)) { - if (smooth >= (rssi_thresh + - BT_FW_COEX_THRESH_TOL)) { + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + + if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_LOW) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_LOW)) { + if (undecoratedsmoothed_pwdb >= + (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_HIGH; - rtlpcipriv->bt_coexist.cstate |= + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_1_HIGH; - rtlpcipriv->bt_coexist.cstate &= + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to High\n"); @@ -130,12 +132,12 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, "[DM][BT], RSSI_1 state stay at Low\n"); } } else { - if (smooth < rssi_thresh) { + if (undecoratedsmoothed_pwdb < rssi_thresh) { bt_rssi_state = BT_RSSI_STATE_LOW; - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_RSSI_1_LOW; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to Low\n"); } else { @@ -148,22 +150,22 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, if (rssi_thresh > rssi_thresh1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 thresh error!!\n"); - return rtlpcipriv->bt_coexist.bt_pre_rssi_state; + return rtlpriv->btcoexist.bt_pre_rssi_state; } - if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_LOW) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_LOW)) { - if (smooth >= + if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_LOW) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_LOW)) { + if (undecoratedsmoothed_pwdb >= (rssi_thresh+BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_MEDIUM; - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_LOW; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to Medium\n"); } else { @@ -171,28 +173,28 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state stay at Low\n"); } - } else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_MEDIUM) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_MEDIUM)) { - if (smooth >= (rssi_thresh1 + - BT_FW_COEX_THRESH_TOL)) { + } else if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_MEDIUM) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_MEDIUM)) { + if (undecoratedsmoothed_pwdb >= + (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_HIGH; - 
rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_RSSI_1_HIGH; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_LOW; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to High\n"); - } else if (smooth < rssi_thresh) { + } else if (undecoratedsmoothed_pwdb < rssi_thresh) { bt_rssi_state = BT_RSSI_STATE_LOW; - rtlpcipriv->bt_coexist.cstate |= + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_1_LOW; - rtlpcipriv->bt_coexist.cstate &= + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; - rtlpcipriv->bt_coexist.cstate &= + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to Low\n"); @@ -202,13 +204,13 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, "[DM][BT], RSSI_1 state stay at Medium\n"); } } else { - if (smooth < rssi_thresh1) { + if (undecoratedsmoothed_pwdb < rssi_thresh1) { bt_rssi_state = BT_RSSI_STATE_MEDIUM; - rtlpcipriv->bt_coexist.cstate |= + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; - rtlpcipriv->bt_coexist.cstate &= + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; - rtlpcipriv->bt_coexist.cstate &= + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI_1 state switch to Medium\n"); @@ -219,38 +221,37 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, } } } - - rtlpcipriv->bt_coexist.bt_pre_rssi_state1 = bt_rssi_state; + rtlpriv->btcoexist.bt_pre_rssi_state1 = bt_rssi_state; return bt_rssi_state; } -u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, - u8 level_num, u8 rssi_thresh, - u8 rssi_thresh1) +u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, + u8 level_num, + u8 rssi_thresh, + u8 rssi_thresh1) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - long smooth; + long undecoratedsmoothed_pwdb = 0; u8 bt_rssi_state = 0; - smooth = rtl8723ae_dm_bt_get_rx_ss(hw); + undecoratedsmoothed_pwdb = rtl8723e_dm_bt_get_rx_ss(hw); if (level_num == 2) { - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; - - if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_LOW) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_LOW)){ - if (smooth >= + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; + + if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_LOW) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_LOW)) { + if (undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_HIGH; - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_RSSI_HIGH; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpriv->btcoexist.cstate + |= BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpriv->btcoexist.cstate + &= ~BT_COEX_STATE_WIFI_RSSI_LOW; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to High\n"); } else { @@ -259,12 +260,12 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, "[DM][BT], RSSI state stay at Low\n"); } } else { - if (smooth < rssi_thresh) { + if (undecoratedsmoothed_pwdb < 
rssi_thresh) { bt_rssi_state = BT_RSSI_STATE_LOW; - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_WIFI_RSSI_LOW; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpriv->btcoexist.cstate + |= BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpriv->btcoexist.cstate + &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to Low\n"); } else { @@ -277,20 +278,20 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, if (rssi_thresh > rssi_thresh1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI thresh error!!\n"); - return rtlpcipriv->bt_coexist.bt_pre_rssi_state; + return rtlpriv->btcoexist.bt_pre_rssi_state; } - if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_LOW) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_LOW)) { - if (smooth >= + if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_LOW) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_LOW)) { + if (undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_MEDIUM; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_MEDIUM; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_LOW; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to Medium\n"); @@ -299,28 +300,28 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state stay at Low\n"); } - } else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_MEDIUM) || - (rtlpcipriv->bt_coexist.bt_pre_rssi_state == - BT_RSSI_STATE_STAY_MEDIUM)) { - if (smooth >= + } else if ((rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_MEDIUM) || + (rtlpriv->btcoexist.bt_pre_rssi_state == + BT_RSSI_STATE_STAY_MEDIUM)) { + if (undecoratedsmoothed_pwdb >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) { bt_rssi_state = BT_RSSI_STATE_HIGH; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_HIGH; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_LOW; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to High\n"); - } else if (smooth < rssi_thresh) { + } else if (undecoratedsmoothed_pwdb < rssi_thresh) { bt_rssi_state = BT_RSSI_STATE_LOW; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_LOW; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to Low\n"); @@ -330,13 +331,13 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, "[DM][BT], RSSI state stay at Medium\n"); } } else { - if (smooth < rssi_thresh1) { + if (undecoratedsmoothed_pwdb < rssi_thresh1) { bt_rssi_state = BT_RSSI_STATE_MEDIUM; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_WIFI_RSSI_MEDIUM; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; - rtlpcipriv->bt_coexist.cstate + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_WIFI_RSSI_LOW; RT_TRACE(rtlpriv, 
COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], RSSI state switch to Medium\n"); @@ -347,31 +348,32 @@ u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, } } } - - rtlpcipriv->bt_coexist.bt_pre_rssi_state = bt_rssi_state; + rtlpriv->btcoexist.bt_pre_rssi_state = bt_rssi_state; return bt_rssi_state; } -long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw) +long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - long smooth = 0; - - if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) - smooth = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv); - else - smooth = rtlpriv->dm.entry_min_undec_sm_pwdb; + long undecoratedsmoothed_pwdb = 0; + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + undecoratedsmoothed_pwdb = + GET_UNDECORATED_AVERAGE_RSSI(rtlpriv); + } else { + undecoratedsmoothed_pwdb + = rtlpriv->dm.entry_min_undec_sm_pwdb; + } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_get_rx_ss() = %ld\n", smooth); + "rtl8723e_dm_bt_get_rx_ss() = %ld\n", + undecoratedsmoothed_pwdb); - return smooth; + return undecoratedsmoothed_pwdb; } -void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw, - bool balance_on, u8 ms0, u8 ms1) +void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw, + bool balance_on, u8 ms0, u8 ms1) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[3] = {0}; @@ -379,27 +381,26 @@ void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw, h2c_parameter[2] = 1; h2c_parameter[1] = ms1; h2c_parameter[0] = ms0; - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } else { h2c_parameter[2] = 0; h2c_parameter[1] = 0; h2c_parameter[0] = 0; } - rtlpcipriv->bt_coexist.balance_on = balance_on; + rtlpriv->btcoexist.balance_on = balance_on; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n", - balance_on ? "ON" : "OFF", ms0, ms1, - h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]); + balance_on ? 
"ON" : "OFF", ms0, ms1, h2c_parameter[0]<<16 | + h2c_parameter[1]<<8 | h2c_parameter[2]); - rtl8723ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter); } -void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type) +void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); if (type == BT_AGCTABLE_OFF) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, @@ -410,15 +411,15 @@ void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type) rtl_write_dword(rtlpriv, 0xc78, 0x611f0001); rtl_write_dword(rtlpriv, 0xc78, 0x60200001); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0x32000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0x71000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0xb0000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0xfc000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_G1, 0xfffff, 0x30355); } else if (type == BT_AGCTABLE_ON) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, @@ -429,25 +430,24 @@ void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type) rtl_write_dword(rtlpriv, 0xc78, 0x4b1f0001); rtl_write_dword(rtlpriv, 0xc78, 0x4a200001); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0xdc000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0x90000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0x51000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_AGC_HP, 0xfffff, 0x12000); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, RF_RX_G1, 0xfffff, 0x00355); - rtlpcipriv->bt_coexist.sw_coexist_all_off = false; + rtlpriv->btcoexist.sw_coexist_all_off = false; } } -void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type) +void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); if (type == BT_BB_BACKOFF_OFF) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, @@ -457,87 +457,81 @@ void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BT]BBBackOffLevel On!\n"); rtl_write_dword(rtlpriv, 0xc04, 0x3a07611); - rtlpcipriv->bt_coexist.sw_coexist_all_off = false; + rtlpriv->btcoexist.sw_coexist_all_off = false; } } -void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_fw_coex_all_off()\n"); + "rtl8723e_dm_bt_fw_coex_all_off()\n"); - if (rtlpcipriv->bt_coexist.fw_coexist_all_off) + if (rtlpriv->btcoexist.fw_coexist_all_off) return; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_fw_coex_all_off(), real Do\n"); - rtl8723ae_dm_bt_fw_coex_all_off_8723a(hw); - rtlpcipriv->bt_coexist.fw_coexist_all_off = true; + 
"rtl8723e_dm_bt_fw_coex_all_off(), real Do\n"); + rtl8723e_dm_bt_fw_coex_all_off_8723a(hw); + rtlpriv->btcoexist.fw_coexist_all_off = true; } -void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_sw_coex_all_off()\n"); + "rtl8723e_dm_bt_sw_coex_all_off()\n"); - if (rtlpcipriv->bt_coexist.sw_coexist_all_off) + if (rtlpriv->btcoexist.sw_coexist_all_off) return; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_sw_coex_all_off(), real Do\n"); - rtl8723ae_dm_bt_sw_coex_all_off_8723a(hw); - rtlpcipriv->bt_coexist.sw_coexist_all_off = true; + "rtl8723e_dm_bt_sw_coex_all_off(), real Do\n"); + rtl8723e_dm_bt_sw_coex_all_off_8723a(hw); + rtlpriv->btcoexist.sw_coexist_all_off = true; } -void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_hw_coex_all_off()\n"); + "rtl8723e_dm_bt_hw_coex_all_off()\n"); - if (rtlpcipriv->bt_coexist.hw_coexist_all_off) + if (rtlpriv->btcoexist.hw_coexist_all_off) return; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "rtl8723ae_dm_bt_hw_coex_all_off(), real Do\n"); + "rtl8723e_dm_bt_hw_coex_all_off(), real Do\n"); - rtl8723ae_dm_bt_hw_coex_all_off_8723a(hw); + rtl8723e_dm_bt_hw_coex_all_off_8723a(hw); - rtlpcipriv->bt_coexist.hw_coexist_all_off = true; + rtlpriv->btcoexist.hw_coexist_all_off = true; } -void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw) +void rtl8723e_btdm_coex_all_off(struct ieee80211_hw *hw) { - rtl8723ae_dm_bt_fw_coex_all_off(hw); - rtl8723ae_dm_bt_sw_coex_all_off(hw); - rtl8723ae_dm_bt_hw_coex_all_off(hw); + rtl8723e_dm_bt_fw_coex_all_off(hw); + rtl8723e_dm_bt_sw_coex_all_off(hw); + rtl8723e_dm_bt_hw_coex_all_off(hw); } -bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw) +bool rtl8723e_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); - if ((rtlpcipriv->bt_coexist.previous_state == - rtlpcipriv->bt_coexist.cstate) && - (rtlpcipriv->bt_coexist.previous_state_h == - rtlpcipriv->bt_coexist.cstate_h)) + if ((rtlpriv->btcoexist.previous_state == rtlpriv->btcoexist.cstate) && + (rtlpriv->btcoexist.previous_state_h == + rtlpriv->btcoexist.cstate_h)) return false; - else - return true; + return true; } -bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw) +bool rtl8723e_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->link_info.tx_busy_traffic) return true; - else - return false; + return false; } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h index 76f4d122dbc105df162779919d193e1272fcc531..bcd64a22acc02285963e7fdf0fbd572f2b4b1037 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h @@ -53,8 +53,8 @@ #define BT_COEX_STATE_WIFI_LEGACY BIT(3) #define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4) -#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5) -#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6) +#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5) +#define 
BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6) #define BT_COEX_STATE_DEC_BT_POWER BIT(7) #define BT_COEX_STATE_WIFI_IDLE BIT(8) @@ -78,7 +78,7 @@ #define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25) #define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26) -#define BT_COEX_STATE_BTINFO_COMMON BIT(30) +#define BT_COEX_STATE_BTINFO_COMMON BIT(30) #define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31) #define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(29) @@ -133,28 +133,26 @@ #define BTINFO_B_SCO_ESCO BIT(1) #define BTINFO_B_CONNECTION BIT(0) +void rtl8723e_btdm_coex_all_off(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw); -void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw); - -void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw); -long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw, +void rtl8723e_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw); +long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw, bool balance_on, u8 ms0, u8 ms1); -void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type); -void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type); -u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, +void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 tyep); +void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type); +u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, u8 level_num, u8 rssi_thresh, u8 rssi_thresh1); -u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, - u8 level_num, u8 rssi_thresh, +u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1); void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, - bool reject); - -bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw); -bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, + bool b_reject); +bool rtl8723e_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw); +bool rtl8723e_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c index 5d534df8d90ca2529db7bd3413d05dd76883c2c2..00a0531cc5f49a84637f222332a58450ec118ef9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -25,45 +21,42 @@ * * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #include "hal_btc.h" #include "../pci.h" #include "phy.h" -#include "../rtl8723com/phy_common.h" #include "fw.h" -#include "../rtl8723com/fw_common.h" #include "reg.h" #include "def.h" +#include "../rtl8723com/phy_common.h" + +static struct bt_coexist_8723 hal_coex_8723; -void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - if (!rtlpcipriv->bt_coexist.bt_coexistence) + if (!rtlpriv->btcoexist.bt_coexistence) return; if (ppsc->inactiveps) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BT][DM], Before enter IPS, turn off all Coexist DM\n"); - rtlpcipriv->bt_coexist.cstate = 0; - rtlpcipriv->bt_coexist.previous_state = 0; - rtlpcipriv->bt_coexist.cstate_h = 0; - rtlpcipriv->bt_coexist.previous_state_h = 0; - rtl8723ae_btdm_coex_all_off(hw); + "[BT][DM], Before enter IPS, turn off all Coexist DM\n"); + rtlpriv->btcoexist.cstate = 0; + rtlpriv->btcoexist.previous_state = 0; + rtlpriv->btcoexist.cstate_h = 0; + rtlpriv->btcoexist.previous_state_h = 0; + rtl8723e_btdm_coex_all_off(hw); } } -static enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw) +static enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT; - + enum rt_media_status m_status = RT_MEDIA_DISCONNECT; u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 
1 : 0; - if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) m_status = RT_MEDIA_CONNECT; @@ -71,15 +64,14 @@ static enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw) } void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw, - bool mstatus) + bool mstatus) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u8 h2c_parameter[3] = {0}; u8 chnl; - if (!rtlpcipriv->bt_coexist.bt_coexistence) + if (!rtlpriv->btcoexist.bt_coexistence) return; if (RT_MEDIA_CONNECT == mstatus) @@ -98,14 +90,13 @@ void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw, h2c_parameter[2] = 0x20; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], FW write 0x19 = 0x%x\n", + "[BTCoex], FW write 0x19=0x%x\n", h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]); - rtl8723ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter); - + rtl8723e_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter); } -static bool rtl8723ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw) +static bool rtl8723e_dm_bt_is_wifi_busy(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->link_info.busytraffic || @@ -116,12 +107,12 @@ static bool rtl8723ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw) return false; } -static void rtl8723ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw, - u8 byte1, u8 byte2, u8 byte3, - u8 byte4, u8 byte5) +static void rtl8723e_dm_bt_set_fw_3a(struct ieee80211_hw *hw, + u8 byte1, u8 byte2, u8 byte3, u8 byte4, + u8 byte5) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 h2c_parameter[5] = {0}; + u8 h2c_parameter[5]; h2c_parameter[0] = byte1; h2c_parameter[1] = byte2; @@ -129,37 +120,37 @@ static void rtl8723ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw, h2c_parameter[3] = byte4; h2c_parameter[4] = byte5; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], FW write 0x3a(4bytes) = 0x%x%8x\n", - h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 | - h2c_parameter[3]<<8 | h2c_parameter[4]); - rtl8723ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter); + "[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n", + h2c_parameter[0], h2c_parameter[1]<<24 | + h2c_parameter[2]<<16 | h2c_parameter[3]<<8 | + h2c_parameter[4]); + rtl8723e_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter); } -static bool rtl8723ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw) +static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Need to decrease bt power\n"); - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_DEC_BT_POWER; - return true; + "Need to decrease bt power\n"); + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_DEC_BT_POWER; + return true; } - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER; + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER; return false; } -static bool rtl8723ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw) +static bool rtl8723e_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - if ((rtlpcipriv->bt_coexist.previous_state == - rtlpcipriv->bt_coexist.cstate) && - (rtlpcipriv->bt_coexist.previous_state_h == - rtlpcipriv->bt_coexist.cstate_h)) { + if ((rtlpriv->btcoexist.previous_state == + rtlpriv->btcoexist.cstate) && + (rtlpriv->btcoexist.previous_state_h == + 
rtlpriv->btcoexist.cstate_h)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[DM][BT], Coexist state do not chang!!\n"); return true; @@ -170,86 +161,84 @@ static bool rtl8723ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw) } } -static void rtl8723ae_dm_bt_set_coex_table(struct ieee80211_hw *hw, - u32 val_0x6c0, u32 val_0x6c8, - u32 val_0x6cc) +static void rtl8723e_dm_bt_set_coex_table(struct ieee80211_hw *hw, + u32 val_0x6c0, u32 val_0x6c8, + u32 val_0x6cc) { struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "set coex table, set 0x6c0 = 0x%x\n", val_0x6c0); + "set coex table, set 0x6c0=0x%x\n", val_0x6c0); rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "set coex table, set 0x6c8 = 0x%x\n", val_0x6c8); + "set coex table, set 0x6c8=0x%x\n", val_0x6c8); rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "set coex table, set 0x6cc = 0x%x\n", val_0x6cc); + "set coex table, set 0x6cc=0x%x\n", val_0x6cc); rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc); } -static void rtl8723ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool mode) +static void rtl8723e_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - if (BT_PTA_MODE_ON == mode) { + if (BT_PTA_MODE_ON == b_mode) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on, "); /* Enable GPIO 0/1/2/3/8 pins for bt */ rtl_write_byte(rtlpriv, 0x40, 0x20); - rtlpcipriv->bt_coexist.hw_coexist_all_off = false; + rtlpriv->btcoexist.hw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n"); rtl_write_byte(rtlpriv, 0x40, 0x0); } } -static void rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, - u8 type) +static void rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, + u8 type) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); if (BT_RF_RX_LPF_CORNER_SHRINK == type) { - /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu*/ + /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "Shrink RF Rx LPF corner!!\n"); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, - 0xf0ff7); - rtlpcipriv->bt_coexist.sw_coexist_all_off = false; + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, + 0xfffff, 0xf0ff7); + rtlpriv->btcoexist.sw_coexist_all_off = false; } else if (BT_RF_RX_LPF_CORNER_RESUME == type) { /*Resume RF Rx LPF corner*/ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "Resume RF Rx LPF corner!!\n"); - rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, - rtlpcipriv->bt_coexist.bt_rfreg_origin_1e); + rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, + rtlpriv->btcoexist.bt_rfreg_origin_1e); } } -static void rtl8723ae_bt_set_penalty_tx_rate_adap(struct ieee80211_hw *hw, - u8 ra_type) +static void dm_bt_set_sw_penalty_tx_rate_adapt(struct ieee80211_hw *hw, + u8 ra_type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - u8 tmu1; + u8 tmp_u1; - tmu1 = rtl_read_byte(rtlpriv, 0x4fd); - tmu1 |= BIT(0); + tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd); + tmp_u1 |= BIT(0); if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "Tx rate adaptive, set low penalty!!\n"); - tmu1 &= ~BIT(2); - rtlpcipriv->bt_coexist.sw_coexist_all_off = false; + "Tx rate adaptive, set 
low penalty!!\n"); + tmp_u1 &= ~BIT(2); + rtlpriv->btcoexist.sw_coexist_all_off = false; } else if (BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "Tx rate adaptive, set normal!!\n"); - tmu1 |= BIT(2); + "Tx rate adaptive, set normal!!\n"); + tmp_u1 |= BIT(2); } - rtl_write_byte(rtlpriv, 0x4fd, tmu1); + + rtl_write_byte(rtlpriv, 0x4fd, tmp_u1); } -static void rtl8723ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw, +static void rtl8723e_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw, struct btdm_8723 *btdm) { btdm->all_off = false; @@ -292,32 +281,31 @@ static void rtl8723ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw, btdm->dec_bt_pwr = false; } -static void dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw, - struct btdm_8723 *btdm) +static void rtl8723e_dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw, + struct btdm_8723 *btdm) { - rtl8723ae_dm_bt_btdm_structure_reload(hw, btdm); + rtl8723e_dm_bt_btdm_structure_reload(hw, btdm); btdm->all_off = true; btdm->pta_on = false; btdm->wlan_act_hi = 0x10; } -static bool rtl8723ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw) +static bool rtl8723e_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); struct btdm_8723 btdm8723; - bool common = false; + bool b_common = false; - rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723); + rtl8723e_dm_bt_btdm_structure_reload(hw, &btdm8723); - if (!rtl8723ae_dm_bt_is_wifi_busy(hw) - && !rtlpcipriv->bt_coexist.bt_busy) { + if (!rtl8723e_dm_bt_is_wifi_busy(hw) && + !rtlpriv->btcoexist.bt_busy) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Wifi idle + Bt idle, bt coex mechanism always off!!\n"); - dm_bt_btdm_structure_reload_all_off(hw, &btdm8723); - common = true; - } else if (rtl8723ae_dm_bt_is_wifi_busy(hw) - && !rtlpcipriv->bt_coexist.bt_busy) { + rtl8723e_dm_bt_btdm_structure_reload_all_off(hw, &btdm8723); + b_common = true; + } else if (rtl8723e_dm_bt_is_wifi_busy(hw) && + !rtlpriv->btcoexist.bt_busy) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Wifi non-idle + Bt disabled/idle!!\n"); btdm8723.low_penalty_rate_adaptive = true; @@ -338,17 +326,17 @@ static bool rtl8723ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw) btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF; btdm8723.b2_ant_hid_en = false; - common = true; - } else if (rtlpcipriv->bt_coexist.bt_busy) { + b_common = true; + } else if (rtlpriv->btcoexist.bt_busy) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Bt non-idle!\n"); + "Bt non-idle!\n"); if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Wifi connection exist\n"); - common = false; + "Wifi connection exist\n"); + b_common = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "No Wifi connection!\n"); + "No Wifi connection!\n"); btdm8723.rf_rx_lpf_shrink = true; btdm8723.low_penalty_rate_adaptive = false; btdm8723.reject_aggre_pkt = false; @@ -367,27 +355,28 @@ static bool rtl8723ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw) btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF; btdm8723.b2_ant_hid_en = false; - common = true; + b_common = true; } } - if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw)) + if (rtl8723e_dm_bt_need_to_dec_bt_pwr(hw)) btdm8723.dec_bt_pwr = true; - if (common) - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BTINFO_COMMON; + if (b_common) + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_BTINFO_COMMON; - if 
(common && rtl8723ae_dm_bt_is_coexist_state_changed(hw)) - rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723); + if (b_common && rtl8723e_dm_bt_is_coexist_state_changed(hw)) + rtl8723e_dm_bt_set_bt_dm(hw, &btdm8723); - return common; + return b_common; } -static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw, - bool sw_dac_swing_on, - u32 sw_dac_swing_lvl) +static void rtl8723e_dm_bt_set_sw_full_time_dac_swing( + struct ieee80211_hw *hw, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); if (sw_dac_swing_on) { @@ -395,7 +384,7 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw, "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl); rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, sw_dac_swing_lvl); - rtlpcipriv->bt_coexist.sw_coexist_all_off = false; + rtlpriv->btcoexist.sw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], SwDacSwing Off!\n"); @@ -403,10 +392,9 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw, } } -static void rtl8723ae_dm_bt_set_fw_dec_bt_pwr(struct ieee80211_hw *hw, - bool dec_bt_pwr) +static void rtl8723e_dm_bt_set_fw_dec_bt_pwr( + struct ieee80211_hw *hw, bool dec_bt_pwr) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[1] = {0}; @@ -414,87 +402,86 @@ static void rtl8723ae_dm_bt_set_fw_dec_bt_pwr(struct ieee80211_hw *hw, if (dec_bt_pwr) { h2c_parameter[0] |= BIT(1); - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], decrease Bt Power : %s, write 0x21 = 0x%x\n", + "[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n", (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw, - bool enable, bool dac_swing_on) +static void rtl8723e_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw, + bool b_enable, bool b_dac_swing_on) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[1] = {0}; - if (enable) { + if (b_enable) { h2c_parameter[0] |= BIT(0); - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } - if (dac_swing_on) + if (b_dac_swing_on) h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */ + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15 = 0x%x\n", - (enable ? "ON!!" : "OFF!!"), (dac_swing_on ? "ON" : "OFF"), + "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n", + (b_enable ? "ON!!" : "OFF!!"), (b_dac_swing_on ? 
"ON" : "OFF"), h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw, - bool enable, u8 ant_num, u8 nav_en, - u8 dac_swing_en) +static void rtl8723e_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw, + bool b_enable, u8 ant_num, + u8 nav_en, u8 dac_swing_en) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 h2c_parameter[1] = {0}; u8 h2c_parameter1[1] = {0}; h2c_parameter[0] = 0; h2c_parameter1[0] = 0; - if (enable) { + if (b_enable) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], set BT PTA update manager to trigger update!!\n"); h2c_parameter1[0] |= BIT(0); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], turn TDMA mode ON!!\n"); + "[BTCoex], turn TDMA mode ON!!\n"); h2c_parameter[0] |= BIT(0); /* function enable */ if (TDMA_1ANT == ant_num) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_1ANT\n"); + "[BTCoex], TDMA_1ANT\n"); h2c_parameter[0] |= BIT(1); } else if (TDMA_2ANT == ant_num) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_2ANT\n"); + "[BTCoex], TDMA_2ANT\n"); } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Unknown Ant\n"); + "[BTCoex], Unknown Ant\n"); } if (TDMA_NAV_OFF == nav_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_NAV_OFF\n"); + "[BTCoex], TDMA_NAV_OFF\n"); } else if (TDMA_NAV_ON == nav_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_NAV_ON\n"); + "[BTCoex], TDMA_NAV_ON\n"); h2c_parameter[0] |= BIT(2); } if (TDMA_DAC_SWING_OFF == dac_swing_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_DAC_SWING_OFF\n"); + "[BTCoex], TDMA_DAC_SWING_OFF\n"); } else if (TDMA_DAC_SWING_ON == dac_swing_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TDMA_DAC_SWING_ON\n"); + "[BTCoex], TDMA_DAC_SWING_ON\n"); h2c_parameter[0] |= BIT(4); } - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], set BT PTA update manager to no update!!\n"); @@ -503,46 +490,46 @@ static void rtl8723ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw, } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], FW2AntTDMA, write 0x26 = 0x%x\n", + "[BTCoex], FW2AntTDMA, write 0x26=0x%x\n", h2c_parameter1[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1); + rtl8723e_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], FW2AntTDMA, write 0x14 = 0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter); + "[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", + h2c_parameter[0]); + rtl8723e_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw, - bool enable) +static void rtl8723e_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw, + bool b_enable) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 h2c_parameter[1] = {0}; - if (enable) { + if (b_enable) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], BT Ignore Wlan_Act !!\n"); + "[BTCoex], BT Ignore Wlan_Act !!\n"); h2c_parameter[0] |= BIT(0); /* function enable */ - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, 
DBG_TRACE, - "[BTCoex], BT don't ignore Wlan_Act !!\n"); + "[BTCoex], BT don't ignore Wlan_Act !!\n"); } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25 = 0x%x\n", + "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw, - bool enable, u8 ant_num, - u8 nav_en) +static void rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw, + bool b_enable, u8 ant_num, + u8 nav_en) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 h2c_parameter[2] = {0}; /* Only 8723 B cut should do this */ @@ -552,460 +539,467 @@ static void rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw, return; } - if (enable) { + if (b_enable) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], turn TTDMA mode ON!!\n"); - h2c_parameter[0] |= BIT(0); /* function enable */ + h2c_parameter[0] |= BIT(0); /* function enable */ if (TDMA_1ANT == ant_num) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], TTDMA_1ANT\n"); h2c_parameter[0] |= BIT(1); } else if (TDMA_2ANT == ant_num) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TTDMA_2ANT\n"); + "[BTCoex], TTDMA_2ANT\n"); } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Unknown Ant\n"); + "[BTCoex], Unknown Ant\n"); } if (TDMA_NAV_OFF == nav_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TTDMA_NAV_OFF\n"); + "[BTCoex], TTDMA_NAV_OFF\n"); } else if (TDMA_NAV_ON == nav_en) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], TTDMA_NAV_ON\n"); + "[BTCoex], TTDMA_NAV_ON\n"); h2c_parameter[1] |= BIT(0); } - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], turn TTDMA mode OFF!!\n"); + "[BTCoex], turn TTDMA mode OFF!!\n"); } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], FW Traditional TDMA, write 0x33 = 0x%x\n", - h2c_parameter[0] << 8 | h2c_parameter[1]); + "[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n", + h2c_parameter[0] << 8 | h2c_parameter[1]); - rtl8723ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw, - u8 dac_swing_lvl) +static void rtl8723e_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw, + u8 dac_swing_lvl) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = dac_swing_lvl; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl); + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], write 0x29 = 0x%x\n", h2c_parameter[0]); + "[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, - bool enable) +static void rtl8723e_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, + bool b_enable) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = 0; - if (enable) { + if (b_enable) { 
h2c_parameter[0] |= BIT(0); - rtlpcipriv->bt_coexist.fw_coexist_all_off = false; + rtlpriv->btcoexist.fw_coexist_all_off = false; } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Set BT HID information = 0x%x\n", enable); + "[BTCoex], Set BT HID information=0x%x\n", b_enable); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], write 0x24 = 0x%x\n", h2c_parameter[0]); + "[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw, - u8 retry_index) +static void rtl8723e_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw, + u8 retry_index) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter[1] = {0}; - h2c_parameter[0] = retry_index; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Set BT Retry Index=%d\n", retry_index); + "[BTCoex], Set BT Retry Index=%d\n", retry_index); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], write 0x23 = 0x%x\n", h2c_parameter[0]); + "[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw, - u8 wlan_act_hi, u8 wlan_act_lo) +static void rtl8723e_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw, + u8 wlan_act_hi, u8 wlan_act_lo) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 h2c_parameter_hi[1] = {0}; u8 h2c_parameter_lo[1] = {0}; - h2c_parameter_hi[0] = wlan_act_hi; h2c_parameter_lo[0] = wlan_act_lo; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], Set WLAN_ACT Hi:Lo = 0x%x/0x%x\n", wlan_act_hi, - wlan_act_lo); + "[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n", + wlan_act_hi, wlan_act_lo); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], write 0x22 = 0x%x\n", h2c_parameter_hi[0]); + "[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], write 0x11 = 0x%x\n", h2c_parameter_lo[0]); + "[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]); /* WLAN_ACT = High duration, unit:ms */ - rtl8723ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi); + rtl8723e_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi); /* WLAN_ACT = Low duration, unit:3*625us */ - rtl8723ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo); + rtl8723e_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo); } -void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8723 *btdm) +void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw, + struct btdm_8723 *btdm) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct btdm_8723 *btdm_8723 = &rtlhal->hal_coex_8723.btdm; + struct btdm_8723 *btdm_8723 = &hal_coex_8723.btdm; u8 i; + bool fw_current_inpsmode = false; bool fw_ps_awake = true; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, - (u8 *)(&fw_current_inpsmode)); + (u8 *)(&fw_current_inpsmode)); rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, - (u8 *)(&fw_ps_awake)); + (u8 *)(&fw_ps_awake)); - /* check new setting is different than the old one, - * if all the same, don't do the setting again. - */ + /* check new setting is different with the old one, */ + /* if all the same, don't do the setting again. 
*/ if (memcmp(btdm_8723, btdm, sizeof(struct btdm_8723)) == 0) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], the same coexist setting, return!!\n"); + "[BTCoex], the same coexist setting, return!!\n"); return; } else { /* save the new coexist setting */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], UPDATE TO NEW COEX SETTING!!\n"); + "[BTCoex], UPDATE TO NEW COEX SETTING!!\n"); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bAllOff = 0x%x/ 0x%x\n", - btdm_8723->all_off, btdm->all_off); + "[BTCoex], original/new bAllOff=0x%x/ 0x%x\n", + btdm_8723->all_off, btdm->all_off); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new agc_table_en = 0x%x/ 0x%x\n", - btdm_8723->agc_table_en, btdm->agc_table_en); + "[BTCoex], original/new agc_table_en=0x%x/ 0x%x\n", + btdm_8723->agc_table_en, btdm->agc_table_en); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new adc_back_off_on = 0x%x/ 0x%x\n", - btdm_8723->adc_back_off_on, btdm->adc_back_off_on); + "[BTCoex], original/new adc_back_off_on=0x%x/ 0x%x\n", + btdm_8723->adc_back_off_on, + btdm->adc_back_off_on); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new b2_ant_hid_en = 0x%x/ 0x%x\n", + "[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x\n", btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bLowPenaltyRateAdaptive = 0x%x/ 0x%x\n", + "[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x\n", btdm_8723->low_penalty_rate_adaptive, btdm->low_penalty_rate_adaptive); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bRfRxLpfShrink = 0x%x/ 0x%x\n", - btdm_8723->rf_rx_lpf_shrink, btdm->rf_rx_lpf_shrink); + "[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x\n", + btdm_8723->rf_rx_lpf_shrink, + btdm->rf_rx_lpf_shrink); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bRejectAggrePkt = 0x%x/ 0x%x\n", - btdm_8723->reject_aggre_pkt, btdm->reject_aggre_pkt); + "[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x\n", + btdm_8723->reject_aggre_pkt, + btdm->reject_aggre_pkt); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new tdma_on = 0x%x/ 0x%x\n", + "[BTCoex], original/new tdma_on=0x%x/ 0x%x\n", btdm_8723->tdma_on, btdm->tdma_on); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new tdmaAnt = 0x%x/ 0x%x\n", + "[BTCoex], original/new tdmaAnt=0x%x/ 0x%x\n", btdm_8723->tdma_ant, btdm->tdma_ant); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new tdmaNav = 0x%x/ 0x%x\n", + "[BTCoex], original/new tdmaNav=0x%x/ 0x%x\n", btdm_8723->tdma_nav, btdm->tdma_nav); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new tdma_dac_swing = 0x%x/ 0x%x\n", + "[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x\n", btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new fwDacSwingLvl = 0x%x/ 0x%x\n", - btdm_8723->fw_dac_swing_lvl, btdm->fw_dac_swing_lvl); + "[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x\n", + btdm_8723->fw_dac_swing_lvl, + btdm->fw_dac_swing_lvl); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bTraTdmaOn = 0x%x/ 0x%x\n", + "[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x\n", btdm_8723->tra_tdma_on, btdm->tra_tdma_on); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new traTdmaAnt = 0x%x/ 0x%x\n", + "[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x\n", 
btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new traTdmaNav = 0x%x/ 0x%x\n", + "[BTCoex], original/new traTdmaNav=0x%x/ 0x%x\n", btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bPsTdmaOn = 0x%x/ 0x%x\n", + "[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x\n", btdm_8723->ps_tdma_on, btdm->ps_tdma_on); for (i = 0; i < 5; i++) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new psTdmaByte[i] = 0x%x/ 0x%x\n", + "[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x\n", btdm_8723->ps_tdma_byte[i], btdm->ps_tdma_byte[i]); } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bIgnoreWlanAct = 0x%x/ 0x%x\n", - btdm_8723->ignore_wlan_act, btdm->ignore_wlan_act); + "[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x\n", + btdm_8723->ignore_wlan_act, + btdm->ignore_wlan_act); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new bPtaOn = 0x%x/ 0x%x\n", - btdm_8723->pta_on, btdm->pta_on); + "[BTCoex], original/new bPtaOn=0x%x/ 0x%x\n", + btdm_8723->pta_on, btdm->pta_on); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new val_0x6c0 = 0x%x/ 0x%x\n", - btdm_8723->val_0x6c0, btdm->val_0x6c0); + "[BTCoex], original/new val_0x6c0=0x%x/ 0x%x\n", + btdm_8723->val_0x6c0, btdm->val_0x6c0); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new val_0x6c8 = 0x%x/ 0x%x\n", - btdm_8723->val_0x6c8, btdm->val_0x6c8); + "[BTCoex], original/new val_0x6c8=0x%x/ 0x%x\n", + btdm_8723->val_0x6c8, btdm->val_0x6c8); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new val_0x6cc = 0x%x/ 0x%x\n", - btdm_8723->val_0x6cc, btdm->val_0x6cc); + "[BTCoex], original/new val_0x6cc=0x%x/ 0x%x\n", + btdm_8723->val_0x6cc, btdm->val_0x6cc); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new sw_dac_swing_on = 0x%x/ 0x%x\n", - btdm_8723->sw_dac_swing_on, btdm->sw_dac_swing_on); + "[BTCoex], original/new sw_dac_swing_on=0x%x/ 0x%x\n", + btdm_8723->sw_dac_swing_on, + btdm->sw_dac_swing_on); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new sw_dac_swing_lvl = 0x%x/ 0x%x\n", + "[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x\n", btdm_8723->sw_dac_swing_lvl, btdm->sw_dac_swing_lvl); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new wlanActHi = 0x%x/ 0x%x\n", + "[BTCoex], original/new wlanActHi=0x%x/ 0x%x\n", btdm_8723->wlan_act_hi, btdm->wlan_act_hi); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new wlanActLo = 0x%x/ 0x%x\n", + "[BTCoex], original/new wlanActLo=0x%x/ 0x%x\n", btdm_8723->wlan_act_lo, btdm->wlan_act_lo); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], original/new btRetryIndex = 0x%x/ 0x%x\n", - btdm_8723->bt_retry_index, btdm->bt_retry_index); + "[BTCoex], original/new btRetryIndex=0x%x/ 0x%x\n", + btdm_8723->bt_retry_index, btdm->bt_retry_index); memcpy(btdm_8723, btdm, sizeof(struct btdm_8723)); } - /* - * Here we only consider when Bt Operation + /* Here we only consider when Bt Operation * inquiry/paging/pairing is ON * we only need to turn off TDMA */ - if (rtlpcipriv->bt_coexist.hold_for_bt_operation) { + if (rtlpriv->btcoexist.hold_for_bt_operation) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], set to ignore wlanAct for BT OP!!\n"); - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, true); + "[BTCoex], set to ignore wlanAct for BT OP!!\n"); + 
rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, true); return; } if (btdm->all_off) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], disable all coexist mechanism !!\n"); - rtl8723ae_btdm_coex_all_off(hw); + "[BTCoex], disable all coexist mechanism !!\n"); + rtl8723e_btdm_coex_all_off(hw); return; } - rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, btdm->reject_aggre_pkt); + rtl8723e_dm_bt_reject_ap_aggregated_packet(hw, btdm->reject_aggre_pkt); if (btdm->low_penalty_rate_adaptive) - rtl8723ae_bt_set_penalty_tx_rate_adap(hw, - BT_TX_RATE_ADAPTIVE_LOW_PENALTY); + dm_bt_set_sw_penalty_tx_rate_adapt(hw, BT_TX_RATE_ADAPTIVE_LOW_PENALTY); else - rtl8723ae_bt_set_penalty_tx_rate_adap(hw, - BT_TX_RATE_ADAPTIVE_NORMAL); + dm_bt_set_sw_penalty_tx_rate_adapt(hw, + BT_TX_RATE_ADAPTIVE_NORMAL); if (btdm->rf_rx_lpf_shrink) - rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, - BT_RF_RX_LPF_CORNER_SHRINK); + rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(hw, + BT_RF_RX_LPF_CORNER_SHRINK); else - rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, - BT_RF_RX_LPF_CORNER_RESUME); + rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(hw, + BT_RF_RX_LPF_CORNER_RESUME); if (btdm->agc_table_en) - rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON); + rtl8723e_dm_bt_agc_table(hw, BT_AGCTABLE_ON); else - rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); + rtl8723e_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); if (btdm->adc_back_off_on) - rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_ON); + rtl8723e_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_ON); else - rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF); + rtl8723e_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF); - rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, btdm->bt_retry_index); + rtl8723e_dm_bt_set_fw_bt_retry_index(hw, btdm->bt_retry_index); - rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, btdm->fw_dac_swing_lvl); - rtl8723ae_dm_bt_set_fw_wlan_act(hw, btdm->wlan_act_hi, + rtl8723e_dm_bt_set_fw_dac_swing_level(hw, btdm->fw_dac_swing_lvl); + rtl8723e_dm_bt_set_fw_wlan_act(hw, btdm->wlan_act_hi, btdm->wlan_act_lo); - rtl8723ae_dm_bt_set_coex_table(hw, btdm->val_0x6c0, - btdm->val_0x6c8, btdm->val_0x6cc); - rtl8723ae_dm_bt_set_hw_pta_mode(hw, btdm->pta_on); + rtl8723e_dm_bt_set_coex_table(hw, btdm->val_0x6c0, + btdm->val_0x6c8, btdm->val_0x6cc); + rtl8723e_dm_bt_set_hw_pta_mode(hw, btdm->pta_on); /* Note: There is a constraint between TDMA and 2AntHID - * Only one of 2AntHid and tdma can be turned on - * We should turn off those mechanisms first - * and then turn on them on. + * Only one of 2AntHid and tdma can be turn on + * We should turn off those mechanisms should be turned off first + * and then turn on those mechanisms should be turned on. */ if (btdm->b2_ant_hid_en) { /* turn off tdma */ - rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, + rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, btdm->tra_tdma_ant, btdm->tra_tdma_nav); - rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, + rtl8723e_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, btdm->tdma_nav, btdm->tdma_dac_swing); /* turn off Pstdma */ - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, + rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, btdm->ignore_wlan_act); /* Antenna control by PTA, 0x870 = 0x300. 
*/ - rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + rtl8723e_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); /* turn on 2AntHid */ - rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, true); - rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, true, true); + rtl8723e_dm_bt_set_fw_bt_hid_info(hw, true); + rtl8723e_dm_bt_set_fw_2_ant_hid(hw, true, true); } else if (btdm->tdma_on) { /* turn off 2AntHid */ - rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false); - rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + rtl8723e_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8723e_dm_bt_set_fw_2_ant_hid(hw, false, false); /* turn off pstdma */ - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, + rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, btdm->ignore_wlan_act); /* Antenna control by PTA, 0x870 = 0x300. */ - rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + rtl8723e_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); /* turn on tdma */ - rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, - btdm->tra_tdma_ant, btdm->tra_tdma_nav); - rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, true, btdm->tdma_ant, - btdm->tdma_nav, btdm->tdma_dac_swing); + rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, + btdm->tra_tdma_ant, + btdm->tra_tdma_nav); + rtl8723e_dm_bt_set_fw_tdma_ctrl(hw, true, btdm->tdma_ant, + btdm->tdma_nav, + btdm->tdma_dac_swing); } else if (btdm->ps_tdma_on) { /* turn off 2AntHid */ - rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false); - rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + rtl8723e_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8723e_dm_bt_set_fw_2_ant_hid(hw, false, false); /* turn off tdma */ - rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, - btdm->tra_tdma_ant, btdm->tra_tdma_nav); - rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, - btdm->tdma_nav, btdm->tdma_dac_swing); + rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, + btdm->tra_tdma_ant, + btdm->tra_tdma_nav); + rtl8723e_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, + btdm->tdma_nav, + btdm->tdma_dac_swing); /* turn on pstdma */ - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, - btdm->ignore_wlan_act); - rtl8723ae_dm_bt_set_fw_3a(hw, - btdm->ps_tdma_byte[0], - btdm->ps_tdma_byte[1], - btdm->ps_tdma_byte[2], - btdm->ps_tdma_byte[3], - btdm->ps_tdma_byte[4]); + rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, + btdm->ignore_wlan_act); + rtl8723e_dm_bt_set_fw_3a(hw, btdm->ps_tdma_byte[0], + btdm->ps_tdma_byte[1], + btdm->ps_tdma_byte[2], + btdm->ps_tdma_byte[3], + btdm->ps_tdma_byte[4]); } else { /* turn off 2AntHid */ - rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false); - rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + rtl8723e_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8723e_dm_bt_set_fw_2_ant_hid(hw, false, false); /* turn off tdma */ - rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, - btdm->tra_tdma_ant, btdm->tra_tdma_nav); - rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, - btdm->tdma_nav, btdm->tdma_dac_swing); + rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on, + btdm->tra_tdma_ant, + btdm->tra_tdma_nav); + rtl8723e_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant, + btdm->tdma_nav, + btdm->tdma_dac_swing); /* turn off pstdma */ - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, - btdm->ignore_wlan_act); + rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, + btdm->ignore_wlan_act); /* Antenna control by PTA, 0x870 = 0x300. 
*/ - rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + rtl8723e_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); } /* Note: - * We should add delay for making sure sw DacSwing can be set - * sucessfully. Because of that rtl8723ae_dm_bt_set_fw_2_ant_hid() - * and rtl8723ae_dm_bt_set_fw_tdma_ctrl() + * We should add delay for making sure + * sw DacSwing can be set sucessfully. + * because of that rtl8723e_dm_bt_set_fw_2_ant_hid() + * and rtl8723e_dm_bt_set_fw_tdma_ctrl() * will overwrite the reg 0x880. */ mdelay(30); - rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw, - btdm->sw_dac_swing_on, btdm->sw_dac_swing_lvl); - rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, btdm->dec_bt_pwr); + rtl8723e_dm_bt_set_sw_full_time_dac_swing(hw, btdm->sw_dac_swing_on, + btdm->sw_dac_swing_lvl); + rtl8723e_dm_bt_set_fw_dec_bt_pwr(hw, btdm->dec_bt_pwr); } -/*============================================================ - * extern function start with BTDM_ - *============================================================ +/* ============================================================ */ +/* extern function start with BTDM_ */ +/* ============================================================i */ -static u32 rtl8723ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw) +static u32 rtl8723e_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw) { - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 counters = 0; + u32 counters = 0; - counters = rtlhal->hal_coex_8723.high_priority_tx + - rtlhal->hal_coex_8723.high_priority_rx; + counters = hal_coex_8723.high_priority_tx + + hal_coex_8723.high_priority_rx; return counters; } -static u32 rtl8723ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw) +static u32 rtl8723e_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw) { - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 counters = 0; - return rtlhal->hal_coex_8723.low_priority_tx + - rtlhal->hal_coex_8723.low_priority_rx; + counters = hal_coex_8723.low_priority_tx + + hal_coex_8723.low_priority_rx; + return counters; } -static u8 rtl8723ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw) +static u8 rtl8723e_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - u32 bt_tx_rx_cnt = 0; - u8 bt_tx_rx_cnt_lvl = 0; + u32 bt_tx_rx_cnt = 0; + u8 bt_tx_rx_cnt_lvl = 0; - bt_tx_rx_cnt = rtl8723ae_dm_bt_tx_rx_couter_h(hw) + - rtl8723ae_dm_bt_tx_rx_couter_l(hw); + bt_tx_rx_cnt = rtl8723e_dm_bt_tx_rx_couter_h(hw) + + rtl8723e_dm_bt_tx_rx_couter_l(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt); - rtlpcipriv->bt_coexist.cstate_h &= - ~(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1 | + rtlpriv->btcoexist.cstate_h &= ~ + (BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1| BT_COEX_STATE_BT_CNT_LEVEL_2); if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters at level 3\n"); bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3; - rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_3; + rtlpriv->btcoexist.cstate_h |= + BT_COEX_STATE_BT_CNT_LEVEL_3; } else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters at level 2\n"); bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2; - rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_2; + rtlpriv->btcoexist.cstate_h |= + BT_COEX_STATE_BT_CNT_LEVEL_2; } else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) { RT_TRACE(rtlpriv, 
COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters at level 1\n"); bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1; - rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_1; + rtlpriv->btcoexist.cstate_h |= + BT_COEX_STATE_BT_CNT_LEVEL_1; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters at level 0\n"); bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0; - rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_0; + rtlpriv->btcoexist.cstate_h |= + BT_COEX_STATE_BT_CNT_LEVEL_0; } return bt_tx_rx_cnt_lvl; } -static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct btdm_8723 btdm8723; u8 bt_rssi_state, bt_rssi_state1; - u8 bt_tx_rx_cnt_lvl; + u8 bt_tx_rx_cnt_lvl = 0; - rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723); + rtl8723e_dm_bt_btdm_structure_reload(hw, &btdm8723); btdm8723.rf_rx_lpf_shrink = true; btdm8723.low_penalty_rate_adaptive = true; btdm8723.reject_aggre_pkt = false; - bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw); + bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl); @@ -1051,10 +1045,10 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT20 or Legacy\n"); - bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2, - 47, 0); - bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2, - 27, 0); + bt_rssi_state = + rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0); + bt_rssi_state1 = + rtl8723e_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0); /* coex table */ btdm8723.val_0x6c0 = 0x55555555; @@ -1063,15 +1057,15 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) /* sw mechanism */ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) || - (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Wifi rssi high\n"); + "Wifi rssi high\n"); btdm8723.agc_table_en = true; btdm8723.adc_back_off_on = true; btdm8723.sw_dac_swing_on = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Wifi rssi low\n"); + "Wifi rssi low\n"); btdm8723.agc_table_en = false; btdm8723.adc_back_off_on = false; btdm8723.sw_dac_swing_on = false; @@ -1080,16 +1074,15 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) /* fw mechanism */ btdm8723.ps_tdma_on = true; if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) || - (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) { + (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Wifi rssi-1 high\n"); - /* only rssi high we need to do this, - * when rssi low, the value will modified by fw - */ + /* only rssi high we need to do this, */ + /* when rssi low, the value will modified by fw */ rtl_write_byte(rtlpriv, 0x883, 0x40); if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT TxRx Counters >= 1400\n"); + "[BTCoex], BT TxRx Counters >= 1400\n"); btdm8723.ps_tdma_byte[0] = 0xa3; btdm8723.ps_tdma_byte[1] = 0x5; btdm8723.ps_tdma_byte[2] = 0x5; @@ -1097,7 +1090,7 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) btdm8723.ps_tdma_byte[4] = 0x80; } else if 
(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"); + "[BTCoex], BT TxRx Counters>= 1200 && < 1400\n"); btdm8723.ps_tdma_byte[0] = 0xa3; btdm8723.ps_tdma_byte[1] = 0xa; btdm8723.ps_tdma_byte[2] = 0xa; @@ -1114,7 +1107,7 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) } } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Wifi rssi-1 low\n"); + "Wifi rssi-1 low\n"); if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters >= 1400\n"); @@ -1143,16 +1136,15 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) } } - if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw)) + if (rtl8723e_dm_bt_need_to_dec_bt_pwr(hw)) btdm8723.dec_bt_pwr = true; /* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", - rtlhal->hal_coex_8723.bt_inq_page_start_time, - bt_tx_rx_cnt_lvl); - if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) || + hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); + if ((hal_coex_8723.bt_inq_page_start_time) || (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], Set BT inquiry / page scan 0x3a setting\n"); @@ -1164,33 +1156,35 @@ static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) btdm8723.ps_tdma_byte[4] = 0x80; } - if (rtl8723ae_dm_bt_is_coexist_state_changed(hw)) - rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723); + if (rtl8723e_dm_bt_is_coexist_state_changed(hw)) + rtl8723e_dm_bt_set_bt_dm(hw, &btdm8723); + } -static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct btdm_8723 btdm8723; + u8 bt_rssi_state, bt_rssi_state1; - u32 bt_tx_rx_cnt_lvl; + u32 bt_tx_rx_cnt_lvl = 0; + + rtl8723e_dm_bt_btdm_structure_reload(hw, &btdm8723); - rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723); btdm8723.rf_rx_lpf_shrink = true; btdm8723.low_penalty_rate_adaptive = true; btdm8723.reject_aggre_pkt = false; - bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw); + bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl); + "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n"); - bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2, - 37, 0); + bt_rssi_state = + rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 37, 0); /* coex table */ btdm8723.val_0x6c0 = 0x55555555; @@ -1205,12 +1199,12 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) /* fw mechanism */ btdm8723.ps_tdma_on = true; if ((bt_rssi_state == BT_RSSI_STATE_HIGH) || - (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Wifi rssi high\n"); + "Wifi rssi high\n"); if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT TxRx Counters >= 1400\n"); + "[BTCoex], BT TxRx Counters >= 1400\n"); btdm8723.ps_tdma_byte[0] = 0xa3; btdm8723.ps_tdma_byte[1] = 0x5; btdm8723.ps_tdma_byte[2] 
= 0x5; @@ -1244,7 +1238,8 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) btdm8723.ps_tdma_byte[2] = 0x5; btdm8723.ps_tdma_byte[3] = 0x0; btdm8723.ps_tdma_byte[4] = 0x80; - } else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + } else if (bt_tx_rx_cnt_lvl == + BT_TXRX_CNT_LEVEL_1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n"); btdm8723.ps_tdma_byte[0] = 0xa3; @@ -1265,10 +1260,10 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT20 or Legacy\n"); - bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2, - 47, 0); - bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2, - 27, 0); + bt_rssi_state = + rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0); + bt_rssi_state1 = + rtl8723e_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0); /* coex table */ btdm8723.val_0x6c0 = 0x55555555; @@ -1277,7 +1272,7 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) /* sw mechanism */ if ((bt_rssi_state == BT_RSSI_STATE_HIGH) || - (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Wifi rssi high\n"); btdm8723.agc_table_en = true; @@ -1294,12 +1289,11 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) /* fw mechanism */ btdm8723.ps_tdma_on = true; if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) || - (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) { + (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "Wifi rssi-1 high\n"); - /* only rssi high we need to do this, - * when rssi low, the value will modified by fw - */ + /* only rssi high we need to do this, */ + /* when rssi low, the value will modified by fw */ rtl_write_byte(rtlpriv, 0x883, 0x40); if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, @@ -1357,15 +1351,14 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) } } - if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw)) + if (rtl8723e_dm_bt_need_to_dec_bt_pwr(hw)) btdm8723.dec_bt_pwr = true; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", - rtlhal->hal_coex_8723.bt_inq_page_start_time, - bt_tx_rx_cnt_lvl); + hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); - if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) || + if ((hal_coex_8723.bt_inq_page_start_time) || (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], Set BT inquiry / page scan 0x3a setting\n"); @@ -1377,379 +1370,373 @@ static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw) btdm8723.ps_tdma_byte[4] = 0x80; } - if (rtl8723ae_dm_bt_is_coexist_state_changed(hw)) - rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723); + if (rtl8723e_dm_bt_is_coexist_state_changed(hw)) + rtl8723e_dm_bt_set_bt_dm(hw, &btdm8723); + } -static void rtl8723ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u32 cur_time = jiffies; + u32 cur_time; - if (rtlhal->hal_coex_8723.c2h_bt_inquiry_page) { + cur_time = jiffies; + if (hal_coex_8723.c2h_bt_inquiry_page) { /* bt inquiry or page is started. 
*/ - if (rtlhal->hal_coex_8723.bt_inq_page_start_time == 0) { - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_BT_INQ_PAGE; - rtlhal->hal_coex_8723.bt_inq_page_start_time = cur_time; + if (hal_coex_8723.bt_inq_page_start_time == 0) { + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_BT_INQ_PAGE; + hal_coex_8723.bt_inq_page_start_time = cur_time; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT Inquiry/page is started at time : 0x%x\n", - rtlhal->hal_coex_8723.bt_inq_page_start_time); + hal_coex_8723.bt_inq_page_start_time); } } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n", - rtlhal->hal_coex_8723.bt_inq_page_start_time, cur_time); + hal_coex_8723.bt_inq_page_start_time, cur_time); - if (rtlhal->hal_coex_8723.bt_inq_page_start_time) { + if (hal_coex_8723.bt_inq_page_start_time) { if ((((long)cur_time - - (long)rtlhal->hal_coex_8723.bt_inq_page_start_time) / HZ) >= - 10) { + (long)hal_coex_8723.bt_inq_page_start_time) / HZ) + >= 10) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page >= 10sec!!!"); - rtlhal->hal_coex_8723.bt_inq_page_start_time = 0; - rtlpcipriv->bt_coexist.cstate &= - ~BT_COEX_STATE_BT_INQ_PAGE; + "[BTCoex], BT Inquiry/page >= 10sec!!!"); + hal_coex_8723.bt_inq_page_start_time = 0; + rtlpriv->btcoexist.cstate &= + ~BT_COEX_STATE_BT_INQ_PAGE; } } } -static void rtl8723ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw) { - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); - rtlpcipriv->bt_coexist.cstate &= - ~(BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP | + rtlpriv->btcoexist.cstate &= ~ + (BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP| BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO); - rtlpcipriv->bt_coexist.cstate &= - ~(BT_COEX_STATE_BTINFO_COMMON | - BT_COEX_STATE_BTINFO_B_HID_SCOESCO | + rtlpriv->btcoexist.cstate &= ~ + (BT_COEX_STATE_BTINFO_COMMON | + BT_COEX_STATE_BTINFO_B_HID_SCOESCO| BT_COEX_STATE_BTINFO_B_FTP_A2DP); } -static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) +static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 bt_retry_cnt; u8 bt_info_original; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex] Get bt info by fw!!\n"); + "[BTCoex] Get bt info by fw!!\n"); _rtl8723_dm_bt_check_wifi_state(hw); - if (rtlhal->hal_coex_8723.c2h_bt_info_req_sent) { + if (hal_coex_8723.c2h_bt_info_req_sent) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex] c2h for btInfo not rcvd yet!!\n"); + "[BTCoex] c2h for bt_info not rcvd yet!!\n"); } - bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original; + bt_retry_cnt = hal_coex_8723.bt_retry_cnt; + bt_info_original = hal_coex_8723.c2h_bt_info_original; - /* when bt inquiry or page scan, we have to set h2c 0x25 - * ignore wlanact for continuous 4x2secs - */ - rtl8723ae_dm_bt_inq_page_monitor(hw); - rtl8723ae_dm_bt_reset_action_profile_state(hw); - - if (rtl8723ae_dm_bt_is_2_ant_common_action(hw)) { - rtlpcipriv->bt_coexist.bt_profile_case = BT_COEX_MECH_COMMON; - rtlpcipriv->bt_coexist.bt_profile_action = BT_COEX_MECH_COMMON; + /* when bt inquiry or page scan, we have to set h2c 0x25 */ + /* ignore wlanact for continuous 4x2secs */ + rtl8723e_dm_bt_inq_page_monitor(hw); + 
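The inquiry/page monitor in this hunk latches the jiffies value the first time the C2H report flags BT inquiry/page, and drops the BT_COEX_STATE_BT_INQ_PAGE state again once roughly ten seconds have elapsed. A minimal standalone sketch of that latch-and-expire pattern (plain C with hypothetical names; HZ and the window length are passed in rather than taken from kernel headers):

#include <stdbool.h>

/* Standalone illustration of the latch-and-expire pattern used by the
 * inquiry/page monitor: record the start tick once, report expiry after
 * 'window_sec' seconds, then reset so a new inquiry can be tracked.
 */
struct inq_monitor {
	unsigned long start_tick;	/* 0 means "no inquiry in progress" */
};

static bool inq_monitor_expired(struct inq_monitor *m, bool inquiry_active,
				unsigned long cur_tick, long hz,
				long window_sec)
{
	if (inquiry_active && m->start_tick == 0)
		m->start_tick = cur_tick;	/* latch first sighting */

	if (m->start_tick &&
	    ((long)cur_tick - (long)m->start_tick) / hz >= window_sec) {
		m->start_tick = 0;		/* window expired, re-arm */
		return true;
	}
	return false;
}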
rtl8723e_dm_bt_reset_action_profile_state(hw); + if (rtl8723e_dm_bt_is_2_ant_common_action(hw)) { + rtlpriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON; + rtlpriv->btcoexist.bt_profile_action = BT_COEX_MECH_COMMON; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Action 2-Ant common.\n"); + "Action 2-Ant common.\n"); } else { if ((bt_info_original & BTINFO_B_HID) || - (bt_info_original & BTINFO_B_SCO_BUSY) || - (bt_info_original & BTINFO_B_SCO_ESCO)) { - rtlpcipriv->bt_coexist.cstate |= + (bt_info_original & BTINFO_B_SCO_BUSY) || + (bt_info_original & BTINFO_B_SCO_ESCO)) { + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO; - rtlpcipriv->bt_coexist.bt_profile_case = + rtlpriv->btcoexist.bt_profile_case = BT_COEX_MECH_HID_SCO_ESCO; - rtlpcipriv->bt_coexist.bt_profile_action = + rtlpriv->btcoexist.bt_profile_action = BT_COEX_MECH_HID_SCO_ESCO; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n"); - rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, + "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n"); + rtl8723e_dm_bt_2_ant_hid_sco_esco(hw); } else if ((bt_info_original & BTINFO_B_FTP) || - (bt_info_original & BTINFO_B_A2DP)) { - rtlpcipriv->bt_coexist.cstate |= + (bt_info_original & BTINFO_B_A2DP)) { + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BTINFO_B_FTP_A2DP; - rtlpcipriv->bt_coexist.bt_profile_case = + rtlpriv->btcoexist.bt_profile_case = BT_COEX_MECH_FTP_A2DP; - rtlpcipriv->bt_coexist.bt_profile_action = + rtlpriv->btcoexist.bt_profile_action = BT_COEX_MECH_FTP_A2DP; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "BTInfo: bFTP|bA2DP\n"); - rtl8723ae_dm_bt_2_ant_fta2dp(hw); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, + "BTInfo: bFTP|bA2DP\n"); + rtl8723e_dm_bt_2_ant_ftp_a2dp(hw); } else { - rtlpcipriv->bt_coexist.cstate |= - BT_COEX_STATE_BTINFO_B_HID_SCOESCO; - rtlpcipriv->bt_coexist.bt_profile_case = - BT_COEX_MECH_NONE; - rtlpcipriv->bt_coexist.bt_profile_action = - BT_COEX_MECH_NONE; - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BTInfo: undefined case!!!!\n"); - rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw); + rtlpriv->btcoexist.cstate |= + BT_COEX_STATE_BTINFO_B_HID_SCOESCO; + rtlpriv->btcoexist.bt_profile_case = + BT_COEX_MECH_NONE; + rtlpriv->btcoexist.bt_profile_action = + BT_COEX_MECH_NONE; + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, + "[BTCoex], BTInfo: undefined case!!!!\n"); + rtl8723e_dm_bt_2_ant_hid_sco_esco(hw); } } } -static void _rtl8723ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw) +static void _rtl8723e_dm_bt_coexist_1_ant(struct ieee80211_hw *hw) { + return; } -void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw) { - rtl8723ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3); - rtl8723ae_dm_bt_set_hw_pta_mode(hw, true); + rtl8723e_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3); + rtl8723e_dm_bt_set_hw_pta_mode(hw, true); } -void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw) { - rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, false); - rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); - rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false); - rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false, - TDMA_2ANT, TDMA_NAV_OFF); - rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT, - TDMA_NAV_OFF, TDMA_DAC_SWING_OFF); - rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, 0); - 
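The 2-antenna dispatch above picks a coexistence profile purely from bits of the C2H bt_info byte: any of the HID/SCO-busy/SCO-eSCO bits routes to the HID/SCO/eSCO handler, the FTP/A2DP bits route to the FTP/A2DP handler, and an undefined combination falls back to the HID/SCO handler. A standalone sketch of that decision (the mask values below are placeholders, not the driver's BTINFO_B_* definitions from btc.h):

/* Placeholder masks -- the real values live in the driver's btc.h. */
#define X_HID       (1u << 0)
#define X_SCO_BUSY  (1u << 1)
#define X_SCO_ESCO  (1u << 2)
#define X_FTP       (1u << 3)
#define X_A2DP      (1u << 4)

enum coex_profile { PROFILE_HID_SCO_ESCO, PROFILE_FTP_A2DP };

static enum coex_profile pick_profile(unsigned int bt_info)
{
	if (bt_info & (X_HID | X_SCO_BUSY | X_SCO_ESCO))
		return PROFILE_HID_SCO_ESCO;	/* latency-sensitive traffic */
	if (bt_info & (X_FTP | X_A2DP))
		return PROFILE_FTP_A2DP;	/* bulk / streaming traffic */
	return PROFILE_HID_SCO_ESCO;		/* undefined case: same fallback */
}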
rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false); - rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, 2); - rtl8723ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10); - rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, false); + rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, false); + rtl8723e_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + rtl8723e_dm_bt_set_fw_2_ant_hid(hw, false, false); + rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(hw, false, TDMA_2ANT, + TDMA_NAV_OFF); + rtl8723e_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT, TDMA_NAV_OFF, + TDMA_DAC_SWING_OFF); + rtl8723e_dm_bt_set_fw_dac_swing_level(hw, 0); + rtl8723e_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8723e_dm_bt_set_fw_bt_retry_index(hw, 2); + rtl8723e_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10); + rtl8723e_dm_bt_set_fw_dec_bt_pwr(hw, false); } -void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw) { - rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); - rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF); - rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, false); + rtl8723e_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); + rtl8723e_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF); + rtl8723e_dm_bt_reject_ap_aggregated_packet(hw, false); - rtl8723ae_bt_set_penalty_tx_rate_adap(hw, BT_TX_RATE_ADAPTIVE_NORMAL); - rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME); - rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0); + dm_bt_set_sw_penalty_tx_rate_adapt(hw, BT_TX_RATE_ADAPTIVE_NORMAL); + rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME); + rtl8723e_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0); } -static void rtl8723ae_dm_bt_query_bt_information(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_query_bt_information(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 h2c_parameter[1] = {0}; - rtlhal->hal_coex_8723.c2h_bt_info_req_sent = true; + hal_coex_8723.c2h_bt_info_req_sent = true; h2c_parameter[0] |= BIT(0); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "Query Bt information, write 0x38 = 0x%x\n", - h2c_parameter[0]); + "Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]); - rtl8723ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter); + rtl8723e_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter); } -static void rtl8723ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - u32 reg_htx_rx, reg_ltx_rx, u32_tmp; - u32 reg_htx, reg_hrx, reg_ltx, reg_lrx; - - reg_htx_rx = REG_HIGH_PRIORITY_TXRX; - reg_ltx_rx = REG_LOW_PRIORITY_TXRX; - - u32_tmp = rtl_read_dword(rtlpriv, reg_htx_rx); - reg_htx = u32_tmp & MASKLWORD; - reg_hrx = (u32_tmp & MASKHWORD)>>16; - - u32_tmp = rtl_read_dword(rtlpriv, reg_ltx_rx); - reg_ltx = u32_tmp & MASKLWORD; - reg_lrx = (u32_tmp & MASKHWORD)>>16; - - if (rtlpcipriv->bt_coexist.lps_counter > 1) { - reg_htx %= rtlpcipriv->bt_coexist.lps_counter; - reg_hrx %= rtlpcipriv->bt_coexist.lps_counter; - reg_ltx %= rtlpcipriv->bt_coexist.lps_counter; - reg_lrx %= rtlpcipriv->bt_coexist.lps_counter; + u32 reg_hp_tx_rx, reg_lp_tx_rx, u32_tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_tx_rx = REG_HIGH_PRIORITY_TXRX; + reg_lp_tx_rx = REG_LOW_PRIORITY_TXRX; + + u32_tmp = rtl_read_dword(rtlpriv, reg_hp_tx_rx); + reg_hp_tx = u32_tmp & 
MASKLWORD; + reg_hp_rx = (u32_tmp & MASKHWORD)>>16; + + u32_tmp = rtl_read_dword(rtlpriv, reg_lp_tx_rx); + reg_lp_tx = u32_tmp & MASKLWORD; + reg_lp_rx = (u32_tmp & MASKHWORD)>>16; + + if (rtlpriv->btcoexist.lps_counter > 1) { + reg_hp_tx %= rtlpriv->btcoexist.lps_counter; + reg_hp_rx %= rtlpriv->btcoexist.lps_counter; + reg_lp_tx %= rtlpriv->btcoexist.lps_counter; + reg_lp_rx %= rtlpriv->btcoexist.lps_counter; } - rtlhal->hal_coex_8723.high_priority_tx = reg_htx; - rtlhal->hal_coex_8723.high_priority_rx = reg_hrx; - rtlhal->hal_coex_8723.low_priority_tx = reg_ltx; - rtlhal->hal_coex_8723.low_priority_rx = reg_lrx; + hal_coex_8723.high_priority_tx = reg_hp_tx; + hal_coex_8723.high_priority_rx = reg_hp_rx; + hal_coex_8723.low_priority_tx = reg_lp_tx; + hal_coex_8723.low_priority_rx = reg_lp_rx; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", - reg_htx_rx, reg_htx, reg_htx, reg_hrx, reg_hrx); + "High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", - reg_ltx_rx, reg_ltx, reg_ltx, reg_lrx, reg_lrx); - rtlpcipriv->bt_coexist.lps_counter = 0; + "Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + rtlpriv->btcoexist.lps_counter = 0; + /* rtl_write_byte(rtlpriv, 0x76e, 0xc); */ } -static void rtl8723ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw) +static void rtl8723e_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); bool bt_alife = true; - if (rtlhal->hal_coex_8723.high_priority_tx == 0 && - rtlhal->hal_coex_8723.high_priority_rx == 0 && - rtlhal->hal_coex_8723.low_priority_tx == 0 && - rtlhal->hal_coex_8723.low_priority_rx == 0) + if (hal_coex_8723.high_priority_tx == 0 && + hal_coex_8723.high_priority_rx == 0 && + hal_coex_8723.low_priority_tx == 0 && + hal_coex_8723.low_priority_rx == 0) { bt_alife = false; - if (rtlhal->hal_coex_8723.high_priority_tx == 0xeaea && - rtlhal->hal_coex_8723.high_priority_rx == 0xeaea && - rtlhal->hal_coex_8723.low_priority_tx == 0xeaea && - rtlhal->hal_coex_8723.low_priority_rx == 0xeaea) + } + if (hal_coex_8723.high_priority_tx == 0xeaea && + hal_coex_8723.high_priority_rx == 0xeaea && + hal_coex_8723.low_priority_tx == 0xeaea && + hal_coex_8723.low_priority_rx == 0xeaea) { bt_alife = false; - if (rtlhal->hal_coex_8723.high_priority_tx == 0xffff && - rtlhal->hal_coex_8723.high_priority_rx == 0xffff && - rtlhal->hal_coex_8723.low_priority_tx == 0xffff && - rtlhal->hal_coex_8723.low_priority_rx == 0xffff) + } + if (hal_coex_8723.high_priority_tx == 0xffff && + hal_coex_8723.high_priority_rx == 0xffff && + hal_coex_8723.low_priority_tx == 0xffff && + hal_coex_8723.low_priority_rx == 0xffff) { bt_alife = false; + } if (bt_alife) { - rtlpcipriv->bt_coexist.bt_active_zero_cnt = 0; - rtlpcipriv->bt_coexist.cur_bt_disabled = false; + rtlpriv->btcoexist.bt_active_zero_cnt = 0; + rtlpriv->btcoexist.cur_bt_disabled = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "8723A BT is enabled !!\n"); } else { - rtlpcipriv->bt_coexist.bt_active_zero_cnt++; + rtlpriv->btcoexist.bt_active_zero_cnt++; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "8723A bt all counters = 0, %d times!!\n", - rtlpcipriv->bt_coexist.bt_active_zero_cnt); - if 
(rtlpcipriv->bt_coexist.bt_active_zero_cnt >= 2) { - rtlpcipriv->bt_coexist.cur_bt_disabled = true; + "8723A bt all counters=0, %d times!!\n", + rtlpriv->btcoexist.bt_active_zero_cnt); + if (rtlpriv->btcoexist.bt_active_zero_cnt >= 2) { + rtlpriv->btcoexist.cur_bt_disabled = true; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "8723A BT is disabled !!\n"); } } - if (rtlpcipriv->bt_coexist.pre_bt_disabled != - rtlpcipriv->bt_coexist.cur_bt_disabled) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "8723A BT is from %s to %s!!\n", - (rtlpcipriv->bt_coexist.pre_bt_disabled ? - "disabled" : "enabled"), - (rtlpcipriv->bt_coexist.cur_bt_disabled ? - "disabled" : "enabled")); - rtlpcipriv->bt_coexist.pre_bt_disabled - = rtlpcipriv->bt_coexist.cur_bt_disabled; + if (rtlpriv->btcoexist.pre_bt_disabled != + rtlpriv->btcoexist.cur_bt_disabled) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, + DBG_TRACE, "8723A BT is from %s to %s!!\n", + (rtlpriv->btcoexist.pre_bt_disabled ? + "disabled" : "enabled"), + (rtlpriv->btcoexist.cur_bt_disabled ? + "disabled" : "enabled")); + rtlpriv->btcoexist.pre_bt_disabled + = rtlpriv->btcoexist.cur_bt_disabled; } } -void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw) +void rtl8723e_dm_bt_coexist_8723(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); - rtl8723ae_dm_bt_query_bt_information(hw); - rtl8723ae_dm_bt_bt_hw_counters_monitor(hw); - rtl8723ae_dm_bt_bt_enable_disable_check(hw); + rtl8723e_dm_bt_query_bt_information(hw); + rtl8723e_dm_bt_bt_hw_counters_monitor(hw); + rtl8723e_dm_bt_bt_enable_disable_check(hw); - if (rtlpcipriv->bt_coexist.bt_ant_num == ANT_X2) { + if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], 2 Ant mechanism\n"); - _rtl8723ae_dm_bt_coexist_2_ant(hw); + "[BTCoex], 2 Ant mechanism\n"); + _rtl8723e_dm_bt_coexist_2_ant(hw); } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "[BTCoex], 1 Ant mechanism\n"); - _rtl8723ae_dm_bt_coexist_1_ant(hw); + "[BTCoex], 1 Ant mechanism\n"); + _rtl8723e_dm_bt_coexist_1_ant(hw); } - if (!rtl8723ae_dm_bt_is_same_coexist_state(hw)) { + if (!rtl8723e_dm_bt_is_same_coexist_state(hw)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n", - rtlpcipriv->bt_coexist.previous_state_h, - rtlpcipriv->bt_coexist.previous_state, - rtlpcipriv->bt_coexist.cstate_h, - rtlpcipriv->bt_coexist.cstate); - rtlpcipriv->bt_coexist.previous_state - = rtlpcipriv->bt_coexist.cstate; - rtlpcipriv->bt_coexist.previous_state_h - = rtlpcipriv->bt_coexist.cstate_h; + rtlpriv->btcoexist.previous_state_h, + rtlpriv->btcoexist.previous_state, + rtlpriv->btcoexist.cstate_h, + rtlpriv->btcoexist.cstate); + rtlpriv->btcoexist.previous_state + = rtlpriv->btcoexist.cstate; + rtlpriv->btcoexist.previous_state_h + = rtlpriv->btcoexist.cstate_h; } } -static void rtl8723ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw, - u8 *tmbuf, u8 len) +static void rtl8723e_dm_bt_parse_bt_info(struct ieee80211_hw *hw, + u8 *tmp_buf, u8 len) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 bt_info; u8 i; - rtlhal->hal_coex_8723.c2h_bt_info_req_sent = false; - rtlhal->hal_coex_8723.bt_retry_cnt = 0; + hal_coex_8723.c2h_bt_info_req_sent = false; + hal_coex_8723.bt_retry_cnt = 0; for (i = 0; i < len; i++) { if (i == 0) - rtlhal->hal_coex_8723.c2h_bt_info_original = tmbuf[i]; + 
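The enable/disable check above treats BT as absent when all four priority counters read the same sentinel pattern (all zero, all 0xeaea, or all 0xffff), and only declares BT disabled after two consecutive such polls. A standalone sketch of that debounce, with the four counters packed into an array for brevity:

#include <stdbool.h>

/* Returns true once a sentinel pattern has been seen on two consecutive
 * polls; 'zero_cnt' carries state between calls, as bt_active_zero_cnt does.
 */
static bool bt_looks_disabled(const unsigned int cnt[4], unsigned int *zero_cnt)
{
	static const unsigned int sentinels[] = { 0x0000, 0xeaea, 0xffff };
	bool alive = true;
	unsigned int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 4 && cnt[j] == sentinels[i]; j++)
			;
		if (j == 4)
			alive = false;	/* all four counters match a sentinel */
	}

	if (alive) {
		*zero_cnt = 0;
		return false;
	}
	return ++(*zero_cnt) >= 2;
}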
hal_coex_8723.c2h_bt_info_original = tmp_buf[i]; else if (i == 1) - rtlhal->hal_coex_8723.bt_retry_cnt = tmbuf[i]; - if (i == len-1) { + hal_coex_8723.bt_retry_cnt = tmp_buf[i]; + if (i == len-1) RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "0x%2x]", tmbuf[i]); - } else { + "0x%2x]", tmp_buf[i]); + else RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, - "0x%2x, ", tmbuf[i]); - } + "0x%2x, ", tmp_buf[i]); + } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "BT info bt_info (Data)= 0x%x\n", - rtlhal->hal_coex_8723.c2h_bt_info_original); - bt_info = rtlhal->hal_coex_8723.c2h_bt_info_original; + "BT info bt_info (Data)= 0x%x\n", + hal_coex_8723.c2h_bt_info_original); + bt_info = hal_coex_8723.c2h_bt_info_original; if (bt_info & BIT(2)) - rtlhal->hal_coex_8723.c2h_bt_inquiry_page = true; + hal_coex_8723.c2h_bt_inquiry_page = true; else - rtlhal->hal_coex_8723.c2h_bt_inquiry_page = false; + hal_coex_8723.c2h_bt_inquiry_page = false; + if (bt_info & BTINFO_B_CONNECTION) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTC2H], BTInfo: bConnect=true\n"); - rtlpcipriv->bt_coexist.bt_busy = true; - rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT_IDLE; + "[BTC2H], BTInfo: bConnect=true\n"); + rtlpriv->btcoexist.bt_busy = true; + rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_BT_IDLE; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTC2H], BTInfo: bConnect=false\n"); - rtlpcipriv->bt_coexist.bt_busy = false; - rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT_IDLE; + "[BTC2H], BTInfo: bConnect=false\n"); + rtlpriv->btcoexist.bt_busy = false; + rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BT_IDLE; } } void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct c2h_evt_hdr c2h_event; - u8 *ptmbuf; - u8 index; - u8 u1tmp; - + u8 *ptmp_buf = NULL; + u8 index = 0; + u8 u1b_tmp = 0; memset(&c2h_event, 0, sizeof(c2h_event)); - u1tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL); + u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1tmp); - c2h_event.cmd_id = u1tmp & 0xF; - c2h_event.cmd_len = (u1tmp & 0xF0) >> 4; + "&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp); + c2h_event.cmd_id = u1b_tmp & 0xF; + c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4; c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n", c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq); - u1tmp = rtl_read_byte(rtlpriv, 0x01AF); - if (u1tmp == C2H_EVT_HOST_CLOSE) { + u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF); + if (u1b_tmp == C2H_EVT_HOST_CLOSE) { return; - } else if (u1tmp != C2H_EVT_FW_CLOSE) { + } else if (u1b_tmp != C2H_EVT_FW_CLOSE) { rtl_write_byte(rtlpriv, 0x1AF, 0x00); return; } - ptmbuf = kmalloc(c2h_event.cmd_len, GFP_KERNEL); - if (ptmbuf == NULL) { + ptmp_buf = kzalloc(c2h_event.cmd_len, GFP_KERNEL); + if (ptmp_buf == NULL) { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "malloc cmd buf failed\n"); return; @@ -1757,30 +1744,37 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw) /* Read the content */ for (index = 0; index < c2h_event.cmd_len; index++) - ptmbuf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + - 2 + index); + ptmp_buf[index] = rtl_read_byte(rtlpriv, + REG_C2HEVT_MSG_NORMAL + 2 + index); + switch (c2h_event.cmd_id) { case C2H_BT_RSSI: - break; + break; case C2H_BT_OP_MODE: break; case BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "BT info Byte[0] (ID) is 
0x%x\n", c2h_event.cmd_id); + "BT info Byte[0] (ID) is 0x%x\n", + c2h_event.cmd_id); RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq); + "BT info Byte[1] (Seq) is 0x%x\n", + c2h_event.cmd_seq); RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "BT info Byte[2] (Data)= 0x%x\n", ptmbuf[0]); + "BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]); + + rtl8723e_dm_bt_parse_bt_info(hw, ptmp_buf, c2h_event.cmd_len); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); - rtl8723ae_dm_bt_parse_bt_info(hw, ptmbuf, c2h_event.cmd_len); break; default: break; } - kfree(ptmbuf); + kfree(ptmp_buf); rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE); } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h index 4325ecd58f0ca0b5393724faece4e2e5a0e60dab..3723d7476717b4bb29479828a81c4c37428fba0b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -24,8 +20,7 @@ * Hsinchu 300, Taiwan. * Larry Finger * - **************************************************************************** - */ + *****************************************************************************/ #ifndef __RTL8723E_HAL_BTC_H__ #define __RTL8723E_HAL_BTC_H__ @@ -34,21 +29,31 @@ #include "btc.h" #include "hal_bt_coexist.h" -#define BT_TXRX_CNT_THRES_1 1200 -#define BT_TXRX_CNT_THRES_2 1400 -#define BT_TXRX_CNT_THRES_3 3000 -#define BT_TXRX_CNT_LEVEL_0 0 /* < 1200 */ -#define BT_TXRX_CNT_LEVEL_1 1 /* >= 1200 && < 1400 */ -#define BT_TXRX_CNT_LEVEL_2 2 /* >= 1400 */ -#define BT_TXRX_CNT_LEVEL_3 3 +#define BT_TXRX_CNT_THRES_1 1200 +#define BT_TXRX_CNT_THRES_2 1400 +#define BT_TXRX_CNT_THRES_3 3000 +/* < 1200 */ +#define BT_TXRX_CNT_LEVEL_0 0 +/* >= 1200 && < 1400 */ +#define BT_TXRX_CNT_LEVEL_1 1 +/* >= 1400 */ +#define BT_TXRX_CNT_LEVEL_2 2 +#define BT_TXRX_CNT_LEVEL_3 3 + +#define BT_COEX_DISABLE 0 +#define BT_Q_PKT_OFF 0 +#define BT_Q_PKT_ON 1 + +#define BT_TX_PWR_OFF 0 +#define BT_TX_PWR_ON 1 /* TDMA mode definition */ -#define TDMA_2ANT 0 -#define TDMA_1ANT 1 -#define TDMA_NAV_OFF 0 -#define TDMA_NAV_ON 1 -#define TDMA_DAC_SWING_OFF 0 -#define TDMA_DAC_SWING_ON 1 +#define TDMA_2ANT 0 +#define TDMA_1ANT 1 +#define TDMA_NAV_OFF 0 +#define TDMA_NAV_ON 1 +#define TDMA_DAC_SWING_OFF 0 +#define TDMA_DAC_SWING_ON 1 /* PTA mode related definition */ #define BT_PTA_MODE_OFF 0 @@ -80,6 +85,7 @@ enum bt_traffic_mode_profile { BT_PROFILE_SCO }; +/* enum hci_ext_bt_operation { HCI_BT_OP_NONE = 0x0, HCI_BT_OP_INQUIRE_START = 0x1, @@ -93,6 +99,7 @@ enum hci_ext_bt_operation { HCI_BT_OP_BT_DEV_DISABLE = 0x9, HCI_BT_OP_MAX, }; +*/ enum bt_spec { BT_SPEC_1_0_b = 0x00, @@ -123,12 +130,12 @@ enum bt_state { BT_INFO_STATE_MAX = 7 }; -enum rtl8723ae_c2h_evt { +enum rtl8723e_c2h_evt { C2H_DBG = 0, C2H_TSF = 1, C2H_AP_RPT_RSP = 2, - C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific */ - /* tx packet. */ + /* The FW notify the report of the specific tx packet. 
*/ + C2H_CCX_TX_RPT = 3, C2H_BT_RSSI = 4, C2H_BT_OP_MODE = 5, C2H_HW_INFO_EXCH = 10, @@ -137,15 +144,16 @@ enum rtl8723ae_c2h_evt { MAX_C2HEVENT }; -void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw); -void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, +void rtl8723e_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_coexist_8723(struct ieee80211_hw *hw); +void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8723 *p_btdm); void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw); void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw, - bool mstatus); -void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw); + bool mstatus); +void rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps( + struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c index 662a079f76f3bdc01b07e6f7d41dbed0fe3ff473..aa085462d0e9592d26d7d57418d451bbbd9ae8ae 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
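The BT_TXRX_CNT_* thresholds defined in hal_btc.h above are what rtl8723e_dm_bt_bt_tx_rx_counter_level() compares the summed high- plus low-priority counters against. A minimal standalone mapping, with the constants mirrored from those defines:

#define BT_TXRX_CNT_THRES_1	1200
#define BT_TXRX_CNT_THRES_2	1400
#define BT_TXRX_CNT_THRES_3	3000

/* Map the summed BT tx+rx activity counters onto the four LEVEL_x buckets. */
static unsigned char bt_txrx_cnt_level(unsigned int cnt)
{
	if (cnt >= BT_TXRX_CNT_THRES_3)
		return 3;	/* BT_TXRX_CNT_LEVEL_3 */
	if (cnt >= BT_TXRX_CNT_THRES_2)
		return 2;	/* BT_TXRX_CNT_LEVEL_2: >= 1400 */
	if (cnt >= BT_TXRX_CNT_THRES_1)
		return 1;	/* BT_TXRX_CNT_LEVEL_1: >= 1200 && < 1400 */
	return 0;		/* BT_TXRX_CNT_LEVEL_0: < 1200 */
}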
* @@ -37,17 +33,21 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8723com/phy_common.h" #include "dm.h" #include "../rtl8723com/dm_common.h" #include "fw.h" #include "../rtl8723com/fw_common.h" #include "led.h" #include "hw.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" #include "btc.h" -static void _rtl8723ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw, - u8 set_bits, u8 clear_bits) +#define LLT_CONFIG 5 + +static void _rtl8723e_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -58,7 +58,7 @@ static void _rtl8723ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); } -static void _rtl8723ae_stop_tx_beacon(struct ieee80211_hw *hw) +static void _rtl8723e_stop_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; @@ -71,7 +71,7 @@ static void _rtl8723ae_stop_tx_beacon(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } -static void _rtl8723ae_resume_tx_beacon(struct ieee80211_hw *hw) +static void _rtl8723e_resume_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; @@ -84,17 +84,17 @@ static void _rtl8723ae_resume_tx_beacon(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } -static void _rtl8723ae_enable_bcn_sufunc(struct ieee80211_hw *hw) +static void _rtl8723e_enable_bcn_sub_func(struct ieee80211_hw *hw) { - _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(1)); + _rtl8723e_set_bcn_ctrl_reg(hw, 0, BIT(1)); } -static void _rtl8723ae_disable_bcn_sufunc(struct ieee80211_hw *hw) +static void _rtl8723e_disable_bcn_sub_func(struct ieee80211_hw *hw) { - _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(1), 0); + _rtl8723e_set_bcn_ctrl_reg(hw, BIT(1), 0); } -void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -102,54 +102,55 @@ void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) switch (variable) { case HW_VAR_RCR: - *((u32 *) (val)) = rtlpci->receive_config; + *((u32 *)(val)) = rtlpci->receive_config; break; case HW_VAR_RF_STATE: *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfState; - u32 val_rcr; - - rtlpriv->cfg->ops->get_hw_reg(hw, - HW_VAR_RF_STATE, - (u8 *) (&rfState)); - if (rfState == ERFOFF) { - *((bool *) (val)) = true; - } else { - val_rcr = rtl_read_dword(rtlpriv, REG_RCR); - val_rcr &= 0x00070000; - if (val_rcr) - *((bool *) (val)) = false; - else - *((bool *) (val)) = true; + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, + HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else + *((bool *)(val)) = true; + } + break; } - break; } case HW_VAR_FW_PSMODE_STATUS: - *((bool *) (val)) = ppsc->fw_current_inpsmode; + *((bool *)(val)) = ppsc->fw_current_inpsmode; break; case HW_VAR_CORRECT_TSF:{ - u64 tsf; - u32 *ptsf_low = (u32 *)&tsf; - u32 *ptsf_high = ((u32 *)&tsf) + 1; + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; - *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); - 
*ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); - *((u64 *) (val)) = tsf; + *((u64 *)(val)) = tsf; - break; } + break; + } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } } -void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -157,362 +158,400 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u8 idx; switch (variable) { - case HW_VAR_ETHER_ADDR: - for (idx = 0; idx < ETH_ALEN; idx++) { - rtl_write_byte(rtlpriv, (REG_MACID + idx), - val[idx]); + case HW_VAR_ETHER_ADDR:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } + break; } - break; case HW_VAR_BASIC_RATE:{ - u16 rate_cfg = ((u16 *) val)[0]; - u8 rate_index = 0; - rate_cfg = rate_cfg & 0x15f; - rate_cfg |= 0x01; - rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); - rtl_write_byte(rtlpriv, REG_RRSR + 1, - (rate_cfg >> 8) & 0xff); - while (rate_cfg > 0x1) { - rate_cfg = (rate_cfg >> 1); - rate_index++; + u16 b_rate_cfg = ((u16 *)val)[0]; + u8 rate_index = 0; + + b_rate_cfg = b_rate_cfg & 0x15f; + b_rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, + (b_rate_cfg >> 8) & 0xff); + while (b_rate_cfg > 0x1) { + b_rate_cfg = (b_rate_cfg >> 1); + rate_index++; + } + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, + rate_index); + break; } - rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, - rate_index); - break; } - case HW_VAR_BSSID: - for (idx = 0; idx < ETH_ALEN; idx++) { - rtl_write_byte(rtlpriv, (REG_BSSID + idx), - val[idx]); + case HW_VAR_BSSID:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } + break; } - break; - case HW_VAR_SIFS: - rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); - rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); + case HW_VAR_SIFS:{ + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); - rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); - rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); - if (!mac->ht_enable) - rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, - 0x0e0e); - else - rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, - *((u16 *) val)); - break; + if (!mac->ht_enable) + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + 0x0e0e); + else + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + *((u16 *)val)); + break; + } case HW_VAR_SLOT_TIME:{ - u8 e_aci; + u8 e_aci; - RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, - "HW_VAR_SLOT_TIME %x\n", val[0]); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "HW_VAR_SLOT_TIME %x\n", val[0]); - rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); - for (e_aci = 0; e_aci < AC_MAX; e_aci++) { - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, - &e_aci); + for (e_aci = 0; e_aci < AC_MAX; e_aci++) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *)(&e_aci)); + } + break; } - 
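In the HW_VAR_BASIC_RATE case above, the loop that shifts b_rate_cfg right until only one bit remains is computing the index of the highest basic rate that survived the mask, which then seeds REG_INIRTS_RATE_SEL. A standalone sketch of that highest-set-bit calculation:

/* Index of the most significant set bit, derived the same way as the
 * while() loop above: shift right until a single '1' remains, counting
 * the shifts. rate_cfg is assumed non-zero (the driver forces bit 0 on).
 */
static unsigned char highest_rate_index(unsigned short rate_cfg)
{
	unsigned char idx = 0;

	while (rate_cfg > 0x1) {
		rate_cfg >>= 1;
		idx++;
	}
	return idx;
}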
break; } case HW_VAR_ACK_PREAMBLE:{ - u8 reg_tmp; - u8 short_preamble = (bool)*val; - reg_tmp = (mac->cur_40_prime_sc) << 5; - if (short_preamble) - reg_tmp |= 0x80; - - rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); - break; } + u8 reg_tmp; + u8 short_preamble = (bool)(*(u8 *)val); + + reg_tmp = (mac->cur_40_prime_sc) << 5; + if (short_preamble) + reg_tmp |= 0x80; + + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); + break; + } case HW_VAR_AMPDU_MIN_SPACE:{ - u8 min_spacing_to_set; - u8 sec_min_space; + u8 min_spacing_to_set; + u8 sec_min_space; - min_spacing_to_set = *val; - if (min_spacing_to_set <= 7) { - sec_min_space = 0; + min_spacing_to_set = *((u8 *)val); + if (min_spacing_to_set <= 7) { + sec_min_space = 0; - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; + if (min_spacing_to_set < sec_min_space) + min_spacing_to_set = sec_min_space; - mac->min_space_cfg = ((mac->min_space_cfg & - 0xf8) | - min_spacing_to_set); + mac->min_space_cfg = ((mac->min_space_cfg & + 0xf8) | + min_spacing_to_set); - *val = min_spacing_to_set; + *val = min_spacing_to_set; - RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", - mac->min_space_cfg); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg); - rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, - mac->min_space_cfg); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; } - break; } case HW_VAR_SHORTGI_DENSITY:{ - u8 density_to_set; + u8 density_to_set; - density_to_set = *val; - mac->min_space_cfg |= (density_to_set << 3); + density_to_set = *((u8 *)val); + mac->min_space_cfg |= (density_to_set << 3); - RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_SHORTGI_DENSITY: %#x\n", - mac->min_space_cfg); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg); - rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, - mac->min_space_cfg); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); - break; } + break; + } case HW_VAR_AMPDU_FACTOR:{ - u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9}; - u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97}; - u8 factor_toset; - u8 *p_regtoset = NULL; - u8 index; - - if ((pcipriv->bt_coexist.bt_coexistence) && - (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) - p_regtoset = regtoset_bt; - else - p_regtoset = regtoset_normal; - - factor_toset = *val; - if (factor_toset <= 3) { - factor_toset = (1 << (factor_toset + 2)); - if (factor_toset > 0xf) - factor_toset = 0xf; - - for (index = 0; index < 4; index++) { - if ((p_regtoset[index] & 0xf0) > - (factor_toset << 4)) - p_regtoset[index] = - (p_regtoset[index] & 0x0f) | - (factor_toset << 4); - - if ((p_regtoset[index] & 0x0f) > - factor_toset) - p_regtoset[index] = - (p_regtoset[index] & 0xf0) | - (factor_toset); - - rtl_write_byte(rtlpriv, - (REG_AGGLEN_LMT + index), - p_regtoset[index]); + u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 }; + u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97}; + u8 factor_toset; + u8 *p_regtoset = NULL; + u8 index = 0; + + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == + BT_CSR_BC4)) + p_regtoset = regtoset_bt; + else + p_regtoset = regtoset_normal; + + factor_toset = *((u8 *)val); + if (factor_toset <= 3) { + factor_toset = (1 << (factor_toset + 2)); + if (factor_toset > 0xf) + factor_toset = 0xf; + + for (index = 0; index < 4; index++) { + if ((p_regtoset[index] & 0xf0) > + (factor_toset << 4)) + p_regtoset[index] = + 
(p_regtoset[index] & 0x0f) | + (factor_toset << 4); + + if ((p_regtoset[index] & 0x0f) > + factor_toset) + p_regtoset[index] = + (p_regtoset[index] & 0xf0) | + (factor_toset); + + rtl_write_byte(rtlpriv, + (REG_AGGLEN_LMT + index), + p_regtoset[index]); + } + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_FACTOR: %#x\n", + factor_toset); } - - RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_AMPDU_FACTOR: %#x\n", - factor_toset); + break; } - break; } case HW_VAR_AC_PARAM:{ - u8 e_aci = *val; - rtl8723_dm_init_edca_turbo(hw); + u8 e_aci = *((u8 *)val); - if (rtlpci->acm_method != EACMWAY2_SW) - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, - &e_aci); - break; } + rtl8723_dm_init_edca_turbo(hw); + + if (rtlpci->acm_method != EACMWAY2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_ACM_CTRL, + (u8 *)(&e_aci)); + break; + } case HW_VAR_ACM_CTRL:{ - u8 e_aci = *val; - union aci_aifsn *p_aci_aifsn = - (union aci_aifsn *)(&(mac->ac[0].aifs)); - u8 acm = p_aci_aifsn->f.acm; - u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); - - acm_ctrl |= ((rtlpci->acm_method == 2) ? 0x0 : 0x1); - - if (acm) { - switch (e_aci) { - case AC0_BE: - acm_ctrl |= AcmHw_BeqEn; - break; - case AC2_VI: - acm_ctrl |= AcmHw_ViqEn; - break; - case AC3_VO: - acm_ctrl |= AcmHw_VoqEn; - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", - acm); - break; - } - } else { - switch (e_aci) { - case AC0_BE: - acm_ctrl &= (~AcmHw_BeqEn); - break; - case AC2_VI: - acm_ctrl &= (~AcmHw_ViqEn); - break; - case AC3_VO: - acm_ctrl &= (~AcmHw_BeqEn); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; + u8 e_aci = *((u8 *)val); + union aci_aifsn *p_aci_aifsn = + (union aci_aifsn *)(&mac->ac[0].aifs); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = + acm_ctrl | ((rtlpci->acm_method == 2) ? 
0x0 : 0x1); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= ACMHW_BEQEN; + break; + case AC2_VI: + acm_ctrl |= ACMHW_VIQEN; + break; + case AC3_VO: + acm_ctrl |= ACMHW_VOQEN; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", + acm); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~ACMHW_BEQEN); + break; + case AC2_VI: + acm_ctrl &= (~ACMHW_VIQEN); + break; + case AC3_VO: + acm_ctrl &= (~ACMHW_BEQEN); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } } - } - RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, - "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", - acm_ctrl); - rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); - break; } - case HW_VAR_RCR: - rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]); - rtlpci->receive_config = ((u32 *) (val))[0]; - break; + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", + acm_ctrl); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; + } + case HW_VAR_RCR:{ + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]); + rtlpci->receive_config = ((u32 *)(val))[0]; + break; + } case HW_VAR_RETRY_LIMIT:{ - u8 retry_limit = *val; + u8 retry_limit = ((u8 *)(val))[0]; - rtl_write_word(rtlpriv, REG_RL, - retry_limit << RETRY_LIMIT_SHORT_SHIFT | - retry_limit << RETRY_LIMIT_LONG_SHIFT); - break; } + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + break; + } case HW_VAR_DUAL_TSF_RST: rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); break; case HW_VAR_EFUSE_BYTES: - rtlefuse->efuse_usedbytes = *((u16 *) val); + rtlefuse->efuse_usedbytes = *((u16 *)val); break; case HW_VAR_EFUSE_USAGE: - rtlefuse->efuse_usedpercentage = *val; + rtlefuse->efuse_usedpercentage = *((u8 *)val); break; case HW_VAR_IO_CMD: - rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val)); + rtl8723e_phy_set_io_cmd(hw, (*(enum io_type *)val)); break; case HW_VAR_WPA_CONFIG: - rtl_write_byte(rtlpriv, REG_SECCFG, *val); + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); break; case HW_VAR_SET_RPWM:{ - u8 rpwm_val; + u8 rpwm_val; - rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); - udelay(1); + rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); + udelay(1); - if (rpwm_val & BIT(7)) { - rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val); - } else { - rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7)); - } + if (rpwm_val & BIT(7)) { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + (*(u8 *)val)); + } else { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *)val) | BIT(7))); + } - break; } + break; + } case HW_VAR_H2C_FW_PWRMODE:{ - u8 psmode = *val; + u8 psmode = (*(u8 *)val); - if (psmode != FW_PS_ACTIVE_MODE) - rtl8723ae_dm_rf_saving(hw, true); + if (psmode != FW_PS_ACTIVE_MODE) + rtl8723e_dm_rf_saving(hw, true); - rtl8723ae_set_fw_pwrmode_cmd(hw, *val); - break; } + rtl8723e_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); + break; + } case HW_VAR_FW_PSMODE_STATUS: - ppsc->fw_current_inpsmode = *((bool *) val); + ppsc->fw_current_inpsmode = *((bool *)val); break; case HW_VAR_H2C_FW_JOINBSSRPT:{ - u8 mstatus = *val; - u8 tmp_regcr, tmp_reg422; - bool recover = false; - - if (mstatus == RT_MEDIA_CONNECT) { - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); - - tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); - rtl_write_byte(rtlpriv, REG_CR + 1, - (tmp_regcr | BIT(0))); - - _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); - 
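The HW_VAR_AMPDU_FACTOR handler above converts the factor into a per-nibble limit (1 << (factor + 2), capped at 0xf) and then clamps both nibbles of each REG_AGGLEN_LMT byte to that limit. A standalone sketch of the clamp applied to one byte:

/* Clamp both 4-bit aggregation limits packed in one AGGLEN byte so that
 * neither nibble exceeds 'limit' (already capped to 0xf by the caller).
 */
static unsigned char clamp_agglen_byte(unsigned char reg, unsigned char limit)
{
	if ((reg & 0xf0) > (limit << 4))
		reg = (reg & 0x0f) | (limit << 4);
	if ((reg & 0x0f) > limit)
		reg = (reg & 0xf0) | limit;
	return reg;
}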
_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + u8 mstatus = (*(u8 *)val); + u8 tmp_regcr, tmp_reg422; + bool b_recover = false; + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, + NULL); + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr | BIT(0))); + + _rtl8723e_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl8723e_set_bcn_ctrl_reg(hw, BIT(4), 0); + + tmp_reg422 = + rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); + if (tmp_reg422 & BIT(6)) + b_recover = true; + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & (~BIT(6))); - tmp_reg422 = rtl_read_byte(rtlpriv, - REG_FWHW_TXQ_CTRL + 2); - if (tmp_reg422 & BIT(6)) - recover = true; - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, - tmp_reg422 & (~BIT(6))); + rtl8723e_set_fw_rsvdpagepkt(hw, 0); - rtl8723ae_set_fw_rsvdpagepkt(hw, 0); + _rtl8723e_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl8723e_set_bcn_ctrl_reg(hw, 0, BIT(4)); - _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0); - _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); + if (b_recover) { + rtl_write_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2, + tmp_reg422); + } - if (recover) - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, - tmp_reg422); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr & ~(BIT(0)))); + } + rtl8723e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val)); - rtl_write_byte(rtlpriv, REG_CR + 1, - (tmp_regcr & ~(BIT(0)))); + break; } - rtl8723ae_set_fw_joinbss_report_cmd(hw, *val); - - break; } - case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: - rtl8723ae_set_p2p_ps_offload_cmd(hw, *val); + case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:{ + rtl8723e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); break; + } case HW_VAR_AID:{ - u16 u2btmp; - u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); - u2btmp &= 0xC000; - rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | - mac->assoc_id)); - break; } + u16 u2btmp; + + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, + (u2btmp | mac->assoc_id)); + + break; + } case HW_VAR_CORRECT_TSF:{ - u8 btype_ibss = *val; - - if (btype_ibss == true) - _rtl8723ae_stop_tx_beacon(hw); - - _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); - - rtl_write_dword(rtlpriv, REG_TSFTR, - (u32) (mac->tsf & 0xffffffff)); - rtl_write_dword(rtlpriv, REG_TSFTR + 4, - (u32) ((mac->tsf >> 32) & 0xffffffff)); - - _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0); - - if (btype_ibss == true) - _rtl8723ae_resume_tx_beacon(hw); - break; } - case HW_VAR_FW_LPS_ACTION: { - bool enter_fwlps = *((bool *)val); - u8 rpwm_val, fw_pwrmode; - bool fw_current_inps; - - if (enter_fwlps) { - rpwm_val = 0x02; /* RF off */ - fw_current_inps = true; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_FW_PSMODE_STATUS, - (u8 *)(&fw_current_inps)); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_H2C_FW_PWRMODE, - &ppsc->fwctrl_psmode); - - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, - &rpwm_val); - } else { - rpwm_val = 0x0C; /* RF on */ - fw_pwrmode = FW_PS_ACTIVE_MODE; - fw_current_inps = false; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, - &rpwm_val); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, - &fw_pwrmode); - - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_FW_PSMODE_STATUS, - (u8 *)(&fw_current_inps)); + u8 btype_ibss = ((u8 *)(val))[0]; + + if (btype_ibss) + _rtl8723e_stop_tx_beacon(hw); + + _rtl8723e_set_bcn_ctrl_reg(hw, 0, BIT(3)); + + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32)(mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32)((mac->tsf >> 32) & 
0xffffffff)); + + _rtl8723e_set_bcn_ctrl_reg(hw, BIT(3), 0); + + if (btype_ibss) + _rtl8723e_resume_tx_beacon(hw); + + break; + } + case HW_VAR_FW_LPS_ACTION:{ + bool b_enter_fwlps = *((bool *)val); + u8 rpwm_val, fw_pwrmode; + bool fw_current_inps; + + if (b_enter_fwlps) { + rpwm_val = 0x02; /* RF off */ + fw_current_inps = true; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + } else { + rpwm_val = 0x0C; /* RF on */ + fw_pwrmode = FW_PS_ACTIVE_MODE; + fw_current_inps = false; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } + break; } - break; } default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); break; } } -static bool _rtl8723ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +static bool _rtl8723e_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); bool status = true; @@ -539,24 +578,49 @@ static bool _rtl8723ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) return status; } -static bool _rtl8723ae_llt_table_init(struct ieee80211_hw *hw) +static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; - u8 maxPage; + u8 maxpage; bool status; u8 ubyte; - maxPage = 255; +#if LLT_CONFIG == 1 + maxpage = 255; + txpktbuf_bndy = 252; +#elif LLT_CONFIG == 2 + maxpage = 127; + txpktbuf_bndy = 124; +#elif LLT_CONFIG == 3 + maxpage = 255; + txpktbuf_bndy = 174; +#elif LLT_CONFIG == 4 + maxpage = 255; txpktbuf_bndy = 246; +#elif LLT_CONFIG == 5 + maxpage = 255; + txpktbuf_bndy = 246; +#endif rtl_write_byte(rtlpriv, REG_CR, 0x8B); +#if LLT_CONFIG == 1 + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x1c); + rtl_write_dword(rtlpriv, REG_RQPN, 0x80a71c1c); +#elif LLT_CONFIG == 2 + rtl_write_dword(rtlpriv, REG_RQPN, 0x845B1010); +#elif LLT_CONFIG == 3 + rtl_write_dword(rtlpriv, REG_RQPN, 0x84838484); +#elif LLT_CONFIG == 4 + rtl_write_dword(rtlpriv, REG_RQPN, 0x80bd1c1c); +#elif LLT_CONFIG == 5 rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000); rtl_write_dword(rtlpriv, REG_RQPN, 0x80ac1c29); rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x03); +#endif rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy)); rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); @@ -569,22 +633,22 @@ static bool _rtl8723ae_llt_table_init(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); for (i = 0; i < (txpktbuf_bndy - 1); i++) { - status = _rtl8723ae_llt_write(hw, i, i + 1); + status = _rtl8723e_llt_write(hw, i, i + 1); if (true != status) return status; } - status = _rtl8723ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + status = _rtl8723e_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); if (true != status) return status; - for (i = txpktbuf_bndy; i < maxPage; i++) { - status = _rtl8723ae_llt_write(hw, i, (i + 1)); + for (i = txpktbuf_bndy; i < maxpage; i++) { + status = _rtl8723e_llt_write(hw, i, (i + 1)); if (true != status) return status; } - status = _rtl8723ae_llt_write(hw, maxPage, txpktbuf_bndy); + status = 
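/*
 * Hedged sketch of the link-list-table layout that _rtl8723e_llt_table_init()
 * above programs one entry at a time: pages reserved for the TX packet
 * buffer chain forward and terminate with 0xFF, while the remaining pages
 * chain forward and wrap back to txpktbuf_bndy, forming a ring.  The array
 * here stands in for the hardware LLT; build_llt() is not a driver symbol.
 */
#include <stdint.h>

static void build_llt(uint8_t *llt, unsigned int maxpage,
		      unsigned int txpktbuf_bndy)
{
	unsigned int i;

	for (i = 0; i < txpktbuf_bndy - 1; i++)		/* TX buffer pages chain forward */
		llt[i] = (uint8_t)(i + 1);
	llt[txpktbuf_bndy - 1] = 0xFF;			/* last TX page ends the chain */

	for (i = txpktbuf_bndy; i < maxpage; i++)	/* remaining pages chain forward... */
		llt[i] = (uint8_t)(i + 1);
	llt[maxpage] = (uint8_t)txpktbuf_bndy;		/* ...and wrap into a ring */
}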
_rtl8723e_llt_write(hw, maxpage, txpktbuf_bndy); if (true != status) return status; @@ -595,28 +659,29 @@ static bool _rtl8723ae_llt_table_init(struct ieee80211_hw *hw) return true; } -static void _rtl8723ae_gen_refresh_led_state(struct ieee80211_hw *hw) +static void _rtl8723e_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; if (rtlpriv->rtlhal.up_first_time) return; if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) - rtl8723ae_sw_led_on(hw, pLed0); + rtl8723e_sw_led_on(hw, pled0); else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) - rtl8723ae_sw_led_on(hw, pLed0); + rtl8723e_sw_led_on(hw, pled0); else - rtl8723ae_sw_led_off(hw, pLed0); + rtl8723e_sw_led_off(hw, pled0); } static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + unsigned char bytetmp; unsigned short wordtmp; u16 retry = 0; @@ -630,7 +695,6 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) else mac_func_enable = false; - /* HW Power on sequence */ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, Rtl8723_NIC_ENABLE_FLOW)) @@ -669,7 +733,7 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_CR + 1, 0x06); if (!mac_func_enable) { - if (_rtl8723ae_llt_table_init(hw) == false) + if (!_rtl8723e_llt_table_init(hw)) return false; } @@ -678,7 +742,8 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff); - wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0xf; + wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); + wordtmp &= 0xf; wordtmp |= 0xF771; rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); @@ -721,22 +786,23 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); } while ((retry < 200) && (bytetmp & BIT(7))); - _rtl8723ae_gen_refresh_led_state(hw); + _rtl8723e_gen_refresh_led_state(hw); rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); return true; } -static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) +static void _rtl8723e_hw_configure(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 reg_bw_opmode; - u32 reg_prsr; + u32 reg_ratr, reg_prsr; reg_bw_opmode = BW_OPMODE_20MHZ; + reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | + RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8); @@ -762,8 +828,8 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); - if ((pcipriv->bt_coexist.bt_coexistence) && - (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431); else rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841); @@ -782,8 +848,8 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); - if ((pcipriv->bt_coexist.bt_coexistence) && - 
(pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) { + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) { rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402); } else { @@ -791,8 +857,8 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); } - if ((pcipriv->bt_coexist.bt_coexistence) && - (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); else rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666); @@ -812,7 +878,7 @@ static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, 0x394, 0x1); } -static void _rtl8723ae_enable_aspm_back_door(struct ieee80211_hw *hw) +static void _rtl8723e_enable_aspm_back_door(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -830,15 +896,15 @@ static void _rtl8723ae_enable_aspm_back_door(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, 0x352, 0x1); } -void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw) +void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 sec_reg_value; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", - rtlpriv->sec.pairwise_enc_algorithm, - rtlpriv->sec.group_enc_algorithm); + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, @@ -846,11 +912,11 @@ void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw) return; } - sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable; + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; if (rtlpriv->sec.use_defaultkey) { - sec_reg_value |= SCR_TxUseDK; - sec_reg_value |= SCR_RxUseDK; + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; } sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); @@ -864,7 +930,7 @@ void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw) } -int rtl8723ae_hw_init(struct ieee80211_hw *hw) +int rtl8723e_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -887,6 +953,7 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) */ local_save_flags(flags); local_irq_enable(); + rtlhal->fw_ready = false; rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl8712e_init_mac(hw); @@ -896,20 +963,19 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) goto exit; } - err = rtl8723_download_fw(hw, false); + err = rtl8723_download_fw(hw, false, FW_8723A_POLLING_TIMEOUT_COUNT); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. 
Init HW without FW now..\n"); err = 1; goto exit; - } else { - rtlhal->fw_ready = true; } + rtlhal->fw_ready = true; rtlhal->last_hmeboxnum = 0; - rtl8723ae_phy_mac_config(hw); - /* because the last function modifies RCR, we update - * rcr var here, or TP will be unstable as ther receive_config + rtl8723e_phy_mac_config(hw); + /* because last function modify RCR, so we update + * rcr var here, or TP will unstable for receive_config * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252 */ @@ -917,9 +983,9 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV); rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); - rtl8723ae_phy_bb_config(hw); + rtl8723e_phy_bb_config(hw); rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; - rtl8723ae_phy_rf_config(hw); + rtl8723e_phy_rf_config(hw); if (IS_VENDOR_UMC_A_CUT(rtlhal->version)) { rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255); rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00); @@ -938,28 +1004,29 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); - _rtl8723ae_hw_configure(hw); + _rtl8723e_hw_configure(hw); rtl_cam_reset_all_entry(hw); - rtl8723ae_enable_hw_security_config(hw); + rtl8723e_enable_hw_security_config(hw); ppsc->rfpwr_state = ERFON; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); - _rtl8723ae_enable_aspm_back_door(hw); + _rtl8723e_enable_aspm_back_door(hw); rtlpriv->intf_ops->enable_aspm(hw); - rtl8723ae_bt_hw_init(hw); + rtl8723e_bt_hw_init(hw); if (ppsc->rfpwr_state == ERFON) { - rtl8723ae_phy_set_rfpath_switch(hw, 1); + rtl8723e_phy_set_rfpath_switch(hw, 1); if (rtlphy->iqk_initialized) { - rtl8723ae_phy_iq_calibrate(hw, true); + rtl8723e_phy_iq_calibrate(hw, true); } else { - rtl8723ae_phy_iq_calibrate(hw, false); + rtl8723e_phy_iq_calibrate(hw, false); rtlphy->iqk_initialized = true; } - rtl8723ae_phy_lc_calibrate(hw); + rtl8723e_dm_check_txpower_tracking(hw); + rtl8723e_phy_lc_calibrate(hw); } tmp_u1b = efuse_read_1byte(hw, 0x1FA); @@ -969,20 +1036,21 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw) } if (!(tmp_u1b & BIT(4))) { - tmp_u1b = rtl_read_byte(rtlpriv, 0x16) & 0x0F; + tmp_u1b = rtl_read_byte(rtlpriv, 0x16); + tmp_u1b &= 0x0F; rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80); udelay(10); rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); } - rtl8723ae_dm_init(hw); + rtl8723e_dm_init(hw); exit: local_irq_restore(flags); rtlpriv->rtlhal.being_init_adapter = false; return err; } -static enum version_8723e _rtl8723ae_read_chip_version(struct ieee80211_hw *hw) +static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -992,41 +1060,41 @@ static enum version_8723e _rtl8723ae_read_chip_version(struct ieee80211_hw *hw) value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); if (value32 & TRP_VAUX_EN) { version = (enum version_8723e)(version | - ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); /* RTL8723 with BT function. */ version = (enum version_8723e)(version | - ((value32 & BT_FUNC) ? CHIP_8723 : 0)); + ((value32 & BT_FUNC) ? CHIP_8723 : 0)); } else { /* Normal mass production chip. 
*/ version = (enum version_8723e) NORMAL_CHIP; version = (enum version_8723e)(version | - ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0)); /* RTL8723 with BT function. */ version = (enum version_8723e)(version | - ((value32 & BT_FUNC) ? CHIP_8723 : 0)); + ((value32 & BT_FUNC) ? CHIP_8723 : 0)); if (IS_CHIP_VENDOR_UMC(version)) version = (enum version_8723e)(version | ((value32 & CHIP_VER_RTL_MASK)));/* IC version (CUT) */ if (IS_8723_SERIES(version)) { value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS); - /* ROM code version */ + /* ROM code version. */ version = (enum version_8723e)(version | - ((value32 & RF_RL_ID)>>20)); + ((value32 & RF_RL_ID)>>20)); } } if (IS_8723_SERIES(version)) { value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); rtlphy->polarity_ctl = ((value32 & WL_HWPDN_SL) ? - RT_POLARITY_HIGH_ACT : - RT_POLARITY_LOW_ACT); + RT_POLARITY_HIGH_ACT : + RT_POLARITY_LOW_ACT); } switch (version) { case VERSION_TEST_UMC_CHIP_8723: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n"); - break; + break; case VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n"); @@ -1050,113 +1118,124 @@ static enum version_8723e _rtl8723ae_read_chip_version(struct ieee80211_hw *hw) return version; } -static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw, - enum nl80211_iftype type) +static int _rtl8723e_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc; enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + u8 mode = MSR_NOLINK; rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0); RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD, - "clear 0x550 when set HW_VAR_MEDIA_STATUS\n"); - - if (type == NL80211_IFTYPE_UNSPECIFIED || - type == NL80211_IFTYPE_STATION) { - _rtl8723ae_stop_tx_beacon(hw); - _rtl8723ae_enable_bcn_sufunc(hw); - } else if (type == NL80211_IFTYPE_ADHOC || - type == NL80211_IFTYPE_AP) { - _rtl8723ae_resume_tx_beacon(hw); - _rtl8723ae_disable_bcn_sufunc(hw); - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", - type); - } + "clear 0x550 when set HW_VAR_MEDIA_STATUS\n"); switch (type) { case NL80211_IFTYPE_UNSPECIFIED: - bt_msr |= MSR_NOLINK; - ledaction = LED_CTL_LINK; + mode = MSR_NOLINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Set Network type to NO LINK!\n"); + "Set Network type to NO LINK!\n"); break; case NL80211_IFTYPE_ADHOC: - bt_msr |= MSR_ADHOC; + mode = MSR_ADHOC; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Set Network type to Ad Hoc!\n"); + "Set Network type to Ad Hoc!\n"); break; case NL80211_IFTYPE_STATION: - bt_msr |= MSR_INFRA; + mode = MSR_INFRA; ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Set Network type to STA!\n"); + "Set Network type to STA!\n"); break; case NL80211_IFTYPE_AP: - bt_msr |= MSR_AP; + mode = MSR_AP; + ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Set Network type to AP!\n"); + "Set Network type to AP!\n"); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Network type %d not supported!\n", - type); + "Network type %d not support!\n", type); return 1; + break; + } + /* MSR_INFRA == Link in infrastructure network; + * MSR_ADHOC == Link in ad hoc network; + * Therefore, check link state is necessary. + * + * MSR_AP == AP mode; link state is not cared here. 
+ */ + if (mode != MSR_AP && + rtlpriv->mac80211.link_state < MAC80211_LINKED) { + mode = MSR_NOLINK; + ledaction = LED_CTL_NO_LINK; + } + if (mode == MSR_NOLINK || mode == MSR_INFRA) { + _rtl8723e_stop_tx_beacon(hw); + _rtl8723e_enable_bcn_sub_func(hw); + } else if (mode == MSR_ADHOC || mode == MSR_AP) { + _rtl8723e_resume_tx_beacon(hw); + _rtl8723e_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", + mode); } - rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); rtlpriv->cfg->ops->led_control(hw, ledaction); - if ((bt_msr & MSR_MASK) == MSR_AP) + if (mode == MSR_AP) rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); else rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); return 0; } -void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) +void rtl8723e_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 reg_rcr; + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; if (rtlpriv->psc.rfpwr_state != ERFON) return; - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(®_rcr)); - - if (check_bssid == true) { + if (check_bssid) { reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(®_rcr)); - _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); - } else if (check_bssid == false) { + _rtl8723e_set_bcn_ctrl_reg(hw, 0, BIT(4)); + } else if (!check_bssid) { reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); - _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + _rtl8723e_set_bcn_ctrl_reg(hw, BIT(4), 0); rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *) (®_rcr)); + HW_VAR_RCR, (u8 *)(®_rcr)); } } -int rtl8723ae_set_network_type(struct ieee80211_hw *hw, - enum nl80211_iftype type) +int rtl8723e_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - if (_rtl8723ae_set_media_status(hw, type)) + if (_rtl8723e_set_media_status(hw, type)) return -EOPNOTSUPP; if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { if (type != NL80211_IFTYPE_AP) - rtl8723ae_set_check_bssid(hw, true); + rtl8723e_set_check_bssid(hw, true); } else { - rtl8723ae_set_check_bssid(hw, false); + rtl8723e_set_check_bssid(hw, false); } + return 0; } -/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ -void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci) +/* don't set REG_EDCA_BE_PARAM here + * because mac80211 will send pkt when scan + */ +void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1166,7 +1245,6 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci) rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); break; case AC0_BE: - /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4ac_param); */ break; case AC2_VI: rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); @@ -1180,7 +1258,19 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci) } } -void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw) +static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp; + + tmp = rtl_read_dword(rtlpriv, REG_HISR); + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); +} + +void rtl8723e_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci 
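/*
 * Hedged sketch of the acknowledge pattern used by the newly added
 * rtl8723e_clear_interrupt() above: REG_HISR/REG_HISRE are treated as
 * write-1-to-clear status registers, so reading the pending bits and
 * writing the same value back clears exactly those bits and nothing that
 * arrives afterwards.  The variables below model a fake W1C register.
 */
#include <stdint.h>

static uint32_t sk_hisr;	/* fake W1C status register for the sketch */

static uint32_t sk_read32(void)    { return sk_hisr; }
static void sk_write32(uint32_t v) { sk_hisr &= ~v; }	/* writing 1 clears a bit */

static void clear_pending_irqs(void)
{
	uint32_t pending = sk_read32();	/* snapshot what is pending right now */

	sk_write32(pending);		/* ack it; later interrupts are preserved */
}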
*rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1190,37 +1280,39 @@ void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw) rtlpci->irq_enabled = true; } -void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw) +void rtl8723e_disable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - + rtl8723e_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED); rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED); rtlpci->irq_enabled = false; - synchronize_irq(rtlpci->pdev->irq); + /*synchronize_irq(rtlpci->pdev->irq);*/ } -static void _rtl8723ae_poweroff_adapter(struct ieee80211_hw *hw) +static void _rtl8723e_poweroff_adapter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 u1tmp; + u8 u1b_tmp; /* Combo (PCIe + USB) Card and PCIe-MF Card */ /* 1. Run LPS WL RFOFF flow */ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW); + PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW); /* 2. 0x1F[7:0] = 0 */ /* turn off RF */ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); - if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready) + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && + rtlhal->fw_ready) { rtl8723ae_firmware_selfreset(hw); + } /* Reset MCU. Suggested by Filen. */ - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1tmp & (~BIT(2)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); /* g. MCUFWDL 0x80[1:0]=0 */ /* reset MCU ready status */ @@ -1231,39 +1323,38 @@ static void _rtl8723ae_poweroff_adapter(struct ieee80211_hw *hw) PWR_INTF_PCI_MSK, Rtl8723_NIC_DISABLE_FLOW); /* Reset MCU IO Wrapper */ - u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); - rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1tmp & (~BIT(0)))); - u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); - rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1tmp | BIT(0)); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); /* 7. 
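/*
 * Hedged sketch of the reset pulse near the end of _rtl8723e_poweroff_adapter()
 * above: bit 0 of REG_RSV_CTRL + 1 (the "Reset MCU IO Wrapper" step) is
 * driven low and then high again via two read-modify-write accesses.
 * sk_read8() and sk_write8() stand in for rtl_read_byte()/rtl_write_byte().
 */
#include <stdint.h>

static uint8_t sk_rsv_ctrl1;	/* fake REG_RSV_CTRL + 1 for the sketch */

static uint8_t sk_read8(void)    { return sk_rsv_ctrl1; }
static void sk_write8(uint8_t v) { sk_rsv_ctrl1 = v; }

static void pulse_mcu_io_wrapper_reset(void)
{
	sk_write8(sk_read8() & (uint8_t)~0x01);	/* assert reset: bit 0 low   */
	sk_write8(sk_read8() | 0x01);		/* release reset: bit 0 high */
}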
RSV_CTRL 0x1C[7:0] = 0x0E */ /* lock ISO/CLK/Power control register */ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); } -void rtl8723ae_card_disable(struct ieee80211_hw *hw) +void rtl8723e_card_disable(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); enum nl80211_iftype opmode; mac->link_state = MAC80211_NOLINK; opmode = NL80211_IFTYPE_UNSPECIFIED; - _rtl8723ae_set_media_status(hw, opmode); - if (rtlpci->driver_is_goingto_unload || + _rtl8723e_set_media_status(hw, opmode); + if (rtlpriv->rtlhal.driver_is_goingto_unload || ppsc->rfoff_reason > RF_CHANGE_BY_PS) rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); - _rtl8723ae_poweroff_adapter(hw); + _rtl8723e_poweroff_adapter(hw); /* after power off we should do iqk again */ rtlpriv->phy.iqk_initialized = false; } -void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb) +void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1272,7 +1363,7 @@ void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw, rtl_write_dword(rtlpriv, 0x3a0, *p_inta); } -void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw) +void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1281,17 +1372,17 @@ void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw) bcn_interval = mac->beacon_interval; atim_window = 2; /*FIX MERGE */ - rtl8723ae_disable_interrupt(hw); + rtl8723e_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18); rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18); rtl_write_byte(rtlpriv, 0x606, 0x30); - rtl8723ae_enable_interrupt(hw); + rtl8723e_enable_interrupt(hw); } -void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw) +void rtl8723e_set_beacon_interval(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1299,13 +1390,13 @@ void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n", bcn_interval); - rtl8723ae_disable_interrupt(hw); + rtl8723e_disable_interrupt(hw); rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); - rtl8723ae_enable_interrupt(hw); + rtl8723e_enable_interrupt(hw); } -void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw, - u32 add_msr, u32 rm_msr) +void rtl8723e_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1317,11 +1408,11 @@ void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw, rtlpci->irq_mask[0] |= add_msr; if (rm_msr) rtlpci->irq_mask[0] &= (~rm_msr); - rtl8723ae_disable_interrupt(hw); - rtl8723ae_enable_interrupt(hw); + rtl8723e_disable_interrupt(hw); + rtl8723e_enable_interrupt(hw); } -static u8 _rtl8723ae_get_chnl_group(u8 chnl) +static u8 _rtl8723e_get_chnl_group(u8 chnl) { u8 group; @@ -1334,9 +1425,9 @@ static u8 _rtl8723ae_get_chnl_group(u8 chnl) return group; } -static void 
_rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, - bool autoload_fail, - u8 *hwinfo) +static void _rtl8723e_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -1346,19 +1437,14 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (rf_path = 0; rf_path < 1; rf_path++) { for (i = 0; i < 3; i++) { if (!autoload_fail) { - rtlefuse->eeprom_chnlarea_txpwr_cck - [rf_path][i] = + rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][i] = hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i]; - rtlefuse->eeprom_chnlarea_txpwr_ht40_1s - [rf_path][i] = - hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * - 3 + i]; + rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] = + hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 + i]; } else { - rtlefuse->eeprom_chnlarea_txpwr_cck - [rf_path][i] = + rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][i] = EEPROM_DEFAULT_TXPOWERLEVEL; - rtlefuse->eeprom_chnlarea_txpwr_ht40_1s - [rf_path][i] = + rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] = EEPROM_DEFAULT_TXPOWERLEVEL; } } @@ -1379,43 +1465,43 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (i = 0; i < 3; i++) RTPRINT(rtlpriv, FINIT, INIT_EEPROM, "RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path, - i, rtlefuse->eeprom_chnlarea_txpwr_cck - [rf_path][i]); + i, rtlefuse->eeprom_chnlarea_txpwr_cck + [rf_path][i]); for (rf_path = 0; rf_path < 2; rf_path++) for (i = 0; i < 3; i++) RTPRINT(rtlpriv, FINIT, INIT_EEPROM, "RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n", rf_path, i, rtlefuse->eeprom_chnlarea_txpwr_ht40_1s - [rf_path][i]); + [rf_path][i]); for (rf_path = 0; rf_path < 2; rf_path++) for (i = 0; i < 3; i++) RTPRINT(rtlpriv, FINIT, INIT_EEPROM, "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n", - rf_path, i, - rtlefuse->eprom_chnl_txpwr_ht40_2sdf - [rf_path][i]); + rf_path, i, + rtlefuse->eprom_chnl_txpwr_ht40_2sdf + [rf_path][i]); for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl8723ae_get_chnl_group((u8) i); + index = _rtl8723e_get_chnl_group((u8)i); rtlefuse->txpwrlevel_cck[rf_path][i] = rtlefuse->eeprom_chnlarea_txpwr_cck - [rf_path][index]; + [rf_path][index]; rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = rtlefuse->eeprom_chnlarea_txpwr_ht40_1s - [rf_path][index]; + [rf_path][index]; if ((rtlefuse->eeprom_chnlarea_txpwr_ht40_1s - [rf_path][index] - - rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path] - [index]) > 0) { - rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = - rtlefuse->eeprom_chnlarea_txpwr_ht40_1s [rf_path][index] - - rtlefuse->eprom_chnl_txpwr_ht40_2sdf - [rf_path][index]; + rtlefuse->eprom_chnl_txpwr_ht40_2sdf + [rf_path][index]) > 0) { + rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = + rtlefuse->eeprom_chnlarea_txpwr_ht40_1s + [rf_path][index] - + rtlefuse->eprom_chnl_txpwr_ht40_2sdf + [rf_path][index]; } else { rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0; } @@ -1423,8 +1509,8 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (i = 0; i < 14; i++) { RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, - "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = " - "[0x%x / 0x%x / 0x%x]\n", rf_path, i, + "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n", + rf_path, i, rtlefuse->txpwrlevel_cck[rf_path][i], rtlefuse->txpwrlevel_ht40_1s[rf_path][i], rtlefuse->txpwrlevel_ht40_2s[rf_path][i]); @@ -1445,22 +1531,20 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, 
for (rf_path = 0; rf_path < 2; rf_path++) { for (i = 0; i < 14; i++) { - index = _rtl8723ae_get_chnl_group((u8) i); + index = _rtl8723e_get_chnl_group((u8)i); if (rf_path == RF90_PATH_A) { rtlefuse->pwrgroup_ht20[rf_path][i] = - (rtlefuse->eeprom_pwrlimit_ht20[index] & - 0xf); + (rtlefuse->eeprom_pwrlimit_ht20[index] & 0xf); rtlefuse->pwrgroup_ht40[rf_path][i] = - (rtlefuse->eeprom_pwrlimit_ht40[index] & - 0xf); + (rtlefuse->eeprom_pwrlimit_ht40[index] & 0xf); } else if (rf_path == RF90_PATH_B) { rtlefuse->pwrgroup_ht20[rf_path][i] = - ((rtlefuse->eeprom_pwrlimit_ht20[index] & - 0xf0) >> 4); + ((rtlefuse->eeprom_pwrlimit_ht20[index] & + 0xf0) >> 4); rtlefuse->pwrgroup_ht40[rf_path][i] = - ((rtlefuse->eeprom_pwrlimit_ht40[index] & - 0xf0) >> 4); + ((rtlefuse->eeprom_pwrlimit_ht40[index] & + 0xf0) >> 4); } RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, @@ -1473,7 +1557,7 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, } for (i = 0; i < 14; i++) { - index = _rtl8723ae_get_chnl_group((u8) i); + index = _rtl8723e_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index]; @@ -1490,7 +1574,7 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3)) rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0; - index = _rtl8723ae_get_chnl_group((u8) i); + index = _rtl8723e_get_chnl_group((u8)i); if (!autoload_fail) tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index]; @@ -1508,19 +1592,19 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, for (i = 0; i < 14; i++) RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i, - rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); + rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]); for (i = 0; i < 14; i++) RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i, - rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]); for (i = 0; i < 14; i++) RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i, - rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); + rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]); for (i = 0; i < 14; i++) RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i, - rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]); if (!autoload_fail) rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7); @@ -1533,10 +1617,11 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A]; else rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI; + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "TSSI_A = 0x%x, TSSI_B = 0x%x\n", - rtlefuse->eeprom_tssi[RF90_PATH_A], - rtlefuse->eeprom_tssi[RF90_PATH_B]); + rtlefuse->eeprom_tssi[RF90_PATH_A], + rtlefuse->eeprom_tssi[RF90_PATH_B]); if (!autoload_fail) tempval = hwinfo[EEPROM_THERMAL_METER]; @@ -1552,8 +1637,8 @@ static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); } -static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, - bool pseudo_test) +static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, + bool b_pseudo_test) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -1562,7 +1647,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; - if 
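/*
 * Hedged sketch of the nibble handling in the per-channel power-diff code
 * above: HT20 and legacy-to-HT deltas are stored as signed 4-bit fields,
 * so when bit 3 is set the driver ORs in 0xF0 to widen the value to a
 * signed 8-bit quantity (two's-complement sign extension).
 * sign_extend_nibble() is illustrative, not a driver helper.
 */
#include <stdint.h>

static uint8_t sign_extend_nibble(uint8_t nibble)
{
	uint8_t val = nibble & 0x0F;

	if (val & 0x08)		/* bit 3 set: negative in 4-bit two's complement */
		val |= 0xF0;	/* e.g. 0xE (-2) widens to 0xFE (-2 as an s8) */
	return val;
}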
(pseudo_test) { + if (b_pseudo_test) { /* need add */ return; } @@ -1576,7 +1661,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, "RTL819X Not boot from eeprom, check it !!"); } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); eeprom_id = *((u16 *)&hwinfo[0]); @@ -1589,13 +1674,13 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag = false; } - if (rtlefuse->autoload_failflag == true) + if (rtlefuse->autoload_failflag) return; - rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID]; + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -1609,16 +1694,16 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, for (i = 0; i < 6; i += 2) { usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; + *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n", rtlefuse->dev_addr); - _rtl8723ae_read_txpower_info_from_hwpg(hw, - rtlefuse->autoload_failflag, hwinfo); + _rtl8723e_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, + hwinfo); - rtl8723ae_read_bt_coexist_info_from_hwpg(hw, + rtl8723e_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; @@ -1644,6 +1729,14 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, CHK_SVID_SMID(0x10EC, 0x6178) || CHK_SVID_SMID(0x10EC, 0x6179) || CHK_SVID_SMID(0x10EC, 0x6180) || + CHK_SVID_SMID(0x10EC, 0x7151) || + CHK_SVID_SMID(0x10EC, 0x7152) || + CHK_SVID_SMID(0x10EC, 0x7154) || + CHK_SVID_SMID(0x10EC, 0x7155) || + CHK_SVID_SMID(0x10EC, 0x7177) || + CHK_SVID_SMID(0x10EC, 0x7178) || + CHK_SVID_SMID(0x10EC, 0x7179) || + CHK_SVID_SMID(0x10EC, 0x7180) || CHK_SVID_SMID(0x10EC, 0x8151) || CHK_SVID_SMID(0x10EC, 0x8152) || CHK_SVID_SMID(0x10EC, 0x8154) || @@ -1671,7 +1764,10 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, CHK_SVID_SMID(0x10EC, 0x7193) || CHK_SVID_SMID(0x10EC, 0x8191) || CHK_SVID_SMID(0x10EC, 0x8192) || - CHK_SVID_SMID(0x10EC, 0x8193)) + CHK_SVID_SMID(0x10EC, 0x8193) || + CHK_SVID_SMID(0x10EC, 0x9191) || + CHK_SVID_SMID(0x10EC, 0x9192) || + CHK_SVID_SMID(0x10EC, 0x9193)) rtlhal->oem_id = RT_CID_819X_SAMSUNG; else if (CHK_SVID_SMID(0x10EC, 0x8195) || CHK_SVID_SMID(0x10EC, 0x9195) || @@ -1728,7 +1824,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, else rtlhal->oem_id = RT_CID_DEFAULT; } else { - rtlhal->oem_id = RT_CID_DEFAULT; + rtlhal->oem_id = RT_CID_DEFAULT; } break; case EEPROM_CID_TOSHIBA: @@ -1750,18 +1846,31 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw, } } -static void _rtl8723ae_hal_customized_behavior(struct ieee80211_hw *hw) +static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); pcipriv->ledctl.led_opendrain = true; + switch 
(rtlhal->oem_id) { + case RT_CID_819X_HP: + pcipriv->ledctl.led_opendrain = true; + break; + case RT_CID_819X_LENOVO: + case RT_CID_DEFAULT: + case RT_CID_TOSHIBA: + case RT_CID_CCX: + case RT_CID_819X_ACER: + case RT_CID_WHQL: + default: + break; + } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n", rtlhal->oem_id); } -void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw) +void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -1774,7 +1883,7 @@ void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw) value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0); rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST], value32); - rtlhal->version = _rtl8723ae_read_chip_version(hw); + rtlhal->version = _rtl8723e_read_chip_version(hw); if (get_rf_type(rtlphy) == RF_1T1R) rtlpriv->dm.rfpath_rxenable[0] = true; @@ -1782,7 +1891,7 @@ void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw) rtlpriv->dm.rfpath_rxenable[0] = rtlpriv->dm.rfpath_rxenable[1] = true; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", - rtlhal->version); + rtlhal->version); tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); if (tmp_u1b & BIT(4)) { @@ -1795,33 +1904,34 @@ void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw) if (tmp_u1b & BIT(5)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); rtlefuse->autoload_failflag = false; - _rtl8723ae_read_adapter_info(hw, false); + _rtl8723e_read_adapter_info(hw, false); } else { rtlefuse->autoload_failflag = true; - _rtl8723ae_read_adapter_info(hw, false); + _rtl8723e_read_adapter_info(hw, false); RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); } - _rtl8723ae_hal_customized_behavior(hw); + _rtl8723e_hal_customized_behavior(hw); } -static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) +static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 ratr_value; u8 ratr_index = 0; - u8 nmode = mac->ht_enable; - u8 mimo_ps = IEEE80211_SMPS_OFF; + u8 b_nmode = mac->ht_enable; + u16 shortgi_rate; + u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; enum wireless_mode wirelessmode = mac->mode; + u32 ratr_mask; if (rtlhal->current_bandtype == BAND_ON_5G) ratr_value = sta->supp_rates[1] << 4; @@ -1830,7 +1940,7 @@ static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + sta->ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -1843,20 +1953,14 @@ static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw, break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: - nmode = 1; - if (mimo_ps == IEEE80211_SMPS_STATIC) { - ratr_value &= 0x0007F005; - } else { - u32 ratr_mask; - - if (get_rf_type(rtlphy) == RF_1T2R || - get_rf_type(rtlphy) == RF_1T1R) - ratr_mask = 0x000ff005; - else - ratr_mask = 0x0f0ff005; + b_nmode = 1; + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; - ratr_value &= ratr_mask; - } + ratr_value &= ratr_mask; break; default: if (rtlphy->rf_type == RF_1T2R) @@ -1867,19 +1971,30 @@ static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw, break; } - if ((pcipriv->bt_coexist.bt_coexistence) && - (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) && - (pcipriv->bt_coexist.bt_cur_state) && - (pcipriv->bt_coexist.bt_ant_isolation) && - ((pcipriv->bt_coexist.bt_service == BT_SCO) || - (pcipriv->bt_coexist.bt_service == BT_BUSY))) + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpriv->btcoexist.bt_cur_state) && + (rtlpriv->btcoexist.bt_ant_isolation) && + ((rtlpriv->btcoexist.bt_service == BT_SCO) || + (rtlpriv->btcoexist.bt_service == BT_BUSY))) ratr_value &= 0x0fffcfc0; else ratr_value &= 0x0FFFFFFF; - if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || - (!curtxbw_40mhz && curshortgi_20mhz))) + if (b_nmode && + ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); @@ -1887,8 +2002,9 @@ static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw, "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); } -static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level) +static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -1897,7 +2013,8 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ? 1 : 0; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
@@ -1906,9 +2023,9 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, bool shortgi = false; u8 rate_mask[5]; u8 macid = 0; - u8 mimo_ps = IEEE80211_SMPS_OFF; + /*u8 mimo_ps = IEEE80211_SMPS_OFF;*/ - sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; wirelessmode = sta_entry->wireless_mode; if (mac->opmode == NL80211_IFTYPE_STATION) curtxbw_40mhz = mac->bw_40; @@ -1943,54 +2060,44 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, ratr_bitmap &= 0x00000ff5; break; case WIRELESS_MODE_A: - ratr_index = RATR_INX_WIRELESS_A; + ratr_index = RATR_INX_WIRELESS_G; ratr_bitmap &= 0x00000ff0; break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: ratr_index = RATR_INX_WIRELESS_NGB; - - if (mimo_ps == IEEE80211_SMPS_STATIC) { - if (rssi_level == 1) - ratr_bitmap &= 0x00070000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0007f000; - else - ratr_bitmap &= 0x0007f005; + if (rtlphy->rf_type == RF_1T2R || + rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } } else { - if (rtlphy->rf_type == RF_1T2R || - rtlphy->rf_type == RF_1T1R) { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff005; - } + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff015; } else { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x0f0f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f0ff000; - else - ratr_bitmap &= 0x0f0ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x0f0f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f0ff000; - else - ratr_bitmap &= 0x0f0ff005; - } + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff005; } } @@ -2015,30 +2122,30 @@ static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", ratr_bitmap); - /* convert ratr_bitmap to le byte array */ - rate_mask[0] = ratr_bitmap; - rate_mask[1] = (ratr_bitmap >>= 8); - rate_mask[2] = (ratr_bitmap >>= 8); - rate_mask[3] = ((ratr_bitmap >> 8) & 0x0f) | (ratr_index << 4); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); rate_mask[4] = macid | (shortgi ? 
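/*
 * Hedged sketch of the H2C RA_MASK payload assembled nearby: bits 0-27
 * carry the rate bitmap, bits 28-31 the rate index, and a fifth byte
 * carries macid plus the short-GI flag.  The hunk stores the first four
 * bytes with a u32 cast (which assumes a little-endian host); the
 * byte-wise form below reproduces the same layout explicitly, matching
 * what the replaced byte-array code produced.  pack_ra_mask() is
 * illustrative, not a driver function.
 */
#include <stdint.h>

static void pack_ra_mask(uint8_t out[5], uint32_t ratr_bitmap,
			 uint8_t ratr_index, uint8_t macid, int shortgi)
{
	uint32_t word = (ratr_bitmap & 0x0fffffff) | ((uint32_t)ratr_index << 28);

	out[0] = (uint8_t)word;			/* least-significant byte first, */
	out[1] = (uint8_t)(word >> 8);		/* i.e. little-endian order      */
	out[2] = (uint8_t)(word >> 16);
	out[3] = (uint8_t)(word >> 24);
	out[4] = (uint8_t)(macid | (shortgi ? 0x20 : 0x00) | 0x80);
}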
0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, - "Rate_index:%x, ratr_bitmap: %*phC\n", - ratr_index, 5, rate_mask); - rtl8723ae_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); + "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4]); + rtl8723e_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); } -void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level) +void rtl8723e_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->dm.useramask) - rtl8723ae_update_hal_rate_mask(hw, sta, rssi_level); + rtl8723e_update_hal_rate_mask(hw, sta, rssi_level); else - rtl8723ae_update_hal_rate_table(hw, sta); + rtl8723e_update_hal_rate_table(hw, sta); } -void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw) +void rtl8723e_update_channel_access_setting(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -2052,14 +2159,14 @@ void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); } -bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); - enum rf_pwrstate e_rfpowerstate_toset; + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u8 u1tmp; - bool actuallyset = false; + bool b_actuallyset = false; if (rtlpriv->rtlhal.being_init_adapter) return false; @@ -2076,6 +2183,8 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) spin_unlock(&rtlpriv->locks.rf_ps_lock); } + cur_rfstate = ppsc->rfpwr_state; + rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1))); @@ -2086,24 +2195,23 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) else e_rfpowerstate_toset = (u1tmp & BIT(1)) ? 
ERFON : ERFOFF; - if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { + if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio ON, RF ON\n"); e_rfpowerstate_toset = ERFON; ppsc->hwradiooff = false; - actuallyset = true; - } else if ((ppsc->hwradiooff == false) - && (e_rfpowerstate_toset == ERFOFF)) { + b_actuallyset = true; + } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio OFF, RF OFF\n"); e_rfpowerstate_toset = ERFOFF; ppsc->hwradiooff = true; - actuallyset = true; + b_actuallyset = true; } - if (actuallyset) { + if (b_actuallyset) { spin_lock(&rtlpriv->locks.rf_ps_lock); ppsc->rfchange_inprogress = false; spin_unlock(&rtlpriv->locks.rf_ps_lock); @@ -2118,11 +2226,12 @@ bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) *valid = 1; return !ppsc->hwradiooff; + } -void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all) +void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -2130,6 +2239,7 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *macaddr = p_macaddr; u32 entry_id = 0; bool is_pairwise = false; + static u8 cam_const_addr[4][6] = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, @@ -2157,6 +2267,7 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, rtlpriv->sec.key_len[idx] = 0; } } + } else { switch (enc_algo) { case WEP40_ENCRYPTION: @@ -2172,8 +2283,8 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); enc_algo = CAM_TKIP; break; } @@ -2187,8 +2298,8 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, entry_id = key_index; } else { if (mac->opmode == NL80211_IFTYPE_AP) { - entry_id = rtl_cam_get_free_entry(hw, - macaddr); + entry_id = + rtl_cam_get_free_entry(hw, p_macaddr); if (entry_id >= TOTAL_CAM_ENTRY) { RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, @@ -2219,22 +2330,22 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, "set Pairwiase key\n"); rtl_cam_add_one_entry(hw, macaddr, key_index, - entry_id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[key_index]); + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); } else { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set group key\n"); if (mac->opmode == NL80211_IFTYPE_ADHOC) { rtl_cam_add_one_entry(hw, - rtlefuse->dev_addr, - PAIRWISE_KEYIDX, - CAM_PAIRWISE_KEY_POSITION, - enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf - [entry_id]); + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); } rtl_cam_add_one_entry(hw, macaddr, key_index, @@ -2247,45 +2358,43 @@ void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, } } -static void rtl8723ae_bt_var_init(struct ieee80211_hw *hw) +static void rtl8723e_bt_var_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); - pcipriv->bt_coexist.bt_coexistence = - 
pcipriv->bt_coexist.eeprom_bt_coexist; - pcipriv->bt_coexist.bt_ant_num = - pcipriv->bt_coexist.eeprom_bt_ant_num; - pcipriv->bt_coexist.bt_coexist_type = - pcipriv->bt_coexist.eeprom_bt_type; + rtlpriv->btcoexist.bt_coexistence = + rtlpriv->btcoexist.eeprom_bt_coexist; + rtlpriv->btcoexist.bt_ant_num = + rtlpriv->btcoexist.eeprom_bt_ant_num; + rtlpriv->btcoexist.bt_coexist_type = + rtlpriv->btcoexist.eeprom_bt_type; - pcipriv->bt_coexist.bt_ant_isolation = - pcipriv->bt_coexist.eeprom_bt_ant_isol; + rtlpriv->btcoexist.bt_ant_isolation = + rtlpriv->btcoexist.eeprom_bt_ant_isol; - pcipriv->bt_coexist.bt_radio_shared_type = - pcipriv->bt_coexist.eeprom_bt_radio_shared; + rtlpriv->btcoexist.bt_radio_shared_type = + rtlpriv->btcoexist.eeprom_bt_radio_shared; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BT Coexistance = 0x%x\n", - pcipriv->bt_coexist.bt_coexistence); + rtlpriv->btcoexist.bt_coexistence); - if (pcipriv->bt_coexist.bt_coexistence) { - pcipriv->bt_coexist.bt_busy_traffic = false; - pcipriv->bt_coexist.bt_traffic_mode_set = false; - pcipriv->bt_coexist.bt_non_traffic_mode_set = false; + if (rtlpriv->btcoexist.bt_coexistence) { + rtlpriv->btcoexist.bt_busy_traffic = false; + rtlpriv->btcoexist.bt_traffic_mode_set = false; + rtlpriv->btcoexist.bt_non_traffic_mode_set = false; - pcipriv->bt_coexist.cstate = 0; - pcipriv->bt_coexist.previous_state = 0; + rtlpriv->btcoexist.cstate = 0; + rtlpriv->btcoexist.previous_state = 0; - if (pcipriv->bt_coexist.bt_ant_num == ANT_X2) { + if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BlueTooth BT_Ant_Num = Antx2\n"); - } else if (pcipriv->bt_coexist.bt_ant_num == ANT_X1) { + } else if (rtlpriv->btcoexist.bt_ant_num == ANT_X1) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BlueTooth BT_Ant_Num = Antx1\n"); } - - switch (pcipriv->bt_coexist.bt_coexist_type) { + switch (rtlpriv->btcoexist.bt_coexist_type) { case BT_2WIRE: RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BlueTooth BT_CoexistType = BT_2Wire\n"); @@ -2317,20 +2426,19 @@ static void rtl8723ae_bt_var_init(struct ieee80211_hw *hw) } RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BlueTooth BT_Ant_isolation = %d\n", - pcipriv->bt_coexist.bt_ant_isolation); + rtlpriv->btcoexist.bt_ant_isolation); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "BT_RadioSharedType = 0x%x\n", - pcipriv->bt_coexist.bt_radio_shared_type); - pcipriv->bt_coexist.bt_active_zero_cnt = 0; - pcipriv->bt_coexist.cur_bt_disabled = false; - pcipriv->bt_coexist.pre_bt_disabled = false; + rtlpriv->btcoexist.bt_radio_shared_type); + rtlpriv->btcoexist.bt_active_zero_cnt = 0; + rtlpriv->btcoexist.cur_bt_disabled = false; + rtlpriv->btcoexist.pre_bt_disabled = false; } } -void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, - bool auto_load_fail, u8 *hwinfo) +void rtl8723e_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); u8 value; u32 tmpu_32; @@ -2338,47 +2446,50 @@ void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, if (!auto_load_fail) { tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); if (tmpu_32 & BIT(18)) - pcipriv->bt_coexist.eeprom_bt_coexist = 1; + rtlpriv->btcoexist.eeprom_bt_coexist = 1; else - pcipriv->bt_coexist.eeprom_bt_coexist = 0; + rtlpriv->btcoexist.eeprom_bt_coexist = 0; value = hwinfo[RF_OPTION4]; - pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A; - 
pcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1); - pcipriv->bt_coexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4); - pcipriv->bt_coexist.eeprom_bt_radio_shared = - ((value & 0x20) >> 5); + rtlpriv->btcoexist.eeprom_bt_type = BT_RTL8723A; + rtlpriv->btcoexist.eeprom_bt_ant_num = (value & 0x1); + rtlpriv->btcoexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4); + rtlpriv->btcoexist.eeprom_bt_radio_shared = + ((value & 0x20) >> 5); } else { - pcipriv->bt_coexist.eeprom_bt_coexist = 0; - pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A; - pcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2; - pcipriv->bt_coexist.eeprom_bt_ant_isol = 0; - pcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; + rtlpriv->btcoexist.eeprom_bt_coexist = 0; + rtlpriv->btcoexist.eeprom_bt_type = BT_RTL8723A; + rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2; + rtlpriv->btcoexist.eeprom_bt_ant_isol = 0; + rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; } - rtl8723ae_bt_var_init(hw); + rtl8723e_bt_var_init(hw); } -void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw) +void rtl8723e_bt_reg_init(struct ieee80211_hw *hw) { - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); /* 0:Low, 1:High, 2:From Efuse. */ - pcipriv->bt_coexist.reg_bt_iso = 2; + rtlpriv->btcoexist.reg_bt_iso = 2; /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ - pcipriv->bt_coexist.reg_bt_sco = 3; + rtlpriv->btcoexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ - pcipriv->bt_coexist.reg_bt_sco = 0; + rtlpriv->btcoexist.reg_bt_sco = 0; } - -void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw) +void rtl8723e_bt_hw_init(struct ieee80211_hw *hw) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); } -void rtl8723ae_suspend(struct ieee80211_hw *hw) +void rtl8723e_suspend(struct ieee80211_hw *hw) { } -void rtl8723ae_resume(struct ieee80211_hw *hw) +void rtl8723e_resume(struct ieee80211_hw *hw) { } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h index d3bc39fb27a559dd0b527ebd5a377bb29a394dfe..32c1ace97c3f0b6146a20bea56b3f1a18ec410cd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -34,38 +30,38 @@ ((rtlefuse->eeprom_svid == (_val1)) && \ (rtlefuse->eeprom_smid == (_val2))) -void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw); +void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw); -void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw, - u32 *p_inta, u32 *p_intb); -int rtl8723ae_hw_init(struct ieee80211_hw *hw); -void rtl8723ae_card_disable(struct ieee80211_hw *hw); -void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw); -void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw); -int rtl8723ae_set_network_type(struct ieee80211_hw *hw, - enum nl80211_iftype type); -void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); -void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci); -void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw); -void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw); -void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw, - u32 add_msr, u32 rm_msr); -void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level); -void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw); -bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); -void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw); -void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all); +void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb); +int rtl8723e_hw_init(struct ieee80211_hw *hw); +void rtl8723e_card_disable(struct ieee80211_hw *hw); +void rtl8723e_enable_interrupt(struct ieee80211_hw *hw); +void rtl8723e_disable_interrupt(struct ieee80211_hw *hw); +int rtl8723e_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type); +void rtl8723e_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci); +void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl8723e_set_beacon_interval(struct ieee80211_hw *hw); +void rtl8723e_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8723e_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level); +void rtl8723e_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); -void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, - bool autoload_fail, u8 *hwinfo); -void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw); -void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw); -void rtl8723ae_suspend(struct ieee80211_hw *hw); -void rtl8723ae_resume(struct ieee80211_hw *hw); +void rtl8723e_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo); +void rtl8723e_bt_reg_init(struct ieee80211_hw *hw); +void rtl8723e_bt_hw_init(struct ieee80211_hw *hw); +void rtl8723e_suspend(struct ieee80211_hw *hw); +void 
rtl8723e_resume(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c index 061526fe6e2d1222b4fb8050fc6dc38fa63013e7..13173351cbfd169b711ae2bffe41f00e04182211 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c @@ -32,44 +32,44 @@ #include "reg.h" #include "led.h" -static void _rtl8723ae_init_led(struct ieee80211_hw *hw, - struct rtl_led *pled, enum rtl_led_pin ledpin) +static void _rtl8723e_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, enum rtl_led_pin ledpin) { pled->hw = hw; pled->ledpin = ledpin; pled->ledon = false; } -void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) { - struct rtl_priv *rtlpriv = rtl_priv(hw); u8 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); - ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); - switch (pled->ledpin) { case LED_PIN_GPIO0: break; case LED_PIN_LED0: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); ledcfg &= ~BIT(6); rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5)); break; case LED_PIN_LED1: - rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + "switch case not process\n"); break; } pled->ledon = true; } -void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); @@ -86,7 +86,7 @@ void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) case LED_PIN_LED0: ledcfg &= 0xf0; if (pcipriv->ledctl.led_opendrain) { - ledcfg &= 0x90; + ledcfg &= 0x90; /* Set to software control. */ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); ledcfg &= 0xFE; @@ -94,50 +94,51 @@ void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) } else { ledcfg &= ~BIT(6); rtl_write_byte(rtlpriv, REG_LEDCFG2, - (ledcfg | BIT(3) | BIT(5))); + (ledcfg | BIT(3) | BIT(5))); } break; case LED_PIN_LED1: - ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1) & 0x10; - rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3))); + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + ledcfg &= 0x10; /* Set to software control. 
*/ + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3)); + break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + "switch case not process\n"); break; } pled->ledon = false; } -void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw) +void rtl8723e_init_sw_leds(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - - _rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); - _rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); + _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); } -static void _rtl8723ae_sw_led_control(struct ieee80211_hw *hw, - enum led_ctl_mode ledaction) +static void _rtl8723e_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); - switch (ledaction) { case LED_CTL_POWER_ON: case LED_CTL_LINK: case LED_CTL_NO_LINK: - rtl8723ae_sw_led_on(hw, pLed0); + rtl8723e_sw_led_on(hw, pLed0); break; case LED_CTL_POWER_OFF: - rtl8723ae_sw_led_off(hw, pLed0); + rtl8723e_sw_led_off(hw, pLed0); break; default: break; } } -void rtl8723ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) +void rtl8723e_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -152,6 +153,7 @@ void rtl8723ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) ledaction == LED_CTL_POWER_ON)) { return; } - RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction); - _rtl8723ae_sw_led_control(hw, ledaction); + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", + ledaction); + _rtl8723e_sw_led_control(hw, ledaction); } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h index 2cb88e78f62a980a709975adb79459f7a9b4c4f8..c22b19f542a65acd536338e18f449a9a3d48fd09 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -30,10 +26,9 @@ #ifndef __RTL92CE_LED_H__ #define __RTL92CE_LED_H__ -void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw); -void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); -void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); -void rtl8723ae_led_control(struct ieee80211_hw *hw, - enum led_ctl_mode ledaction); +void rtl8723e_init_sw_leds(struct ieee80211_hw *hw); +void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8723e_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c index 3ea78afdec734f6e31c9c1f5a63e8238e91ca318..d367097f490bcee04c1ab2d5fcb6eb97f7bf07ff 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,7 +26,6 @@ #include "../wifi.h" #include "../pci.h" #include "../ps.h" -#include "../core.h" #include "reg.h" #include "def.h" #include "phy.h" @@ -39,29 +34,31 @@ #include "table.h" #include "../rtl8723com/phy_common.h" -/* static forward definitions */ -static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset); -static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, - u32 offset, u32 data); -static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw); -static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw); -static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype); -static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype); -static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, - u8 *stage, u8 *step, u32 *delay); -static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - long power_indbm); -static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw); - -u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 regaddr, u32 bitmask) +static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); +static bool _rtl8723e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool _rtl8723e_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8723e_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay); +static u8 _rtl8723e_phy_dbm_to_txpwr_idx(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + long power_indbm); +static void rtl8723e_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl8723e_phy_set_io(struct ieee80211_hw *hw); + +u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 original_value, 
readback_value, bitshift; - struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 original_value = 0, readback_value, bitshift; + struct rtl_phy *rtlphy = &rtlpriv->phy; unsigned long flags; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, @@ -70,10 +67,10 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); - if (rtlphy->rf_mode != RF_OP_BY_FW) - original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr); - else - original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr); + if (rtlphy->rf_mode != RF_OP_BY_FW) { + original_value = rtl8723_phy_rf_serial_read(hw, + rfpath, regaddr); + } bitshift = rtl8723_phy_calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; @@ -82,45 +79,46 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", - regaddr, rfpath, bitmask, original_value); + regaddr, rfpath, bitmask, original_value); return readback_value; } -void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, - u32 regaddr, u32 bitmask, u32 data) +void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 original_value, bitshift; + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 original_value = 0, bitshift; unsigned long flags; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", - regaddr, bitmask, data, rfpath); + regaddr, bitmask, data, rfpath); spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); if (rtlphy->rf_mode != RF_OP_BY_FW) { if (bitmask != RFREG_OFFSET_MASK) { - original_value = rtl8723_phy_rf_serial_read(hw, rfpath, + original_value = rtl8723_phy_rf_serial_read(hw, + rfpath, regaddr); bitshift = rtl8723_phy_calculate_bit_shift(bitmask); - data = ((original_value & (~bitmask)) | - (data << bitshift)); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); } rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data); } else { if (bitmask != RFREG_OFFSET_MASK) { - original_value = _phy_fw_rf_serial_read(hw, rfpath, - regaddr); bitshift = rtl8723_phy_calculate_bit_shift(bitmask); - data = ((original_value & (~bitmask)) | - (data << bitshift)); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); } - _phy_fw_rf_serial_write(hw, rfpath, regaddr, data); + _rtl8723e_phy_fw_rf_serial_write(hw, rfpath, regaddr, data); } spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); @@ -128,23 +126,17 @@ void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath); -} -static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - RT_ASSERT(false, "deprecated!\n"); - return 0; } -static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, - u32 offset, u32 data) +static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) { RT_ASSERT(false, "deprecated!\n"); } -static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw) +static void _rtl8723e_phy_bb_config_1t(struct ieee80211_hw *hw) { rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022); @@ -158,20 +150,20 @@ static void 
_rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw) rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2); } -bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw) +bool rtl8723e_phy_mac_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - bool rtstatus = _phy_cfg_mac_w_header(hw); + bool rtstatus = _rtl8723e_phy_config_mac_with_headerfile(hw); rtl_write_byte(rtlpriv, 0x04CA, 0x0A); return rtstatus; } -bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw) +bool rtl8723e_phy_bb_config(struct ieee80211_hw *hw) { bool rtstatus = true; struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmpu1b; - u8 reg_hwparafile = 1; + u8 b_reg_hwparafile = 1; rtl8723_phy_init_bb_rf_reg_def(hw); @@ -186,67 +178,72 @@ bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw) /* 3. 0x02[1:0] = 2b'11 */ tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, (tmpu1b | - FEN_BB_GLB_RSTn | FEN_BBRSTB)); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, + (tmpu1b | FEN_BB_GLB_RSTN | FEN_BBRSTB)); /* 4. 0x25[6] = 0 */ tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+1); - rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+1, (tmpu1b&(~BIT(6)))); + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+1, (tmpu1b & (~BIT(6)))); - /* 5. 0x24[20] = 0 Advised by SD3 Alex Wang. 2011.02.09. */ + /* 5. 0x24[20] = 0 //Advised by SD3 Alex Wang. 2011.02.09. */ tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2); - rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, (tmpu1b&(~BIT(4)))); + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, (tmpu1b & (~BIT(4)))); /* 6. 0x1f[7:0] = 0x07 */ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x07); - if (reg_hwparafile == 1) - rtstatus = _phy_bb8192c_config_parafile(hw); + if (b_reg_hwparafile == 1) + rtstatus = _rtl8723e_phy_bb8192c_config_parafile(hw); return rtstatus; } -bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw) +bool rtl8723e_phy_rf_config(struct ieee80211_hw *hw) { - return rtl8723ae_phy_rf6052_config(hw); + return rtl8723e_phy_rf6052_config(hw); } -static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw) +static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n"); - rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_PHY_REG); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); + rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); return false; } if (rtlphy->rf_type == RF_1T2R) { - _rtl8723ae_phy_bb_config_1t(hw); + _rtl8723e_phy_bb_config_1t(hw); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n"); } if (rtlefuse->autoload_failflag == false) { rtlphy->pwrgroup_cnt = 0; - rtstatus = _phy_cfg_bb_w_pgheader(hw, BASEBAND_CONFIG_PHY_REG); + rtstatus = _rtl8723e_phy_config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); return false; } - rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_AGC_TAB); + rtstatus = + _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); return false; } rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, 0x200)); + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); 
+ return true; } -static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw) +static bool _rtl8723e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i; @@ -264,7 +261,8 @@ static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw) return true; } -static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype) +static bool _rtl8723e_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) { int i; u32 *phy_regarray_table; @@ -278,13 +276,23 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype) phy_regarray_table = RTL8723EPHY_REG_1TARRAY; if (configtype == BASEBAND_CONFIG_PHY_REG) { for (i = 0; i < phy_reg_arraylen; i = i + 2) { - rtl_addr_delay(phy_regarray_table[i]); + if (phy_regarray_table[i] == 0xfe) + mdelay(50); + else if (phy_regarray_table[i] == 0xfd) + mdelay(5); + else if (phy_regarray_table[i] == 0xfc) + mdelay(1); + else if (phy_regarray_table[i] == 0xfb) + udelay(50); + else if (phy_regarray_table[i] == 0xfa) + udelay(5); + else if (phy_regarray_table[i] == 0xf9) + udelay(1); rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, phy_regarray_table[i + 1]); udelay(1); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The phy_regarray_table[0] is %x" - " Rtl819XPHY_REGArray[1] is %x\n", + "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n", phy_regarray_table[i], phy_regarray_table[i + 1]); } @@ -294,8 +302,7 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype) agctab_array_table[i + 1]); udelay(1); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is " - "%x Rtl819XPHY_REGArray[1] is %x\n", + "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n", agctab_array_table[i], agctab_array_table[i + 1]); } @@ -303,132 +310,163 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype) return true; } -static void _st_pwrIdx_dfrate_off(struct ieee80211_hw *hw, u32 regaddr, - u32 bitmask, u32 data) +static void store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, + u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; - switch (regaddr) { - case RTXAGC_A_RATE18_06: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0] = data; + if (regaddr == RTXAGC_A_RATE18_06) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0]); - break; - case RTXAGC_A_RATE54_24: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][0]); + } + if (regaddr == RTXAGC_A_RATE54_24) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1]); - break; - case RTXAGC_A_CCK1_MCS32: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][1]); + } + if (regaddr == RTXAGC_A_CCK1_MCS32) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6]); - break; - case 
RTXAGC_B_CCK11_A_CCK2_11: - if (bitmask == 0xffffff00) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7] = data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7]); - } - if (bitmask == 0x000000ff) { - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15] = data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15]); - } - break; - case RTXAGC_A_MCS03_MCS00: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][6]); + } + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][7]); + } + if (regaddr == RTXAGC_A_MCS03_MCS00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2]); - break; - case RTXAGC_A_MCS07_MCS04: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][2]); + } + if (regaddr == RTXAGC_A_MCS07_MCS04) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3]); - break; - case RTXAGC_A_MCS11_MCS08: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][3]); + } + if (regaddr == RTXAGC_A_MCS11_MCS08) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4]); - break; - case RTXAGC_A_MCS15_MCS12: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][4]); + } + if (regaddr == RTXAGC_A_MCS15_MCS12) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5]); - break; - case RTXAGC_B_RATE18_06: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][5]); + } + if (regaddr == RTXAGC_B_RATE18_06) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8]); - break; - case RTXAGC_B_RATE54_24: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][8]); + } + if (regaddr == RTXAGC_B_RATE54_24) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", - 
rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9]); - break; - case RTXAGC_B_CCK1_55_MCS32: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][9]); + } + if (regaddr == RTXAGC_B_CCK1_55_MCS32) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14]); - break; - case RTXAGC_B_MCS03_MCS00: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][14]); + } + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] = + data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][15]); + } + if (regaddr == RTXAGC_B_MCS03_MCS00) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10]); - break; - case RTXAGC_B_MCS07_MCS04: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][10]); + } + if (regaddr == RTXAGC_B_MCS07_MCS04) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11]); - break; - case RTXAGC_B_MCS11_MCS08: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][11]); + } + if (regaddr == RTXAGC_B_MCS11_MCS08) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12]); - break; - case RTXAGC_B_MCS15_MCS12: - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13] = data; + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][12]); + } + if (regaddr == RTXAGC_B_MCS15_MCS12) { + rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] = + data; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13]); + rtlphy->pwrgroup_cnt, + rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> + pwrgroup_cnt][13]); + rtlphy->pwrgroup_cnt++; - break; } } -static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype) +static bool _rtl8723e_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) { struct rtl_priv *rtlpriv = rtl_priv(hw); int i; @@ -440,11 +478,23 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype) if (configtype == BASEBAND_CONFIG_PHY_REG) { for (i = 0; i < phy_regarray_pg_len; i = i + 3) { - rtl_addr_delay(phy_regarray_table_pg[i]); - - _st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i], - phy_regarray_table_pg[i + 1], - phy_regarray_table_pg[i + 2]); + if (phy_regarray_table_pg[i] == 0xfe) + mdelay(50); + else if (phy_regarray_table_pg[i] == 0xfd) + mdelay(5); + else 
if (phy_regarray_table_pg[i] == 0xfc) + mdelay(1); + else if (phy_regarray_table_pg[i] == 0xfb) + udelay(50); + else if (phy_regarray_table_pg[i] == 0xfa) + udelay(5); + else if (phy_regarray_table_pg[i] == 0xf9) + udelay(1); + + store_pwrindex_diffrate_offset(hw, + phy_regarray_table_pg[i], + phy_regarray_table_pg[i + 1], + phy_regarray_table_pg[i + 2]); } } else { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, @@ -453,45 +503,57 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype) return true; } -bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, - enum radio_path rfpath) +bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) { - struct rtl_priv *rtlpriv = rtl_priv(hw); int i; + bool rtstatus = true; u32 *radioa_array_table; - u16 radioa_arraylen; + u32 *radiob_array_table; + u16 radioa_arraylen, radiob_arraylen; - radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH; + radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH; radioa_array_table = RTL8723E_RADIOA_1TARRAY; + radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH; + radiob_array_table = RTL8723E_RADIOB_1TARRAY; + + rtstatus = true; switch (rfpath) { case RF90_PATH_A: for (i = 0; i < radioa_arraylen; i = i + 2) { - rtl_rfreg_delay(hw, rfpath, radioa_array_table[i], - RFREG_OFFSET_MASK, - radioa_array_table[i + 1]); + if (radioa_array_table[i] == 0xfe) { + mdelay(50); + } else if (radioa_array_table[i] == 0xfd) { + mdelay(5); + } else if (radioa_array_table[i] == 0xfc) { + mdelay(1); + } else if (radioa_array_table[i] == 0xfb) { + udelay(50); + } else if (radioa_array_table[i] == 0xfa) { + udelay(5); + } else if (radioa_array_table[i] == 0xf9) { + udelay(1); + } else { + rtl_set_rfreg(hw, rfpath, radioa_array_table[i], + RFREG_OFFSET_MASK, + radioa_array_table[i + 1]); + udelay(1); + } } break; case RF90_PATH_B: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); - break; case RF90_PATH_C: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); - break; case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); break; } return true; } -void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +void rtl8723e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; rtlphy->default_initialgain[0] = (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); @@ -504,10 +566,10 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3]); + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0); @@ -516,37 +578,43 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Default framesync (0x%x) = 0x%x\n", - ROFDM0_RXDETECTOR3, rtlphy->framesync); + ROFDM0_RXDETECTOR3, rtlphy->framesync); } -void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +void rtl8723e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = 
&(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 txpwr_level; long txpwr_dbm; txpwr_level = rtlphy->cur_cck_txpwridx; - txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level); + txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_B, txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx + rtlefuse->legacy_ht_txpowerdiff; - if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm) - txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, - txpwr_level); + if (rtl8723_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx; - if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) > - txpwr_dbm) - txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, - txpwr_level); + if (rtl8723_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); *powerlevel = txpwr_dbm; } -static void _rtl8723ae_get_txpower_index(struct ieee80211_hw *hw, u8 channel, - u8 *cckpowerlevel, u8 *ofdmpowerlevel) +static void _rtl8723e_get_txpower_index(struct ieee80211_hw *hw, u8 channel, + u8 *cckpowerlevel, u8 *ofdmpowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 index = (channel - 1); @@ -567,66 +635,70 @@ static void _rtl8723ae_get_txpower_index(struct ieee80211_hw *hw, u8 channel, } } -static void _rtl8723ae_ccxpower_index_check(struct ieee80211_hw *hw, - u8 channel, u8 *cckpowerlevel, - u8 *ofdmpowerlevel) +static void _rtl8723e_ccxpower_index_check(struct ieee80211_hw *hw, + u8 channel, u8 *cckpowerlevel, + u8 *ofdmpowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; + } -void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +void rtl8723e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 cckpowerlevel[2], ofdmpowerlevel[2]; if (rtlefuse->txpwr_fromeprom == false) return; - _rtl8723ae_get_txpower_index(hw, channel, &cckpowerlevel[0], - &ofdmpowerlevel[0]); - _rtl8723ae_ccxpower_index_check(hw, channel, &cckpowerlevel[0], - &ofdmpowerlevel[0]); - rtl8723ae_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); - rtl8723ae_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); + _rtl8723e_get_txpower_index(hw, channel, + &cckpowerlevel[0], &ofdmpowerlevel[0]); + _rtl8723e_ccxpower_index_check(hw, + channel, &cckpowerlevel[0], + &ofdmpowerlevel[0]); + rtl8723e_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); + rtl8723e_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); } -bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) +bool rtl8723e_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 idx; u8 rf_path; - u8 ccktxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_B, - 
power_indbm); - u8 ofdmtxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_N_24G, - power_indbm); + u8 ccktxpwridx = _rtl8723e_phy_dbm_to_txpwr_idx(hw, + WIRELESS_MODE_B, + power_indbm); + u8 ofdmtxpwridx = _rtl8723e_phy_dbm_to_txpwr_idx(hw, + WIRELESS_MODE_N_24G, + power_indbm); if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0) ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff; else ofdmtxpwridx = 0; RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE, "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n", - power_indbm, ccktxpwridx, ofdmtxpwridx); + power_indbm, ccktxpwridx, ofdmtxpwridx); for (idx = 0; idx < 14; idx++) { for (rf_path = 0; rf_path < 2; rf_path++) { rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx; rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] = - ofdmtxpwridx; + ofdmtxpwridx; rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] = - ofdmtxpwridx; + ofdmtxpwridx; } } - rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl8723e_phy_set_txpower_level(hw, rtlphy->current_channel); return true; } -static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - long power_indbm) +static u8 _rtl8723e_phy_dbm_to_txpwr_idx(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + long power_indbm) { u8 txpwridx; long offset; @@ -645,7 +717,7 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, } if ((power_indbm - offset) > 0) - txpwridx = (u8) ((power_indbm - offset) * 2); + txpwridx = (u8)((power_indbm - offset) * 2); else txpwridx = 0; @@ -655,19 +727,48 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, return txpwridx; } -void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + enum io_type iotype; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Unknown Scan Backup operation.\n"); + break; + } + } +} + +void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 reg_bw_opmode; u8 reg_prsr_rsc; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n", - rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? - "20MHz" : "40MHz"); + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? 
+ "20MHz" : "40MHz"); if (is_hal_stop(rtlhal)) { rtlphy->set_bwmode_inprogress = false; @@ -719,16 +820,16 @@ void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); break; } - rtl8723ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); rtlphy->set_bwmode_inprogress = false; - RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n"); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); } -void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw, - enum nl80211_channel_type ch_type) +void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tmp_bw = rtlphy->current_chan_bw; @@ -736,20 +837,20 @@ void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw, return; rtlphy->set_bwmode_inprogress = true; if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { - rtl8723ae_phy_set_bw_mode_callback(hw); + rtl8723e_phy_set_bw_mode_callback(hw); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "FALSE driver sleep or unload\n"); + "false driver sleep or unload\n"); rtlphy->set_bwmode_inprogress = false; rtlphy->current_chan_bw = tmp_bw; } } -void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw) +void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 delay; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, @@ -759,7 +860,7 @@ void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw) do { if (!rtlphy->sw_chnl_inprogress) break; - if (!_phy_sw_chnl_step_by_step + if (!_rtl8723e_phy_sw_chnl_step_by_step (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage, &rtlphy->sw_chnl_step, &delay)) { if (delay > 0) @@ -771,13 +872,13 @@ void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw) } break; } while (true); - RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n"); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); } -u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw) +u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (rtlphy->sw_chnl_inprogress) @@ -790,9 +891,9 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw) rtlphy->sw_chnl_stage = 0; rtlphy->sw_chnl_step = 0; if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { - rtl8723ae_phy_sw_chnl_callback(hw); + rtl8723e_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schedule workitem\n"); + "sw_chnl_inprogress false schdule workitem\n"); rtlphy->sw_chnl_inprogress = false; } else { RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, @@ -802,31 +903,33 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw) return 1; } -static void _rtl8723ae_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel) +static void _rtl8723e_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { if (channel == 6 && rtlphy->current_chan_bw == - 
HT_CHANNEL_WIDTH_20) - rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, - 0x00255); + HT_CHANNEL_WIDTH_20) + rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, + MASKDWORD, 0x00255); else{ - u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A, - RF_RX_G1, RFREG_OFFSET_MASK); - rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, - backupRF0x1A); + u32 backuprf0x1a = (u32)rtl_get_rfreg(hw, + RF90_PATH_A, RF_RX_G1, + RFREG_OFFSET_MASK); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, + MASKDWORD, backuprf0x1a); } } } -static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, - u8 *stage, u8 *step, u32 *delay) +static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; u32 precommoncmdcnt; struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; @@ -839,14 +942,16 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, precommoncmdcnt = 0; rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, - 0, 0, 0); + MAX_PRECMD_CNT, + CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + postcommoncmdcnt = 0; rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); + rfdependcmdcnt = 0; RT_ASSERT((channel >= 1 && channel <= 14), @@ -854,10 +959,11 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, - RF_CHNLBW, channel, 10); + RF_CHNLBW, channel, 10); rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0); + MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, + 0); do { switch (*stage) { @@ -870,6 +976,10 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Invalid 'stage' = %d, Check it!\n", *stage); + return true; } if (currentcmd->cmdid == CMDID_END) { @@ -884,7 +994,7 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, switch (currentcmd->cmdid) { case CMDID_SET_TXPOWEROWER_LEVEL: - rtl8723ae_phy_set_txpower_level(hw, channel); + rtl8723e_phy_set_txpower_level(hw, channel); break; case CMDID_WRITEPORT_ULONG: rtl_write_dword(rtlpriv, currentcmd->para1, @@ -909,10 +1019,10 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]); } - _rtl8723ae_phy_sw_rf_seting(hw, channel); + _rtl8723e_phy_sw_rf_seting(hw, channel); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } @@ -925,7 +1035,7 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, return false; } -static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) +static u8 _rtl8723e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) { u32 reg_eac, reg_e94, reg_e9c, reg_ea4; u8 result = 0x00; @@ -968,7 +1078,7 @@ static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) return result; } -static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw) +static u8 
_rtl8723e_phy_path_b_iqk(struct ieee80211_hw *hw) { u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc; u8 result = 0x00; @@ -995,8 +1105,8 @@ static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw) return result; } -static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8], - u8 c1, u8 c2) +static bool _rtl8723e_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) { u32 i, j, diff, simularity_bitmap, bound; @@ -1047,11 +1157,11 @@ static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8], } -static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, - long result[][8], u8 t, bool is2t) +static void _rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, + long result[][8], u8 t, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 i; u8 patha_ok, pathb_ok; u32 adda_reg[IQK_ADDA_REG_NUM] = { @@ -1060,22 +1170,28 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec }; + u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { 0x522, 0x550, 0x551, 0x040 }; + const u32 retrycount = 2; + u32 bbvalue; + if (t == 0) { - rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup, - 16); + bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); + + rtl8723_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); rtl8723_phy_save_mac_registers(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); } rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) { rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); + RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); } if (!rtlphy->rfpi_enable) @@ -1101,7 +1217,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00); rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800); for (i = 0; i < retrycount; i++) { - patha_ok = _rtl8723ae_phy_path_a_iqk(hw, is2t); + patha_ok = _rtl8723e_phy_path_a_iqk(hw, is2t); if (patha_ok == 0x03) { result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) & 0x3FF0000) >> 16; @@ -1115,7 +1231,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, } else if (i == (retrycount - 1) && patha_ok == 0x01) result[t][0] = (rtl_get_bbreg(hw, 0xe94, - MASKDWORD) & 0x3FF0000) >> 16; + MASKDWORD) & 0x3FF0000) >> + 16; result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16; @@ -1125,11 +1242,12 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, rtl8723_phy_path_a_standby(hw); rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t); for (i = 0; i < retrycount; i++) { - pathb_ok = _rtl8723ae_phy_path_b_iqk(hw); + pathb_ok = _rtl8723e_phy_path_b_iqk(hw); if (pathb_ok == 0x03) { - result[t][4] = - (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) & - 0x3FF0000) >> 16; + result[t][4] = (rtl_get_bbreg(hw, + 0xeb4, + MASKDWORD) & + 0x3FF0000) >> 16; result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & 0x3FF0000) >> 16; @@ -1141,9 +1259,10 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, 0x3FF0000) >> 16; break; } else if (i == (retrycount - 1) && pathb_ok == 0x01) { - result[t][4] = - (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) & - 0x3FF0000) >> 16; + result[t][4] = (rtl_get_bbreg(hw, + 0xeb4, + MASKDWORD) & + 0x3FF0000) >> 16; } result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & 0x3FF0000) >> 16; @@ -1166,11 +1285,11 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, } } -static void _rtl8723ae_phy_lc_calibrate(struct 
ieee80211_hw *hw, bool is2t) +static void _rtl8723e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { - struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmpreg; u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + struct rtl_priv *rtlpriv = rtl_priv(hw); tmpreg = rtl_read_byte(rtlpriv, 0xd03); @@ -1211,14 +1330,14 @@ static void _rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) } } -static void _rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, - bool bmain, bool is2t) +static void _rtl8723e_phy_set_rfpath_switch(struct ieee80211_hw *hw, + bool bmain, bool is2t) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (is_hal_stop(rtlhal)) { rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01); - rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); } if (is2t) { if (bmain) @@ -1234,21 +1353,23 @@ static void _rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1); } + } #undef IQK_ADDA_REG_NUM #undef IQK_DELAY_TIME -void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; + long result[4][8]; u8 i, final_candidate; - bool patha_ok, pathb_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_tmp = 0; + bool b_patha_ok, b_pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, + reg_ecc, reg_tmp = 0; bool is12simular, is13simular, is23simular; - bool start_conttx = false, singletone = false; u32 iqk_bb_reg[10] = { ROFDM0_XARXIQIMBALANCE, ROFDM0_XBRXIQIMBALANCE, @@ -1262,13 +1383,12 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) ROFDM0_RXIQEXTANTA }; - if (recovery) { - rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg, + if (b_recovery) { + rtl8723_phy_reload_adda_registers(hw, + iqk_bb_reg, rtlphy->iqk_bb_backup, 10); return; } - if (start_conttx || singletone) - return; for (i = 0; i < 8; i++) { result[0][i] = 0; result[1][i] = 0; @@ -1276,30 +1396,33 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) result[3][i] = 0; } final_candidate = 0xff; - patha_ok = false; - pathb_ok = false; + b_patha_ok = false; + b_pathb_ok = false; is12simular = false; is23simular = false; is13simular = false; for (i = 0; i < 3; i++) { - _rtl8723ae_phy_iq_calibrate(hw, result, i, false); + _rtl8723e_phy_iq_calibrate(hw, result, i, false); if (i == 1) { - is12simular = phy_simularity_comp(hw, result, 0, 1); + is12simular = + _rtl8723e_phy_simularity_compare(hw, result, 0, 1); if (is12simular) { final_candidate = 0; break; } } if (i == 2) { - is13simular = phy_simularity_comp(hw, result, 0, 2); + is13simular = + _rtl8723e_phy_simularity_compare(hw, result, 0, 2); if (is13simular) { final_candidate = 0; break; } - is23simular = phy_simularity_comp(hw, result, 1, 2); - if (is23simular) { + is23simular = + _rtl8723e_phy_simularity_compare(hw, result, 1, 2); + if (is23simular) final_candidate = 1; - } else { + else { for (i = 0; i < 8; i++) reg_tmp += result[3][i]; @@ -1314,50 +1437,54 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; + reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; } if (final_candidate != 0xff) { rtlphy->reg_e94 = 
reg_e94 = result[final_candidate][0]; rtlphy->reg_e9c = reg_e9c = result[final_candidate][1]; reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4]; rtlphy->reg_ebc = reg_ebc = result[final_candidate][5]; - patha_ok = pathb_ok = true; + reg_ec4 = result[final_candidate][6]; + reg_ecc = result[final_candidate][7]; + b_patha_ok = true; + b_pathb_ok = true; } else { rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; } - if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ - rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + if (reg_e94 != 0) + rtl8723_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, final_candidate, (reg_ea4 == 0)); - rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); + rtl8723_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 10); } -void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw) +void rtl8723e_phy_lc_calibrate(struct ieee80211_hw *hw) { - bool start_conttx = false, singletone = false; - - if (start_conttx || singletone) - return; - _rtl8723ae_phy_lc_calibrate(hw, false); + _rtl8723e_phy_lc_calibrate(hw, false); } -void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +void rtl8723e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) { - _rtl8723ae_phy_set_rfpath_switch(hw, bmain, false); + _rtl8723e_phy_set_rfpath_switch(hw, bmain, false); } -bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; bool postprocessing = false; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "-->IO Cmd(%#x), set_io_inprogress(%d)\n", - iotype, rtlphy->set_io_inprogress); + iotype, rtlphy->set_io_inprogress); do { switch (iotype) { case IO_CMD_RESUME_DM_BY_SCAN: @@ -1365,13 +1492,13 @@ bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) "[IO CMD] Resume DM after scan.\n"); postprocessing = true; break; - case IO_CMD_PAUSE_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "[IO CMD] Pause DM before scan.\n"); postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } @@ -1382,42 +1509,42 @@ bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) } else { return false; } - rtl8723ae_phy_set_io(hw); - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype); + rtl8723e_phy_set_io(hw); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype); return true; } -static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw) +static void rtl8723e_phy_set_io(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct dig_t *dm_digtable = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress); + rtlphy->current_io_type, rtlphy->set_io_inprogress); switch (rtlphy->current_io_type) { case IO_CMD_RESUME_DM_BY_SCAN: dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; - rtl8723ae_dm_write_dig(hw); - rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl8723e_dm_write_dig(hw); + 
rtl8723e_phy_set_txpower_level(hw, rtlphy->current_channel); break; - case IO_CMD_PAUSE_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; dm_digtable->cur_igvalue = 0x17; - rtl8723ae_dm_write_dig(hw); + rtl8723e_dm_write_dig(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } rtlphy->set_io_inprogress = false; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "<---(%#x)\n", rtlphy->current_io_type); + "(%#x)\n", rtlphy->current_io_type); } -static void rtl8723ae_phy_set_rf_on(struct ieee80211_hw *hw) +static void rtl8723e_phy_set_rf_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1429,11 +1556,11 @@ static void rtl8723ae_phy_set_rf_on(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); } -static void _rtl8723ae_phy_set_rf_sleep(struct ieee80211_hw *hw) +static void _rtl8723e_phy_set_rf_sleep(struct ieee80211_hw *hw) { - struct rtl_priv *rtlpriv = rtl_priv(hw); u32 u4b_tmp; u8 delay = 5; + struct rtl_priv *rtlpriv = rtl_priv(hw); rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); @@ -1459,45 +1586,47 @@ static void _rtl8723ae_phy_set_rf_sleep(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); } -static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, - enum rf_pwrstate rfpwr_state) +static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl8192_tx_ring *ring = NULL; bool bresult = true; u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; switch (rfpwr_state) { case ERFON: if ((ppsc->rfpwr_state == ERFOFF) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 InitializeCount = 0; + u32 initializecount = 0; + do { - InitializeCount++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while ((rtstatus != true) && (InitializeCount < 10)); + } while (!rtstatus && (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "Set ERFON sleeped:%d ms\n", - jiffies_to_msecs(jiffies - - ppsc->last_sleep_jiffies)); + jiffies_to_msecs(jiffies - + ppsc-> + last_sleep_jiffies)); ppsc->last_awake_jiffies = jiffies; - rtl8723ae_phy_set_rf_on(hw); + rtl8723e_phy_set_rf_on(hw); } if (mac->link_state == MAC80211_LINKED) { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_LINK); + LED_CTL_LINK); } else { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_NO_LINK); + LED_CTL_NO_LINK); } break; case ERFOFF: @@ -1509,10 +1638,10 @@ static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, } else { if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_NO_LINK); + LED_CTL_NO_LINK); } else { rtlpriv->cfg->ops->led_control(hw, - LED_CTL_POWER_OFF); + LED_CTL_POWER_OFF); } } break; @@ -1522,7 +1651,8 @@ static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, for (queue_id = 0, i = 0; queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { ring = &pcipriv->dev.tx_ring[queue_id]; - if (skb_queue_len(&ring->queue) == 0) { + if (queue_id == BEACON_QUEUE || + skb_queue_len(&ring->queue) == 0) { queue_id++; continue; } 
else { @@ -1536,22 +1666,23 @@ static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", - MAX_DOZE_WAITING_TIMES_9x, - queue_id, - skb_queue_len(&ring->queue)); + "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); break; } } RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "Set ERFSLEEP awaked:%d ms\n", - jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies)); + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies)); ppsc->last_sleep_jiffies = jiffies; - _rtl8723ae_phy_set_rf_sleep(hw); + _rtl8723e_phy_set_rf_sleep(hw); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); bresult = false; break; } @@ -1560,14 +1691,15 @@ static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, return bresult; } -bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, - enum rf_pwrstate rfpwr_state) +bool rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) { struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = false; if (rfpwr_state == ppsc->rfpwr_state) return bresult; - bresult = _rtl8723ae_phy_set_rf_power_state(hw, rfpwr_state); + bresult = _rtl8723e_phy_set_rf_power_state(hw, rfpwr_state); return bresult; } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h index cd43139ed332abc85f656e8c766f5b680e5adeb1..b85f5c7c5c011ae480a9672b5ae5b7f16eeae007 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -39,6 +35,7 @@ #define RT_CANNOT_IO(hw) false #define HIGHPOWER_RADIOA_ARRAYLEN 22 +#define IQK_ADDA_REG_NUM 16 #define MAX_TOLERANCE 5 #define IQK_DELAY_TIME 1 @@ -49,12 +46,15 @@ #define LOOP_LIMIT 5 #define MAX_STALL_TIME 50 -#define AntennaDiversityValue 0x80 +#define ANTENNADIVERSITYVALUE 0x80 #define MAX_TXPWR_IDX_NMODE_92S 63 #define Reset_Cnt_Limit 3 +#define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 +#define IQK_DELAY_TIME 1 + #define RF6052_MAX_PATH 2 #define CT_OFFSET_MAC_ADDR 0X16 @@ -166,36 +166,37 @@ struct tx_power_struct { u32 mcs_original_offset[4][16]; }; -u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 regaddr, - u32 bitmask); -void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw, +u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, - u32 bitmask, u32 data); -bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw); -bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw); -bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw); + u32 bitmask); +void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data); +bool rtl8723e_phy_mac_config(struct ieee80211_hw *hw); +bool rtl8723e_phy_bb_config(struct ieee80211_hw *hw); +bool rtl8723e_phy_rf_config(struct ieee80211_hw *hw); bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, enum radio_path rfpath); -void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); -void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, - long *powerlevel); -void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, - u8 channel); -bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw, - long power_indbm); -void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw); -void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw, - enum nl80211_channel_type ch_type); -void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw); -u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw); -void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery); -void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw); -void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); -bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, - enum radio_path rfpath); -bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); -bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw, - enum rf_pwrstate rfpwr_state); +void rtl8723e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +void rtl8723e_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +void rtl8723e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +bool rtl8723e_phy_update_txpower_dbm(struct ieee80211_hw *hw, + long power_indbm); +void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw); +u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw); +void rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); +void rtl8723e_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl8723e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum 
io_type iotype); +bool rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c index df6ca9a57f7f1ca016f87a0e1ffdac64d6879993..2f7f81af8a556647f7c745a044b0b91e9dec8914 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -27,83 +23,90 @@ * *****************************************************************************/ -#include "pwrseqcmd.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" -/* drivers should parse arrays below and do the corresponding actions */ - +/* drivers should parse below arrays and do the corresponding actions */ /*3 Power on Array*/ -struct wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_CARDEMU_TO_ACT, +struct wlan_pwr_cfg rtl8723A_power_on_flow + [RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_CARDEMU_TO_ACT RTL8723A_TRANS_END }; /*3Radio off GPIO Array */ -struct wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_ACT_TO_CARDEMU, +struct wlan_pwr_cfg rtl8723A_radio_off_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_ACT_TO_CARDEMU RTL8723A_TRANS_END }; /*3Card Disable Array*/ -struct wlan_pwr_cfg -rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_ACT_TO_CARDEMU, - RTL8723A_TRANS_CARDEMU_TO_CARDDIS, +struct wlan_pwr_cfg rtl8723A_card_disable_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_ACT_TO_CARDEMU + RTL8723A_TRANS_CARDEMU_TO_CARDDIS RTL8723A_TRANS_END }; /*3 Card Enable Array*/ -struct wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_CARDDIS_TO_CARDEMU, - RTL8723A_TRANS_CARDEMU_TO_ACT, +struct wlan_pwr_cfg rtl8723A_card_enable_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_CARDDIS_TO_CARDEMU + RTL8723A_TRANS_CARDEMU_TO_ACT RTL8723A_TRANS_END }; /*3Suspend Array*/ -struct wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_ACT_TO_CARDEMU, - RTL8723A_TRANS_CARDEMU_TO_SUS, +struct wlan_pwr_cfg rtl8723A_suspend_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_ACT_TO_CARDEMU + RTL8723A_TRANS_CARDEMU_TO_SUS RTL8723A_TRANS_END }; /*3 Resume Array*/ -struct wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_SUS_TO_CARDEMU, - 
RTL8723A_TRANS_CARDEMU_TO_ACT, +struct wlan_pwr_cfg rtl8723A_resume_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_SUS_TO_CARDEMU + RTL8723A_TRANS_CARDEMU_TO_ACT RTL8723A_TRANS_END }; /*3HWPDN Array*/ -struct wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS] = { - RTL8723A_TRANS_ACT_TO_CARDEMU, - RTL8723A_TRANS_CARDEMU_TO_PDN, +struct wlan_pwr_cfg rtl8723A_hwpdn_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS] = { + RTL8723A_TRANS_ACT_TO_CARDEMU + RTL8723A_TRANS_CARDEMU_TO_PDN RTL8723A_TRANS_END }; /*3 Enter LPS */ -struct wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS - + RTL8723A_TRANS_END_STPS] = { +struct wlan_pwr_cfg rtl8723A_enter_lps_flow + [RTL8723A_TRANS_ACT_TO_LPS_STEPS + + RTL8723A_TRANS_END_STEPS] = { /*FW behavior*/ - RTL8723A_TRANS_ACT_TO_LPS, + RTL8723A_TRANS_ACT_TO_LPS RTL8723A_TRANS_END }; /*3 Leave LPS */ -struct wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS - + RTL8723A_TRANS_END_STPS] = { +struct wlan_pwr_cfg rtl8723A_leave_lps_flow + [RTL8723A_TRANS_LPS_TO_ACT_STEPS + + RTL8723A_TRANS_END_STEPS] = { /*FW behavior*/ - RTL8723A_TRANS_LPS_TO_ACT, + RTL8723A_TRANS_LPS_TO_ACT RTL8723A_TRANS_END }; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h index a418acb4d0ca1e56be786cc4122e4d8024dca33c..4ac7db526f152b70b682b54cdbe46c3304126371 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -30,282 +26,305 @@ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ +#include "../pwrseqcmd.h" /* - Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd - There are 6 HW Power States: - 0: POFF--Power Off - 1: PDN--Power Down - 2: CARDEMU--Card Emulation - 3: ACT--Active Mode - 4: LPS--Low Power State - 5: SUS--Suspend - - The transision from different states are defined below - TRANS_CARDEMU_TO_ACT - TRANS_ACT_TO_CARDEMU - TRANS_CARDEMU_TO_SUS - TRANS_SUS_TO_CARDEMU - TRANS_CARDEMU_TO_PDN - TRANS_ACT_TO_LPS - TRANS_LPS_TO_ACT + * Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd + * There are 6 HW Power States: + * 0: POFF--Power Off + * 1: PDN--Power Down + * 2: CARDEMU--Card Emulation + * 3: ACT--Active Mode + * 4: LPS--Low Power State + * 5: SUS--Suspend + * + * The transision from different states are defined below + * TRANS_CARDEMU_TO_ACT + * TRANS_ACT_TO_CARDEMU + * TRANS_CARDEMU_TO_SUS + * TRANS_SUS_TO_CARDEMU + * TRANS_CARDEMU_TO_PDN + * TRANS_ACT_TO_LPS + * TRANS_LPS_TO_ACT + * + * TRANS_END + */ - TRANS_END -*/ +#define RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS 10 +#define RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS 10 +#define RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS 10 +#define RTL8723A_TRANS_SUS_TO_CARDEMU_STEPS 10 +#define RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS 10 +#define RTL8723A_TRANS_PDN_TO_CARDEMU_STEPS 10 +#define RTL8723A_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8723A_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8723A_TRANS_END_STEPS 1 -#define RTL8723A_TRANS_CARDEMU_TO_ACT_STPS 10 -#define RTL8723A_TRANS_ACT_TO_CARDEMU_STPS 10 -#define RTL8723A_TRANS_CARDEMU_TO_SUS_STPS 10 -#define RTL8723A_TRANS_SUS_TO_CARDEMU_STPS 10 -#define RTL8723A_TRANS_CARDEMU_TO_PDN_STPS 10 -#define RTL8723A_TRANS_PDN_TO_CARDEMU_STPS 10 -#define RTL8723A_TRANS_ACT_TO_LPS_STPS 15 -#define RTL8723A_TRANS_LPS_TO_ACT_STPS 15 -#define RTL8723A_TRANS_END_STPS 1 +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }*/ +#define RTL8723A_TRANS_CARDEMU_TO_ACT \ + /* disable SW LPS 0x04[10]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0},\ + /* wait till 0x04[17] = 1 power ready*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},\ + /* release WLON reset 0x04[16]=1*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},\ + /* disable HWPDN 0x04[15]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},\ + /* disable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0},\ + /* polling until return 0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},\ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, -#define RTL8723A_TRANS_CARDEMU_TO_ACT \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \ - * comments here*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0}, \ - /* disable SW LPS 0x04[10]=0*/ \ - {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /* wait till 0x04[17] = 1 power ready*/ \ - {0x0006, PWR_CUT_ALL_MSK, 
PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /* release WLON reset 0x04[16]=1*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ - /* disable HWPDN 0x04[15]=0*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \ - /* disable WL suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /* polling until return 0*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0} +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ -#define RTL8723A_TRANS_ACT_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \ - * comments here*/ \ - {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ - /*0x1F[7:0] = 0 turn off RF*/ \ - {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0} +#define RTL8723A_TRANS_ACT_TO_CARDEMU \ + /*0x1F[7:0] = 0 turn off RF*/ \ + {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ + {0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},\ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, -#define RTL8723A_TRANS_CARDEMU_TO_SUS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \ - * comments here*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), \ - (BIT(4)|BIT(3))}, \ +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/ +#define RTL8723A_TRANS_CARDEMU_TO_SUS \ /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK | \ - PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},\ - /*0x04[12:11] = 2b'01 enable WL suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)}, \ - /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, \ - PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, \ - PWR_CMD_POLLING, BIT(1), 0} \ - /*wait power state to suspend*/ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, \ + BIT(4)|BIT(3), (BIT(4)|BIT(3))},\ +/*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK| \ + PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, \ + BIT(3)|BIT(4), BIT(3)}, \ +/*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ + {0x0005, PWR_CUT_ALL_MSK, 
PWR_FAB_ALL_MSK, \ + PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(3)|BIT(4), \ + BIT(3)|BIT(4)}, \ +/*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_WRITE, BIT(0), BIT(0)}, \ +/*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_POLLING, BIT(1), 0}, + +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ + +#define RTL8723A_TRANS_SUS_TO_CARDEMU \ + /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},\ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},\ + /*0x04[12:11] = 2b'01enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, + +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ + +#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS \ + /*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ +/*0x04[10] = 1, enable SW LPS*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(2), BIT(2)}, \ +/*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_POLLING, BIT(1), 0}, + +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ -#define RTL8723A_TRANS_SUS_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /*wait power state to suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0} \ - /*0x04[12:11] = 2b'01enable WL suspend*/ +#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU\ +/*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_WRITE, BIT(0), 0}, \ + /*wait power state to suspend*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_POLLING, BIT(1), BIT(1)},\ + /*0x04[12:11] = 2b'00enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(3)|BIT(4), 0},\ +/*PCIe DMA start*/ \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0}, -#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},\ - /*0x04[12:11] = 2b'01 enable WL suspend*/ \ - {0x0005, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \ - /*0x04[10] = 1, enable SW LPS*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0} \ - /*wait power state to suspend*/ +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ +#define RTL8723A_TRANS_CARDEMU_TO_PDN \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/\ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/ -#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /*wait power state to suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \ - /*0x04[12:11] = 2b'00enable WL suspend*/ \ - {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0} \ - /*PCIe DMA start*/ +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ +#define RTL8723A_TRANS_PDN_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/ -#define RTL8723A_TRANS_CARDEMU_TO_PDN \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ - /* 0x04[16] = 0*/\ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)} \ - /* 0x04[15] = 1*/ +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ -#define RTL8723A_TRANS_PDN_TO_CARDEMU \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0} \ - /* 0x04[15] = 0*/ +#define RTL8723A_TRANS_ACT_TO_LPS \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \ + /*Should be zero if no packet is transmitting*/ \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_POLLING, 0xFF, 0},\ + /*Should be zero if no packet is transmitting*/ \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_POLLING, 0xFF, 0},\ + /*Should be zero if no packet is transmitting*/ \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_POLLING, 0xFF, 0},\ + /*Should be zero if no packet is transmitting*/ \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, 
PWR_BASEADDR_MAC,\ + PWR_CMD_POLLING, 0xFF, 0},\ + /*CCK and OFDM are disabled,and clock are gated*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(0), 0},\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(1), 0},/*Whole BB is reset*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/ \ + /*Respond TxOK to scheduler*/ \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(5), BIT(5)},\ -#define RTL8723A_TRANS_ACT_TO_LPS \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ - /*PCIe DMA stop*/ \ - {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F}, \ - /*Tx Pause*/ \ - {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ - /*Should be zero if no packet is transmitting*/ \ - {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ - /*Should be zero if no packet is transmitting*/ \ - {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ - /*Should be zero if no packet is transmitting*/ \ - {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ - /*Should be zero if no packet is transmitting*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ - /*CCK and OFDM are disabled,and clock are gated*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \ - /*Delay 1us*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ - /*Whole BB is reset*/ \ - {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F}, \ - /*Reset MAC TRX*/ \ - {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ - /*check if removed later*/ \ - {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)} \ - /*Respond TxOK to scheduler*/ +#define RTL8723A_TRANS_LPS_TO_ACT\ +/* format */ \ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO,\ + PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\ + /*. 
0x08[4] = 0 switch TSF to 40M*/\ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(4), 0}, \ + /*Polling 0x109[7]=0 TSF in 40M*/\ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_POLLING, BIT(7), 0}, \ + /*. 0x29[7:6] = 2b'00 enable BB clock*/\ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(6)|BIT(7), 0},\ + /*. 0x101[1] = 1*/\ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(1), BIT(1)},\ + /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0xFF},\ + /*. 0x02[1:0] = 2b'11 enable BB macro*/\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)},\ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC,\ + PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/ -#define RTL8723A_TRANS_LPS_TO_ACT \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \ - /*SDIO RPWM*/ \ - {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ - /*USB RPWM*/ \ - {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ - /*PCIe RPWM*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \ - /*Delay*/ \ - {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ - /* 0x08[4] = 0 switch TSF to 40M*/ \ - {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ - /*Polling 0x109[7]=0 TSF in 40M*/ \ - {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \ - /*. 0x29[7:6] = 2b'00 enable BB clock*/ \ - {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ - /*. 0x101[1] = 1*/ \ - {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ - /* 0x100[7:0] = 0xFF enable WMAC TRX*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), \ - BIT(1)|BIT(0)}, \ - /* 0x02[1:0] = 2b'11 enable BB macro*/ \ - {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0} \ - /*. 
0x522 = 0*/ +/* format */ +/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */ -#define RTL8723A_TRANS_END \ - /* format */ \ - /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ - {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ +#define RTL8723A_TRANS_END \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ 0, PWR_CMD_END, 0, 0} -extern struct -wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS - + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS - + RTL8723A_TRANS_END_STPS]; -extern struct -wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS - + RTL8723A_TRANS_END_STPS]; +extern struct wlan_pwr_cfg rtl8723A_power_on_flow + [RTL8723A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_radio_off_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_card_disable_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_card_enable_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_suspend_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_resume_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_hwpdn_flow + [RTL8723A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_enter_lps_flow + [RTL8723A_TRANS_ACT_TO_LPS_STEPS + RTL8723A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723A_leave_lps_flow + [RTL8723A_TRANS_LPS_TO_ACT_STEPS + RTL8723A_TRANS_END_STEPS]; /* RTL8723 Power Configuration CMDs for PCIe interface */ #define Rtl8723_NIC_PWR_ON_FLOW rtl8723A_power_on_flow diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c deleted file mode 100644 index 2044b5936b7fce0511d571823ec98d6ad706cd3d..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c +++ /dev/null @@ -1,129 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ - -#include "pwrseq.h" - -/* Description: - * This routine deals with the Power Configuration CMD - * parsing for RTL8723/RTL8188E Series IC. - * Assumption: - * We should follow specific format that was released from HW SD. - */ -bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 faversion, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]) -{ - struct wlan_pwr_cfg cfg_cmd = {0}; - bool polling_bit = false; - u32 ary_idx = 0; - u8 value = 0; - u32 offset = 0; - u32 polling_count = 0; - u32 max_polling_cnt = 5000; - - do { - cfg_cmd = pwrcfgcmd[ary_idx]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x)," - "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n", - GET_PWR_CFG_OFFSET(cfg_cmd), - GET_PWR_CFG_CUT_MASK(cfg_cmd), - GET_PWR_CFG_FAB_MASK(cfg_cmd), - GET_PWR_CFG_INTF_MASK(cfg_cmd), - GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd), - GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd)); - - if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) && - (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) && - (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) { - switch (GET_PWR_CFG_CMD(cfg_cmd)) { - case PWR_CMD_READ: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"); - break; - case PWR_CMD_WRITE: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n"); - offset = GET_PWR_CFG_OFFSET(cfg_cmd); - - /*Read the value from system register*/ - value = rtl_read_byte(rtlpriv, offset); - value &= (~(GET_PWR_CFG_MASK(cfg_cmd))); - value |= (GET_PWR_CFG_VALUE(cfg_cmd) & - GET_PWR_CFG_MASK(cfg_cmd)); - - /*Write the value back to sytem register*/ - rtl_write_byte(rtlpriv, offset, value); - break; - case PWR_CMD_POLLING: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n"); - polling_bit = false; - offset = GET_PWR_CFG_OFFSET(cfg_cmd); - - do { - value = rtl_read_byte(rtlpriv, offset); - - value &= GET_PWR_CFG_MASK(cfg_cmd); - if (value == - (GET_PWR_CFG_VALUE(cfg_cmd) - & GET_PWR_CFG_MASK(cfg_cmd))) - polling_bit = true; - else - udelay(10); - - if (polling_count++ > max_polling_cnt) - return false; - } while (!polling_bit); - break; - case PWR_CMD_DELAY: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n"); - if (GET_PWR_CFG_VALUE(cfg_cmd) == - PWRSEQ_DELAY_US) - udelay(GET_PWR_CFG_OFFSET(cfg_cmd)); - else - mdelay(GET_PWR_CFG_OFFSET(cfg_cmd)); - break; - 
case PWR_CMD_END: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); - return true; - default: - RT_ASSERT(false, - "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); - break; - } - - } - ary_idx++; - } while (1); - - return true; -} diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h index ce2c66fd9eeeaec1f943130a8190805d848d631b..306059f9b9cc50cde4af1a535249147201c8f520 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -34,13 +30,13 @@ #define REG_SYS_FUNC_EN 0x0002 #define REG_APS_FSMCO 0x0004 #define REG_SYS_CLKR 0x0008 -#define REG_9346CR 0x000A -#define REG_EE_VPD 0x000C +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C #define REG_AFE_MISC 0x0010 #define REG_SPS0_CTRL 0x0011 #define REG_SPS_OCP_CFG 0x0018 #define REG_RSV_CTRL 0x001C -#define REG_RF_CTRL 0x001F +#define REG_RF_CTRL 0x001F #define REG_LDOA15_CTRL 0x0020 #define REG_LDOV12D_CTRL 0x0021 #define REG_LDOHCI12_CTRL 0x0022 @@ -57,12 +53,12 @@ #define REG_MAC_PINMUX_CFG 0x0043 #define REG_GPIO_PIN_CTRL 0x0044 #define REG_GPIO_INTM 0x0048 -#define REG_LEDCFG0 0x004C -#define REG_LEDCFG1 0x004D -#define REG_LEDCFG2 0x004E -#define REG_LEDCFG3 0x004F -#define REG_FSIMR 0x0050 -#define REG_FSISR 0x0054 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 #define REG_GPIO_PIN_CTRL_2 0x0060 #define REG_GPIO_IO_SEL_2 0x0062 #define REG_MULTI_FUNC_CTRL 0x0068 @@ -80,25 +76,25 @@ #define REG_USB_SIE_INTF 0x00E0 #define REG_PCIE_MIO_INTF 0x00E4 #define REG_PCIE_MIO_INTD 0x00E8 -#define REG_SYS_CFG 0x00F0 +#define REG_SYS_CFG 0x00F0 #define REG_GPIO_OUTSTS 0x00F4 -#define REG_CR 0x0100 -#define REG_PBP 0x0104 +#define REG_CR 0x0100 +#define REG_PBP 0x0104 #define REG_TRXDMA_CTRL 0x010C #define REG_TRXFF_BNDY 0x0114 #define REG_TRXFF_STATUS 0x0118 #define REG_RXFF_PTR 0x011C -#define REG_HIMR 0x0120 -#define REG_HISR 0x0124 -#define REG_HIMRE 0x0128 -#define REG_HISRE 0x012C -#define REG_CPWM 0x012F -#define REG_FWIMR 0x0130 -#define REG_FWISR 0x0134 +#define REG_HIMR 0x0120 +#define REG_HISR 0x0124 +#define REG_HIMRE 0x0128 +#define REG_HISRE 0x012C +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 #define REG_PKTBUF_DBG_CTRL 0x0140 -#define REG_PKTBUF_DBG_DATA_L 0x0144 -#define REG_PKTBUF_DBG_DATA_H 0x0148 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 #define REG_TC0_CTRL 0x0150 #define REG_TC1_CTRL 0x0154 @@ -109,11 +105,11 @@ #define REG_MBIST_START 0x0174 #define REG_MBIST_DONE 0x0178 #define REG_MBIST_FAIL 0x017C -#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 #define REG_C2HEVT_MSG_TEST 0x01B8 #define REG_MCUTST_1 0x01c0 -#define REG_FMETHR 0x01C8 -#define REG_HMETFR 0x01CC +#define REG_FMETHR 0x01C8 +#define REG_HMETFR 0x01CC #define REG_HMEBOX_0 0x01D0 #define REG_HMEBOX_1 0x01D4 #define REG_HMEBOX_2 0x01D8 @@ -123,10 +119,10 @@ #define REG_BB_ACCEESS_CTRL 
0x01E8 #define REG_BB_ACCESS_DATA 0x01EC -#define REG_RQPN 0x0200 +#define REG_RQPN 0x0200 #define REG_FIFOPAGE 0x0204 -#define REG_TDECTRL 0x0208 -#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C #define REG_TXDMA_STATUS 0x0210 #define REG_RQPN_NPQ 0x0214 @@ -135,18 +131,18 @@ #define REG_RXDMA_STATUS 0x0288 #define REG_PCIE_CTRL_REG 0x0300 -#define REG_INT_MIG 0x0304 +#define REG_INT_MIG 0x0304 #define REG_BCNQ_DESA 0x0308 -#define REG_HQ_DESA 0x0310 +#define REG_HQ_DESA 0x0310 #define REG_MGQ_DESA 0x0318 #define REG_VOQ_DESA 0x0320 #define REG_VIQ_DESA 0x0328 #define REG_BEQ_DESA 0x0330 #define REG_BKQ_DESA 0x0338 -#define REG_RX_DESA 0x0340 -#define REG_DBI 0x0348 -#define REG_MDIO 0x0354 -#define REG_DBG_SEL 0x0360 +#define REG_RX_DESA 0x0340 +#define REG_DBI 0x0348 +#define REG_MDIO 0x0354 +#define REG_DBG_SEL 0x0360 #define REG_PCIE_HRPWM 0x0361 #define REG_PCIE_HCPWM 0x0363 #define REG_UART_CTRL 0x0364 @@ -162,31 +158,31 @@ #define REG_BKQ_INFORMATION 0x040C #define REG_MGQ_INFORMATION 0x0410 #define REG_HGQ_INFORMATION 0x0414 -#define REG_BCNQ_INFORMATION 0x0418 +#define REG_BCNQ_INFORMATION 0x0418 -#define REG_CPU_MGQ_INFORMATION 0x041C +#define REG_CPU_MGQ_INFORMATION 0x041C #define REG_FWHW_TXQ_CTRL 0x0420 #define REG_HWSEQ_CTRL 0x0423 -#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 -#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 #define REG_MULTI_BCNQ_EN 0x0426 -#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_MULTI_BCNQ_OFFSET 0x0427 #define REG_SPEC_SIFS 0x0428 -#define REG_RL 0x042A -#define REG_DARFRC 0x0430 -#define REG_RARFRC 0x0438 -#define REG_RRSR 0x0440 -#define REG_ARFR0 0x0444 -#define REG_ARFR1 0x0448 -#define REG_ARFR2 0x044C -#define REG_ARFR3 0x0450 +#define REG_RL 0x042A +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x0448 +#define REG_ARFR2 0x044C +#define REG_ARFR3 0x0450 #define REG_AGGLEN_LMT 0x0458 #define REG_AMPDU_MIN_SPACE 0x045C -#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D #define REG_FAST_EDCA_CTRL 0x0460 #define REG_RD_RESP_PKT_TH 0x0463 #define REG_INIRTS_RATE_SEL 0x0480 -#define REG_INIDATA_RATE_SEL 0x0484 +#define REG_INIDATA_RATE_SEL 0x0484 #define REG_POWER_STATUS 0x04A4 #define REG_POWER_STAGE1 0x04B4 #define REG_POWER_STAGE2 0x04B8 @@ -194,29 +190,29 @@ #define REG_STBC_SETTING 0x04C4 #define REG_PROT_MODE_CTRL 0x04C8 #define REG_BAR_MODE_CTRL 0x04CC -#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF #define REG_NQOS_SEQ 0x04DC -#define REG_QOS_SEQ 0x04DE +#define REG_QOS_SEQ 0x04DE #define REG_NEED_CPU_HANDLE 0x04E0 #define REG_PKT_LOSE_RPT 0x04E1 #define REG_PTCL_ERR_STATUS 0x04E2 -#define REG_DUMMY 0x04FC +#define REG_DUMMY 0x04FC #define REG_EDCA_VO_PARAM 0x0500 #define REG_EDCA_VI_PARAM 0x0504 #define REG_EDCA_BE_PARAM 0x0508 #define REG_EDCA_BK_PARAM 0x050C -#define REG_BCNTCFG 0x0510 -#define REG_PIFS 0x0512 +#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 #define REG_RDG_PIFS 0x0513 #define REG_SIFS_CTX 0x0514 #define REG_SIFS_TRX 0x0516 #define REG_AGGR_BREAK_TIME 0x051A -#define REG_SLOT 0x051B +#define REG_SLOT 0x051B #define REG_TX_PTCL_CTRL 0x0520 -#define REG_TXPAUSE 0x0522 +#define REG_TXPAUSE 0x0522 #define REG_DIS_TXREQ_CLR 0x0523 -#define REG_RD_CTRL 0x0524 +#define REG_RD_CTRL 0x0524 #define REG_TBTT_PROHIBIT 0x0540 #define REG_RD_NAV_NXT 0x0544 #define 
REG_NAV_PROT_LEN 0x0546 @@ -225,21 +221,21 @@ #define REG_MBID_NUM 0x0552 #define REG_DUAL_TSF_RST 0x0553 #define REG_BCN_INTERVAL 0x0554 -#define REG_MBSSID_BCN_SPACE 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 #define REG_DRVERLYINT 0x0558 #define REG_BCNDMATIM 0x0559 -#define REG_ATIMWND 0x055A +#define REG_ATIMWND 0x055A #define REG_BCN_MAX_ERR 0x055D -#define REG_RXTSF_OFFSET_CCK 0x055E -#define REG_RXTSF_OFFSET_OFDM 0x055F -#define REG_TSFTR 0x0560 +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 #define REG_INIT_TSFTR 0x0564 -#define REG_PSTIMER 0x0580 -#define REG_TIMER0 0x0584 -#define REG_TIMER1 0x0588 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 #define REG_ACMHWCTRL 0x05C0 #define REG_ACMRSTCTRL 0x05C1 -#define REG_ACMAVG 0x05C2 +#define REG_ACMAVG 0x05C2 #define REG_VO_ADMTIME 0x05C4 #define REG_VI_ADMTIME 0x05C6 #define REG_BE_ADMTIME 0x05C8 @@ -248,38 +244,38 @@ #define REG_APSD_CTRL 0x0600 #define REG_BWOPMODE 0x0603 -#define REG_TCR 0x0604 -#define REG_RCR 0x0608 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 #define REG_RX_PKT_LIMIT 0x060C #define REG_RX_DLK_TIME 0x060D #define REG_RX_DRVINFO_SZ 0x060F -#define REG_MACID 0x0610 -#define REG_BSSID 0x0618 -#define REG_MAR 0x0620 +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 #define REG_MBIDCAMCFG 0x0628 #define REG_USTIME_EDCA 0x0638 #define REG_MAC_SPEC_SIFS 0x063A #define REG_RESP_SIFS_CCK 0x063C #define REG_RESP_SIFS_OFDM 0x063E -#define REG_ACKTO 0x0640 -#define REG_CTS2TO 0x0641 -#define REG_EIFS 0x0642 +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 #define REG_NAV_CTRL 0x0650 #define REG_BACAMCMD 0x0654 #define REG_BACAMCONTENT 0x0658 -#define REG_LBDLY 0x0660 -#define REG_FWDLY 0x0661 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 #define REG_RXERR_RPT 0x0664 -#define REG_WMAC_TRXPTCL_CTL 0x0668 +#define REG_WMAC_TRXPTCL_CTL 0x0668 -#define REG_CAMCMD 0x0670 +#define REG_CAMCMD 0x0670 #define REG_CAMWRITE 0x0674 -#define REG_CAMREAD 0x0678 -#define REG_CAMDBG 0x067C -#define REG_SECCFG 0x0680 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 #define REG_WOW_CTRL 0x0690 #define REG_PSSTATUS 0x0691 @@ -294,10 +290,10 @@ #define REG_CALB32K_CTRL 0x06AC #define REG_PKT_MON_CTRL 0x06B4 #define REG_BT_COEX_TABLE 0x06C0 -#define REG_WMAC_RESP_TXINFO 0x06D8 +#define REG_WMAC_RESP_TXINFO 0x06D8 #define REG_USB_INFO 0xFE17 -#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_SPECIAL_OPTION 0xFE55 #define REG_USB_DMA_AGG_TO 0xFE5B #define REG_USB_AGG_TO 0xFE5C #define REG_USB_AGG_TH 0xFE5D @@ -305,120 +301,148 @@ #define REG_TEST_USB_TXQS 0xFE48 #define REG_TEST_SIE_VID 0xFE60 #define REG_TEST_SIE_PID 0xFE62 -#define REG_TEST_SIE_OPTIONAL 0xFE64 -#define REG_TEST_SIE_CHIRP_K 0xFE65 +#define REG_TEST_SIE_OPTIONAL 0xFE64 +#define REG_TEST_SIE_CHIRP_K 0xFE65 #define REG_TEST_SIE_PHY 0xFE66 -#define REG_TEST_SIE_MAC_ADDR 0xFE70 +#define REG_TEST_SIE_MAC_ADDR 0xFE70 #define REG_TEST_SIE_STRING 0xFE80 #define REG_NORMAL_SIE_VID 0xFE60 #define REG_NORMAL_SIE_PID 0xFE62 -#define REG_NORMAL_SIE_OPTIONAL 0xFE64 +#define REG_NORMAL_SIE_OPTIONAL 0xFE64 #define REG_NORMAL_SIE_EP 0xFE65 #define REG_NORMAL_SIE_PHY 0xFE68 -#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 -#define REG_NORMAL_SIE_STRING 0xFE80 +#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 +#define REG_NORMAL_SIE_STRING 0xFE80 -#define CR9346 REG_9346CR -#define MSR (REG_CR + 2) -#define ISR REG_HISR 
-#define TSFR REG_TSFTR +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR -#define MACIDR0 REG_MACID -#define MACIDR4 (REG_MACID + 4) +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) -#define PBP REG_PBP +#define PBP REG_PBP -#define IDR0 MACIDR0 -#define IDR4 MACIDR4 +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 -#define UNUSED_REGISTER 0x1BF -#define DCAM UNUSED_REGISTER -#define PSR UNUSED_REGISTER -#define BBADDR UNUSED_REGISTER -#define PHYDATAR UNUSED_REGISTER +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER -#define INVALID_BBRF_VALUE 0x12345678 +#define INVALID_BBRF_VALUE 0x12345678 -#define MAX_MSS_DENSITY_2T 0x13 -#define MAX_MSS_DENSITY_1T 0x0A +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A -#define CMDEEPROM_EN BIT(5) -#define CMDEEPROM_SEL BIT(4) -#define CMD9346CR_9356SEL BIT(4) -#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) -#define AUTOLOAD_EFUSE CMDEEPROM_EN +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN -#define GPIOSEL_GPIO 0 -#define GPIOSEL_ENBT BIT(5) +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) -#define GPIO_IN REG_GPIO_PIN_CTRL -#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) -#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) -#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) -#define MSR_NOLINK 0x00 -#define MSR_ADHOC 0x01 -#define MSR_INFRA 0x02 -#define MSR_AP 0x03 -#define MSR_MASK 0x03 +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 #define RRSR_RSC_OFFSET 21 #define RRSR_SHORT_OFFSET 23 #define RRSR_RSC_BW_40M 0x600000 #define RRSR_RSC_UPSUBCHNL 0x400000 #define RRSR_RSC_LOWSUBCHNL 0x200000 -#define RRSR_SHORT 0x800000 -#define RRSR_1M BIT(0) -#define RRSR_2M BIT(1) -#define RRSR_5_5M BIT(2) -#define RRSR_11M BIT(3) -#define RRSR_6M BIT(4) -#define RRSR_9M BIT(5) -#define RRSR_12M BIT(6) -#define RRSR_18M BIT(7) -#define RRSR_24M BIT(8) -#define RRSR_36M BIT(9) -#define RRSR_48M BIT(10) -#define RRSR_54M BIT(11) -#define RRSR_MCS0 BIT(12) -#define RRSR_MCS1 BIT(13) -#define RRSR_MCS2 BIT(14) -#define RRSR_MCS3 BIT(15) -#define RRSR_MCS4 BIT(16) -#define RRSR_MCS5 BIT(17) -#define RRSR_MCS6 BIT(18) -#define RRSR_MCS7 BIT(19) +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) #define BRSR_ACKSHORTPMB BIT(23) -#define RATR_1M 0x00000001 -#define RATR_2M 0x00000002 -#define RATR_55M 0x00000004 -#define RATR_11M 0x00000008 -#define RATR_6M 0x00000010 -#define RATR_9M 0x00000020 -#define RATR_12M 0x00000040 -#define RATR_18M 0x00000080 -#define RATR_24M 0x00000100 -#define RATR_36M 0x00000200 -#define RATR_48M 0x00000400 -#define RATR_54M 0x00000800 -#define RATR_MCS0 0x00001000 
-#define RATR_MCS1 0x00002000 -#define RATR_MCS2 0x00004000 -#define RATR_MCS3 0x00008000 -#define RATR_MCS4 0x00010000 -#define RATR_MCS5 0x00020000 -#define RATR_MCS6 0x00040000 -#define RATR_MCS7 0x00080000 -#define RATR_MCS8 0x00100000 -#define RATR_MCS9 0x00200000 -#define RATR_MCS10 0x00400000 -#define RATR_MCS11 0x00800000 -#define RATR_MCS12 0x01000000 -#define RATR_MCS13 0x02000000 -#define RATR_MCS14 0x04000000 -#define RATR_MCS15 0x08000000 +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) +#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) #define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) #define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ @@ -434,31 +458,31 @@ #define BW_OPMODE_5G BIT(1) #define BW_OPMODE_11J BIT(0) -#define CAM_VALID BIT(15) +#define CAM_VALID BIT(15) #define CAM_NOTVALID 0x0000 -#define CAM_USEDK BIT(5) +#define CAM_USEDK BIT(5) -#define CAM_NONE 0x0 -#define CAM_WEP40 0x01 -#define CAM_TKIP 0x02 -#define CAM_AES 0x04 -#define CAM_WEP104 0x05 +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 #define TOTAL_CAM_ENTRY 32 #define HALF_CAM_ENTRY 16 -#define CAM_WRITE BIT(16) -#define CAM_READ 0x00000000 +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 #define CAM_POLLINIG BIT(31) -#define SCR_USEDK 0x01 +#define SCR_USEDK 0x01 #define SCR_TXSEC_ENABLE 0x02 #define SCR_RXSEC_ENABLE 0x04 -#define WOW_PMEN BIT(0) -#define WOW_WOMEN BIT(1) -#define WOW_MAGIC BIT(2) -#define WOW_UWF BIT(3) +#define WOW_PMEN BIT(0) +#define WOW_WOMEN BIT(1) +#define WOW_MAGIC BIT(2) +#define WOW_UWF BIT(3) #define IMR8190_DISABLED 0x0 #define IMR_BCNDMAINT6 BIT(31) @@ -467,180 +491,179 @@ #define IMR_BCNDMAINT3 BIT(28) #define IMR_BCNDMAINT2 BIT(27) #define IMR_BCNDMAINT1 BIT(26) -#define IMR_BCNDOK8 BIT(25) -#define IMR_BCNDOK7 BIT(24) -#define IMR_BCNDOK6 BIT(23) -#define IMR_BCNDOK5 BIT(22) -#define IMR_BCNDOK4 BIT(21) -#define IMR_BCNDOK3 BIT(20) -#define IMR_BCNDOK2 BIT(19) -#define IMR_BCNDOK1 BIT(18) +#define IMR_BCNDOK8 BIT(25) +#define 
IMR_BCNDOK7 BIT(24) +#define IMR_BCNDOK6 BIT(23) +#define IMR_BCNDOK5 BIT(22) +#define IMR_BCNDOK4 BIT(21) +#define IMR_BCNDOK3 BIT(20) +#define IMR_BCNDOK2 BIT(19) +#define IMR_BCNDOK1 BIT(18) #define IMR_TIMEOUT2 BIT(17) #define IMR_TIMEOUT1 BIT(16) -#define IMR_TXFOVW BIT(15) +#define IMR_TXFOVW BIT(15) #define IMR_PSTIMEOUT BIT(14) -#define IMR_BCNINT BIT(13) -#define IMR_RXFOVW BIT(12) -#define IMR_RDU BIT(11) -#define IMR_ATIMEND BIT(10) -#define IMR_BDOK BIT(9) -#define IMR_HIGHDOK BIT(8) -#define IMR_TBDOK BIT(7) -#define IMR_MGNTDOK BIT(6) -#define IMR_TBDER BIT(5) -#define IMR_BKDOK BIT(4) -#define IMR_BEDOK BIT(3) -#define IMR_VIDOK BIT(2) -#define IMR_VODOK BIT(1) -#define IMR_ROK BIT(0) - -#define IMR_TXERR BIT(11) -#define IMR_RXERR BIT(10) -#define IMR_CPWM BIT(8) -#define IMR_OCPINT BIT(1) -#define IMR_WLANOFF BIT(0) +#define IMR_BCNINT BIT(13) +#define IMR_RXFOVW BIT(12) +#define IMR_RDU BIT(11) +#define IMR_ATIMEND BIT(10) +#define IMR_BDOK BIT(9) +#define IMR_HIGHDOK BIT(8) +#define IMR_TBDOK BIT(7) +#define IMR_MGNTDOK BIT(6) +#define IMR_TBDER BIT(5) +#define IMR_BKDOK BIT(4) +#define IMR_BEDOK BIT(3) +#define IMR_VIDOK BIT(2) +#define IMR_VODOK BIT(1) +#define IMR_ROK BIT(0) + +#define IMR_TXERR BIT(11) +#define IMR_RXERR BIT(10) +#define IMR_CPWM BIT(8) +#define IMR_OCPINT BIT(1) +#define IMR_WLANOFF BIT(0) /* 8723E series PCIE Host IMR/ISR bit */ /* IMR DW0 Bit 0-31 */ -#define PHIMR_TIMEOUT2 BIT(31) -#define PHIMR_TIMEOUT1 BIT(30) +#define PHIMR_TIMEOUT2 BIT(31) +#define PHIMR_TIMEOUT1 BIT(30) #define PHIMR_PSTIMEOUT BIT(29) -#define PHIMR_GTINT4 BIT(28) -#define PHIMR_GTINT3 BIT(27) -#define PHIMR_TXBCNERR BIT(26) -#define PHIMR_TXBCNOK BIT(25) +#define PHIMR_GTINT4 BIT(28) +#define PHIMR_GTINT3 BIT(27) +#define PHIMR_TXBCNERR BIT(26) +#define PHIMR_TXBCNOK BIT(25) #define PHIMR_TSF_BIT32_TOGGLE BIT(24) -#define PHIMR_BCNDMAINT3 BIT(23) -#define PHIMR_BCNDMAINT2 BIT(22) -#define PHIMR_BCNDMAINT1 BIT(21) -#define PHIMR_BCNDMAINT0 BIT(20) -#define PHIMR_BCNDOK3 BIT(19) -#define PHIMR_BCNDOK2 BIT(18) -#define PHIMR_BCNDOK1 BIT(17) -#define PHIMR_BCNDOK0 BIT(16) +#define PHIMR_BCNDMAINT3 BIT(23) +#define PHIMR_BCNDMAINT2 BIT(22) +#define PHIMR_BCNDMAINT1 BIT(21) +#define PHIMR_BCNDMAINT0 BIT(20) +#define PHIMR_BCNDOK3 BIT(19) +#define PHIMR_BCNDOK2 BIT(18) +#define PHIMR_BCNDOK1 BIT(17) +#define PHIMR_BCNDOK0 BIT(16) #define PHIMR_HSISR_IND_ON BIT(15) -#define PHIMR_BCNDMAINT_E BIT(14) +#define PHIMR_BCNDMAINT_E BIT(14) #define PHIMR_ATIMEND_E BIT(13) #define PHIMR_ATIM_CTW_END BIT(12) #define PHIMR_HISRE_IND BIT(11) -#define PHIMR_C2HCMD BIT(10) -#define PHIMR_CPWM2 BIT(9) -#define PHIMR_CPWM BIT(8) -#define PHIMR_HIGHDOK BIT(7) -#define PHIMR_MGNTDOK BIT(6) -#define PHIMR_BKDOK BIT(5) -#define PHIMR_BEDOK BIT(4) -#define PHIMR_VIDOK BIT(3) -#define PHIMR_VODOK BIT(2) -#define PHIMR_RDU BIT(1) -#define PHIMR_ROK BIT(0) +#define PHIMR_C2HCMD BIT(10) +#define PHIMR_CPWM2 BIT(9) +#define PHIMR_CPWM BIT(8) +#define PHIMR_HIGHDOK BIT(7) +#define PHIMR_MGNTDOK BIT(6) +#define PHIMR_BKDOK BIT(5) +#define PHIMR_BEDOK BIT(4) +#define PHIMR_VIDOK BIT(3) +#define PHIMR_VODOK BIT(2) +#define PHIMR_RDU BIT(1) +#define PHIMR_ROK BIT(0) /* PCIE Host Interrupt Status Extension bit */ -#define PHIMR_BCNDMAINT7 BIT(23) -#define PHIMR_BCNDMAINT6 BIT(22) -#define PHIMR_BCNDMAINT5 BIT(21) -#define PHIMR_BCNDMAINT4 BIT(20) -#define PHIMR_BCNDOK7 BIT(19) -#define PHIMR_BCNDOK6 BIT(18) -#define PHIMR_BCNDOK5 BIT(17) -#define PHIMR_BCNDOK4 BIT(16) +#define PHIMR_BCNDMAINT7 BIT(23) +#define 
PHIMR_BCNDMAINT6 BIT(22) +#define PHIMR_BCNDMAINT5 BIT(21) +#define PHIMR_BCNDMAINT4 BIT(20) +#define PHIMR_BCNDOK7 BIT(19) +#define PHIMR_BCNDOK6 BIT(18) +#define PHIMR_BCNDOK5 BIT(17) +#define PHIMR_BCNDOK4 BIT(16) /* bit12-15: RSVD */ -#define PHIMR_TXERR BIT(11) -#define PHIMR_RXERR BIT(10) -#define PHIMR_TXFOVW BIT(9) -#define PHIMR_RXFOVW BIT(8) -/* bit2-7: RSV */ -#define PHIMR_OCPINT BIT(1) +#define PHIMR_TXERR BIT(11) +#define PHIMR_RXERR BIT(10) +#define PHIMR_TXFOVW BIT(9) +#define PHIMR_RXFOVW BIT(8) +/* bit2-7: RSVD */ +#define PHIMR_OCPINT BIT(1) #define HWSET_MAX_SIZE 256 #define EFUSE_MAX_SECTION 32 #define EFUSE_REAL_CONTENT_LEN 512 #define EFUSE_OOB_PROTECT_BYTES 15 -#define EEPROM_DEFAULT_TSSI 0x0 -#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 -#define EEPROM_DEFAULT_CRYSTALCAP 0x5 -#define EEPROM_DEFAULT_BOARDTYPE 0x02 -#define EEPROM_DEFAULT_TXPOWER 0x1010 -#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 #define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 -#define EEPROM_DEFAULT_THERMALMETER 0x12 +#define EEPROM_DEFAULT_THERMALMETER 0x12 #define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 #define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 -#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 -#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 -#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 #define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 #define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 #define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 - -#define EEPROM_DEFAULT_PID 0x1234 -#define EEPROM_DEFAULT_VID 0x5678 -#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB #define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD -#define EEPROM_DEFAULT_VERSION 0 - -#define EEPROM_CHANNEL_PLAN_FCC 0x0 -#define EEPROM_CHANNEL_PLAN_IC 0x1 -#define EEPROM_CHANNEL_PLAN_ETSI 0x2 -#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 -#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 -#define EEPROM_CHANNEL_PLAN_MKK 0x5 -#define EEPROM_CHANNEL_PLAN_MKK1 0x6 -#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 -#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 #define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 #define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA -#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_NCC 0xB #define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 -#define EEPROM_CID_DEFAULT 0x0 -#define EEPROM_CID_TOSHIBA 0x4 -#define EEPROM_CID_CCX 0x10 -#define EEPROM_CID_QMI 0x0D -#define EEPROM_CID_WHQL 0xFE +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE -#define RTL8192_EEPROM_ID 0x8129 +#define RTL8192_EEPROM_ID 0x8129 -#define RTL8190_EEPROM_ID 0x8129 -#define EEPROM_HPON 0x02 -#define EEPROM_CLK 0x06 -#define EEPROM_TESTR 0x08 +#define RTL8190_EEPROM_ID 0x8129 +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define 
EEPROM_TESTR 0x08 -#define EEPROM_VID 0x49 -#define EEPROM_DID 0x4B -#define EEPROM_SVID 0x4D -#define EEPROM_SMID 0x4F +#define EEPROM_VID 0x49 +#define EEPROM_DID 0x4B +#define EEPROM_SVID 0x4D +#define EEPROM_SMID 0x4F -#define EEPROM_MAC_ADDR 0x67 +#define EEPROM_MAC_ADDR 0x67 -#define EEPROM_CCK_TX_PWR_INX 0x5A -#define EEPROM_HT40_1S_TX_PWR_INX 0x60 +#define EEPROM_CCK_TX_PWR_INX 0x5A +#define EEPROM_HT40_1S_TX_PWR_INX 0x60 #define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66 -#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69 -#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C -#define EEPROM_HT40_MAX_PWR_OFFSET 0x25 -#define EEPROM_HT20_MAX_PWR_OFFSET 0x22 - -#define EEPROM_THERMAL_METER 0x2a -#define EEPROM_XTAL_K 0x78 -#define EEPROM_RF_OPT1 0x79 -#define EEPROM_RF_OPT2 0x7A -#define EEPROM_RF_OPT3 0x7B -#define EEPROM_RF_OPT4 0x7C -#define EEPROM_CHANNEL_PLAN 0x28 -#define EEPROM_VERSION 0x30 -#define EEPROM_CUSTOMER_ID 0x31 +#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69 +#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C +#define EEPROM_HT40_MAX_PWR_OFFSET 0x25 +#define EEPROM_HT20_MAX_PWR_OFFSET 0x22 + +#define EEPROM_THERMAL_METER 0x2a +#define EEPROM_XTAL_K 0x78 +#define EEPROM_RF_OPT1 0x79 +#define EEPROM_RF_OPT2 0x7A +#define EEPROM_RF_OPT3 0x7B +#define EEPROM_RF_OPT4 0x7C +#define EEPROM_CHANNEL_PLAN 0x28 +#define EEPROM_VERSION 0x30 +#define EEPROM_CUSTOMER_ID 0x31 #define EEPROM_PWRDIFF 0x54 #define EEPROM_TXPOWERCCK 0x10 -#define EEPROM_TXPOWERHT40_1S 0x16 -#define EEPROM_TXPOWERHT40_2SDIFF 0x66 -#define EEPROM_TXPOWERHT20DIFF 0x1C -#define EEPROM_TXPOWER_OFDMDIFF 0x1F +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT40_2SDIFF 0x66 +#define EEPROM_TXPOWERHT20DIFF 0x1C +#define EEPROM_TXPOWER_OFDMDIFF 0x1F #define EEPROM_TXPWR_GROUP 0x22 @@ -649,169 +672,169 @@ #define EEPROM_CHANNELPLAN 0x28 -#define RF_OPTION1 0x2B -#define RF_OPTION2 0x2C -#define RF_OPTION3 0x2D -#define RF_OPTION4 0x2E - -#define STOPBECON BIT(6) -#define STOPHIGHT BIT(5) -#define STOPMGT BIT(4) -#define STOPVO BIT(3) -#define STOPVI BIT(2) -#define STOPBE BIT(1) -#define STOPBK BIT(0) - -#define RCR_APPFCS BIT(31) -#define RCR_APP_MIC BIT(30) -#define RCR_APP_ICV BIT(29) +#define RF_OPTION1 0x2B +#define RF_OPTION2 0x2C +#define RF_OPTION3 0x2D +#define RF_OPTION4 0x2E + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) #define RCR_APP_PHYST_RXFF BIT(28) #define RCR_APP_BA_SSN BIT(27) -#define RCR_ENMBID BIT(24) -#define RCR_LSIGEN BIT(23) -#define RCR_MFBEN BIT(22) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) #define RCR_HTC_LOC_CTRL BIT(14) -#define RCR_AMF BIT(13) -#define RCR_ACF BIT(12) -#define RCR_ADF BIT(11) -#define RCR_AICV BIT(9) -#define RCR_ACRC32 BIT(8) +#define RCR_AMF BIT(13) +#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) #define RCR_CBSSID_BCN BIT(7) #define RCR_CBSSID_DATA BIT(6) -#define RCR_CBSSID RCR_CBSSID_DATA -#define RCR_APWRMGT BIT(5) -#define RCR_ADD3 BIT(4) -#define RCR_AB BIT(3) -#define RCR_AM BIT(2) -#define RCR_APM BIT(1) -#define RCR_AAP BIT(0) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) #define RCR_MXDMA_OFFSET 8 #define RCR_FIFO_OFFSET 13 -#define RSV_CTRL 0x001C 
-#define RD_CTRL 0x0524 +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 #define REG_USB_INFO 0xFE17 -#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_SPECIAL_OPTION 0xFE55 #define REG_USB_DMA_AGG_TO 0xFE5B #define REG_USB_AGG_TO 0xFE5C #define REG_USB_AGG_TH 0xFE5D -#define REG_USB_VID 0xFE60 -#define REG_USB_PID 0xFE62 +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 #define REG_USB_OPTIONAL 0xFE64 #define REG_USB_CHIRP_K 0xFE65 -#define REG_USB_PHY 0xFE66 +#define REG_USB_PHY 0xFE66 #define REG_USB_MAC_ADDR 0xFE70 #define REG_USB_HRPWM 0xFE58 #define REG_USB_HCPWM 0xFE57 -#define SW18_FPWM BIT(3) - -#define ISO_MD2PP BIT(0) -#define ISO_UA2USB BIT(1) -#define ISO_UD2CORE BIT(2) -#define ISO_PA2PCIE BIT(3) -#define ISO_PD2CORE BIT(4) -#define ISO_IP2MAC BIT(5) -#define ISO_DIOP BIT(6) -#define ISO_DIOE BIT(7) -#define ISO_EB2CORE BIT(8) -#define ISO_DIOR BIT(9) - -#define PWC_EV25V BIT(14) -#define PWC_EV12V BIT(15) - -#define FEN_BBRSTB BIT(0) -#define FEN_BB_GLB_RSTn BIT(1) -#define FEN_USBA BIT(2) -#define FEN_UPLL BIT(3) -#define FEN_USBD BIT(4) +#define SW18_FPWM BIT(3) + +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) +#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) + +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) + +#define FEN_BBRSTB BIT(0) +#define FEN_BB_GLB_RSTN BIT(1) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) #define FEN_DIO_PCIE BIT(5) -#define FEN_PCIEA BIT(6) -#define FEN_PPLL BIT(7) -#define FEN_PCIED BIT(8) -#define FEN_DIOE BIT(9) -#define FEN_CPUEN BIT(10) -#define FEN_DCORE BIT(11) -#define FEN_ELDR BIT(12) -#define FEN_DIO_RF BIT(13) -#define FEN_HWPDN BIT(14) -#define FEN_MREGEN BIT(15) - -#define PFM_LDALL BIT(0) -#define PFM_ALDN BIT(1) -#define PFM_LDKP BIT(2) -#define PFM_WOWL BIT(3) -#define EnPDN BIT(4) -#define PDN_PL BIT(5) -#define APFM_ONMAC BIT(8) -#define APFM_OFF BIT(9) -#define APFM_RSM BIT(10) -#define AFSM_HSUS BIT(11) -#define AFSM_PCIE BIT(12) -#define APDM_MAC BIT(13) -#define APDM_HOST BIT(14) -#define APDM_HPDN BIT(15) -#define RDY_MACON BIT(16) -#define SUS_HOST BIT(17) -#define ROP_ALD BIT(20) -#define ROP_PWR BIT(21) -#define ROP_SPS BIT(22) -#define SOP_MRST BIT(25) -#define SOP_FUSE BIT(26) -#define SOP_ABG BIT(27) -#define SOP_AMB BIT(28) -#define SOP_RCK BIT(29) -#define SOP_A8M BIT(30) -#define XOP_BTCK BIT(31) - -#define ANAD16V_EN BIT(0) -#define ANA8M BIT(1) -#define MACSLP BIT(4) +#define FEN_PCIEA BIT(6) +#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define ENPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN 
BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) #define LOADER_CLK_EN BIT(5) #define _80M_SSC_DIS BIT(7) #define _80M_SSC_EN_HO BIT(8) #define PHY_SSC_RSTB BIT(9) -#define SEC_CLK_EN BIT(10) -#define MAC_CLK_EN BIT(11) -#define SYS_CLK_EN BIT(12) -#define RING_CLK_EN BIT(13) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) #define BOOT_FROM_EEPROM BIT(4) -#define EEPROM_EN BIT(5) +#define EEPROM_EN BIT(5) -#define AFE_BGEN BIT(0) -#define AFE_MBEN BIT(1) -#define MAC_ID_EN BIT(7) +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) -#define WLOCK_ALL BIT(0) -#define WLOCK_00 BIT(1) -#define WLOCK_04 BIT(2) -#define WLOCK_08 BIT(3) -#define WLOCK_40 BIT(4) +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) #define R_DIS_PRST_0 BIT(5) #define R_DIS_PRST_1 BIT(6) -#define LOCK_ALL_EN BIT(7) +#define LOCK_ALL_EN BIT(7) -#define RF_EN BIT(0) -#define RF_RSTB BIT(1) -#define RF_SDMRSTB BIT(2) +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) -#define LDA15_EN BIT(0) -#define LDA15_STBY BIT(1) -#define LDA15_OBUF BIT(2) +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) +#define LDA15_OBUF BIT(2) #define LDA15_REG_VOS BIT(3) #define _LDA15_VOADJ(x) (((x) & 0x7) << 4) -#define LDV12_EN BIT(0) -#define LDV12_SDBY BIT(1) -#define LPLDO_HSM BIT(2) +#define LDV12_EN BIT(0) +#define LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) #define LPLDO_LSM_DIS BIT(3) #define _LDV12_VADJ(x) (((x) & 0xF) << 4) -#define XTAL_EN BIT(0) -#define XTAL_BSEL BIT(1) +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) #define _XTAL_BOSC(x) (((x) & 0x3) << 2) #define _XTAL_CADJ(x) (((x) & 0xF) << 4) #define XTAL_GATE_USB BIT(8) @@ -826,146 +849,146 @@ #define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) #define _XTAL_GPIO(x) (((x) & 0x7) << 23) -#define CKDLY_AFE BIT(26) -#define CKDLY_USB BIT(27) -#define CKDLY_DIG BIT(28) -#define CKDLY_BT BIT(29) +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) -#define APLL_EN BIT(0) -#define APLL_320_EN BIT(1) +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) #define APLL_FREF_SEL BIT(2) #define APLL_EDGE_SEL BIT(3) -#define APLL_WDOGB BIT(4) -#define APLL_LPFEN BIT(5) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) #define APLL_REF_CLK_13MHZ 0x1 -#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_19_2MHZ 0x2 #define APLL_REF_CLK_20MHZ 0x3 #define APLL_REF_CLK_25MHZ 0x4 #define APLL_REF_CLK_26MHZ 0x5 -#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_38_4MHZ 0x6 #define APLL_REF_CLK_40MHZ 0x7 -#define APLL_320EN BIT(14) -#define APLL_80EN BIT(15) -#define APLL_1MEN BIT(24) - -#define ALD_EN BIT(18) -#define EF_PD BIT(19) -#define EF_FLAG BIT(31) - -#define EF_TRPT BIT(7) -#define LDOE25_EN BIT(31) - -#define RSM_EN BIT(0) -#define Timer_EN BIT(4) - -#define TRSW0EN BIT(2) -#define TRSW1EN BIT(3) -#define EROM_EN BIT(4) -#define EnBT BIT(5) -#define EnUart BIT(8) -#define Uart_910 BIT(9) -#define EnPMAC BIT(10) -#define SIC_SWRST BIT(11) -#define EnSIC BIT(12) -#define SIC_23 BIT(13) -#define EnHDP BIT(14) -#define SIC_LBK BIT(15) - -#define LED0PL BIT(4) -#define LED1PL BIT(12) -#define LED0DIS BIT(7) - -#define MCUFWDL_EN BIT(0) -#define MCUFWDL_RDY BIT(1) -#define FWDL_ChkSum_rpt BIT(2) -#define MACINI_RDY BIT(3) -#define BBINI_RDY BIT(4) -#define RFINI_RDY BIT(5) -#define WINTINI_RDY BIT(6) -#define CPRST BIT(23) - -#define XCLK_VLD 
BIT(0) -#define ACLK_VLD BIT(1) -#define UCLK_VLD BIT(2) -#define PCLK_VLD BIT(3) -#define PCIRSTB BIT(4) -#define V15_VLD BIT(5) -#define TRP_B15V_EN BIT(7) -#define SIC_IDLE BIT(8) -#define BD_MAC2 BIT(9) -#define BD_MAC1 BIT(10) +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define TIMER_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define ENSIC BIT(12) +#define SIC_23 BIT(13) +#define ENHDP BIT(14) +#define SIC_LBK BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) #define IC_MACPHY_MODE BIT(11) -#define BT_FUNC BIT(16) -#define VENDOR_ID BIT(19) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) #define PAD_HWPD_IDN BIT(22) -#define TRP_VAUX_EN BIT(23) -#define TRP_BT_EN BIT(24) -#define BD_PKG_SEL BIT(25) -#define BD_HCI_SEL BIT(26) -#define TYPE_ID BIT(27) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) #define CHIP_VER_RTL_MASK 0xF000 #define CHIP_VER_RTL_SHIFT 12 -#define REG_LBMODE (REG_CR + 3) +#define REG_LBMODE (REG_CR + 3) #define HCI_TXDMA_EN BIT(0) #define HCI_RXDMA_EN BIT(1) -#define TXDMA_EN BIT(2) -#define RXDMA_EN BIT(3) -#define PROTOCOL_EN BIT(4) -#define SCHEDULE_EN BIT(5) -#define MACTXEN BIT(6) -#define MACRXEN BIT(7) -#define ENSWBCN BIT(8) -#define ENSEC BIT(9) - -#define _NETTYPE(x) (((x) & 0x3) << 16) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) #define MASK_NETTYPE 0x30000 -#define NT_NO_LINK 0x0 +#define NT_NO_LINK 0x0 #define NT_LINK_AD_HOC 0x1 -#define NT_LINK_AP 0x2 -#define NT_AS_AP 0x3 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 -#define _LBMODE(x) (((x) & 0xF) << 24) -#define MASK_LBMODE 0xF000000 +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 #define LOOPBACK_NORMAL 0x0 -#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_IMMEDIATELY 0xB #define LOOPBACK_MAC_DELAY 0x3 #define LOOPBACK_PHY 0x1 #define LOOPBACK_DMA 0x7 -#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) -#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) -#define _PSRX_MASK 0xF -#define _PSTX_MASK 0xF0 -#define _PSRX(x) (x) -#define _PSTX(x) ((x) << 4) +#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) +#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 +#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) -#define PBP_64 0x0 -#define PBP_128 0x1 -#define PBP_256 0x2 -#define PBP_512 0x3 -#define PBP_1024 0x4 +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 
+#define PBP_1024 0x4 #define RXDMA_ARBBW_EN BIT(0) -#define RXSHFT_EN BIT(1) +#define RXSHFT_EN BIT(1) #define RXDMA_AGG_EN BIT(2) -#define QS_VO_QUEUE BIT(8) -#define QS_VI_QUEUE BIT(9) -#define QS_BE_QUEUE BIT(10) -#define QS_BK_QUEUE BIT(11) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) #define QS_MANAGER_QUEUE BIT(12) #define QS_HIGH_QUEUE BIT(13) -#define HQSEL_VOQ BIT(0) -#define HQSEL_VIQ BIT(1) -#define HQSEL_BEQ BIT(2) -#define HQSEL_BKQ BIT(3) -#define HQSEL_MGTQ BIT(4) -#define HQSEL_HIQ BIT(5) +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) #define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) #define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) @@ -974,9 +997,9 @@ #define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6) #define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4) -#define QUEUE_LOW 1 +#define QUEUE_LOW 1 #define QUEUE_NORMAL 2 -#define QUEUE_HIGH 3 +#define QUEUE_HIGH 3 #define _LLT_NO_ACTIVE 0x0 #define _LLT_WRITE_ACCESS 0x1 @@ -984,25 +1007,25 @@ #define _LLT_INIT_DATA(x) ((x) & 0xFF) #define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) -#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP(x) (((x) & 0x3) << 30) #define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) #define BB_WRITE_READ_MASK (BIT(31) | BIT(30)) -#define BB_WRITE_EN BIT(30) -#define BB_READ_EN BIT(31) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) -#define _HPQ(x) ((x) & 0xFF) -#define _LPQ(x) (((x) & 0xFF) << 8) -#define _PUBQ(x) (((x) & 0xFF) << 16) -#define _NPQ(x) ((x) & 0xFF) +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) -#define HPQ_PUBLIC_DIS BIT(24) -#define LPQ_PUBLIC_DIS BIT(25) -#define LD_RQPN BIT(31) +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) -#define BCN_VALID BIT(16) -#define BCN_HEAD(x) (((x) & 0xFF) << 8) -#define BCN_HEAD_MASK 0xFF00 +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 #define BLK_DESC_NUM_SHIFT 4 #define BLK_DESC_NUM_MASK 0xF @@ -1022,9 +1045,9 @@ #define _RRSR_RSC(x) (((x) & 0x3) << 21) #define RRSR_RSC_RESERVED 0x0 -#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 -#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 -#define RRSR_RSC_DUPLICATE_MODE 0x3 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 #define USE_SHORT_G1 BIT(20) @@ -1037,8 +1060,8 @@ #define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) #define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) -#define RETRY_LIMIT_SHORT_SHIFT 8 -#define RETRY_LIMIT_LONG_SHIFT 0 +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 #define _DARF_RC1(x) ((x) & 0x1F) #define _DARF_RC2(x) (((x) & 0x1F) << 8) @@ -1058,123 +1081,123 @@ #define _RARF_RC7(x) (((x) & 0x1F) << 16) #define _RARF_RC8(x) (((x) & 0x1F) << 24) -#define AC_PARAM_TXOP_LIMIT_OFFSET 16 -#define AC_PARAM_ECW_MAX_OFFSET 12 -#define AC_PARAM_ECW_MIN_OFFSET 8 -#define AC_PARAM_AIFS_OFFSET 0 +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 -#define _AIFS(x) (x) +#define _AIFS(x) (x) #define _ECW_MAX_MIN(x) ((x) << 8) #define _TXOP_LIMIT(x) ((x) << 16) -#define _BCNIFS(x) ((x) & 0xFF) -#define _BCNECW(x) ((((x) & 0xF)) << 8) +#define _BCNIFS(x) ((x) & 0xFF) +#define _BCNECW(x) ((((x) & 0xF)) << 8) -#define _LRL(x) ((x) & 
0x3F) -#define _SRL(x) (((x) & 0x3F) << 8) +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) #define _SIFS_CCK_CTX(x) ((x) & 0xFF) -#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8); +#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8) #define _SIFS_OFDM_CTX(x) ((x) & 0xFF) -#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8); +#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8) -#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) #define DIS_EDCA_CNT_DWN BIT(11) -#define EN_MBSSID BIT(1) +#define EN_MBSSID BIT(1) #define EN_TXBCN_RPT BIT(2) #define EN_BCN_FUNCTION BIT(3) -#define TSFTR_RST BIT(0) -#define TSFTR1_RST BIT(1) +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) -#define STOP_BCNQ BIT(6) +#define STOP_BCNQ BIT(6) -#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) -#define DIS_TSF_UDT0_TEST_CHIP BIT(5) +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) -#define AcmHw_HwEn BIT(0) -#define AcmHw_BeqEn BIT(1) -#define AcmHw_ViqEn BIT(2) -#define AcmHw_VoqEn BIT(3) -#define AcmHw_BeqStatus BIT(4) -#define AcmHw_ViqStatus BIT(5) -#define AcmHw_VoqStatus BIT(6) +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) -#define APSDOFF BIT(6) +#define APSDOFF BIT(6) #define APSDOFF_STATUS BIT(7) -#define BW_20MHZ BIT(2) +#define BW_20MHZ BIT(2) #define RATE_BITMAP_ALL 0xFFFFF -#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 -#define TSFRST BIT(0) -#define DIS_GCLK BIT(1) -#define PAD_SEL BIT(2) -#define PWR_ST BIT(6) +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) #define PWRBIT_OW_EN BIT(7) -#define ACRC BIT(8) -#define CFENDFORM BIT(9) -#define ICV BIT(10) - -#define AAP BIT(0) -#define APM BIT(1) -#define AM BIT(2) -#define AB BIT(3) -#define ADD3 BIT(4) -#define APWRMGT BIT(5) -#define CBSSID BIT(6) -#define CBSSID_DATA BIT(6) -#define CBSSID_BCN BIT(7) -#define ACRC32 BIT(8) -#define AICV BIT(9) -#define ADF BIT(11) -#define ACF BIT(12) -#define AMF BIT(13) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) #define HTC_LOC_CTRL BIT(14) -#define UC_DATA_EN BIT(16) -#define BM_DATA_EN BIT(17) -#define MFBEN BIT(22) -#define LSIGEN BIT(23) -#define EnMBID BIT(24) -#define APP_BASSN BIT(27) -#define APP_PHYSTS BIT(28) -#define APP_ICV BIT(29) -#define APP_MIC BIT(30) -#define APP_FCS BIT(31) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define ENMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) #define _MIN_SPACE(x) ((x) & 0x7) -#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) -#define RXERR_TYPE_OFDM_PPDU 0 -#define RXERR_TYPE_OFDM_FALSE_ALARM 1 -#define RXERR_TYPE_OFDM_MPDU_OK 2 -#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 #define 
RXERR_TYPE_CCK_PPDU 4 -#define RXERR_TYPE_CCK_FALSE_ALARM 5 -#define RXERR_TYPE_CCK_MPDU_OK 6 -#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 #define RXERR_TYPE_HT_PPDU 8 -#define RXERR_TYPE_HT_FALSE_ALARM 9 -#define RXERR_TYPE_HT_MPDU_TOTAL 10 -#define RXERR_TYPE_HT_MPDU_OK 11 -#define RXERR_TYPE_HT_MPDU_FAIL 12 -#define RXERR_TYPE_RX_FULL_DROP 15 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 #define RXERR_COUNTER_MASK 0xFFFFF #define RXERR_RPT_RST BIT(27) -#define _RXERR_RPT_SEL(type) ((type) << 28) - -#define SCR_TxUseDK BIT(0) -#define SCR_RxUseDK BIT(1) -#define SCR_TxEncEnable BIT(2) -#define SCR_RxDecEnable BIT(3) -#define SCR_SKByA2 BIT(4) -#define SCR_NoSKMC BIT(5) +#define _RXERR_RPT_SEL(type) ((type) << 28) + +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) #define SCR_TXBCUSEDK BIT(6) #define SCR_RXBCUSEDK BIT(7) @@ -1182,32 +1205,32 @@ #define USB_IS_FULL_SPEED 1 #define USB_SPEED_MASK BIT(5) -#define USB_NORMAL_SIE_EP_MASK 0xF -#define USB_NORMAL_SIE_EP_SHIFT 4 +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 #define USB_TEST_EP_MASK 0x30 #define USB_TEST_EP_SHIFT 4 -#define USB_AGG_EN BIT(3) +#define USB_AGG_EN BIT(3) #define MAC_ADDR_LEN 6 -#define LAST_ENTRY_OF_TX_PKT_BUFFER 255 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 255 -#define POLLING_LLT_THRESHOLD 20 -#define POLLING_READY_TIMEOUT_COUNT 1000 +#define POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 1000 #define MAX_MSS_DENSITY_2T 0x13 #define MAX_MSS_DENSITY_1T 0x0A -#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) +#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) #define EPROM_CMD_CONFIG 0x3 #define EPROM_CMD_LOAD 1 #define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE -#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) -#define RPMAC_RESET 0x100 +#define RPMAC_RESET 0x100 #define RPMAC_TXSTART 0x104 #define RPMAC_TXLEGACYSIG 0x108 #define RPMAC_TXHTSIG1 0x10c @@ -1223,12 +1246,12 @@ #define RPMAC_TXMACHEADER5 0x134 #define RPMAC_TXDADATYPE 0x138 #define RPMAC_TXRANDOMSEED 0x13c -#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPPREAMBLE 0x140 #define RPMAC_CCKPLCPHEADER 0x144 #define RPMAC_CCKCRC16 0x148 #define RPMAC_OFDMRXCRC32OK 0x170 -#define RPMAC_OFDMRXCRC32Er 0x174 -#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 #define RPMAC_OFDMRXCRC8ER 0x17c #define RPMAC_CCKCRXRC16ER 0x180 #define RPMAC_CCKCRXRC32ER 0x184 @@ -1245,44 +1268,44 @@ #define RFPGA0_RFTIMING1 0x810 #define RFPGA0_RFTIMING2 0x814 -#define RFPGA0_XA_HSSIPARAMETER1 0x820 -#define RFPGA0_XA_HSSIPARAMETER2 0x824 -#define RFPGA0_XB_HSSIPARAMETER1 0x828 -#define RFPGA0_XB_HSSIPARAMETER2 0x82c +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c -#define RFPGA0_XA_LSSIPARAMETER 0x840 -#define RFPGA0_XB_LSSIPARAMETER 0x844 +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 -#define RFPGA0_RFWAKEUPPARAMETER 0x850 -#define RFPGA0_RFSLEEPUPPARAMETER 0x854 +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 -#define 
RFPGA0_XAB_SWITCHCONTROL 0x858 -#define RFPGA0_XCD_SWITCHCONTROL 0x85c +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c -#define RFPGA0_XA_RFINTERFACEOE 0x860 -#define RFPGA0_XB_RFINTERFACEOE 0x864 +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 -#define RFPGA0_XAB_RFINTERFACESW 0x870 -#define RFPGA0_XCD_RFINTERFACESW 0x874 +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 -#define rFPGA0_XAB_RFPARAMETER 0x878 -#define rFPGA0_XCD_RFPARAMETER 0x87c +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c -#define RFPGA0_ANALOGPARAMETER1 0x880 -#define RFPGA0_ANALOGPARAMETER2 0x884 -#define RFPGA0_ANALOGPARAMETER3 0x888 -#define RFPGA0_ANALOGPARAMETER4 0x88c +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c -#define RFPGA0_XA_LSSIREADBACK 0x8a0 -#define RFPGA0_XB_LSSIREADBACK 0x8a4 -#define RFPGA0_XC_LSSIREADBACK 0x8a8 -#define RFPGA0_XD_LSSIREADBACK 0x8ac +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +#define RFPGA0_XD_LSSIREADBACK 0x8ac #define RFPGA0_PSDREPORT 0x8b4 -#define TRANSCEIVEA_HSPI_READBACK 0x8b8 -#define TRANSCEIVEB_HSPI_READBACK 0x8bc -#define RFPGA0_XAB_RFINTERFACERB 0x8e0 -#define RFPGA0_XCD_RFINTERFACERB 0x8e4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define RFPGA0_XCD_RFINTERFACERB 0x8e4 #define RFPGA1_RFMOD 0x900 @@ -1293,12 +1316,12 @@ #define RCCK0_SYSTEM 0xa00 #define RCCK0_AFESETTING 0xa04 -#define RCCK0_CCA 0xa08 +#define RCCK0_CCA 0xa08 #define RCCK0_RXAGC1 0xa0c #define RCCK0_RXAGC2 0xa10 -#define RCCK0_RXHP 0xa14 +#define RCCK0_RXHP 0xa14 #define RCCK0_DSPPARAMETER1 0xa18 #define RCCK0_DSPPARAMETER2 0xa1c @@ -1306,26 +1329,26 @@ #define RCCK0_TXFILTER1 0xa20 #define RCCK0_TXFILTER2 0xa24 #define RCCK0_DEBUGPORT 0xa28 -#define RCCK0_FALSEALARMREPORT 0xa2c -#define RCCK0_TRSSIREPORT 0xa50 -#define RCCK0_RXREPORT 0xa54 -#define RCCK0_FACOUNTERLOWER 0xa5c -#define RCCK0_FACOUNTERUPPER 0xa58 +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 -#define ROFDM0_LSTF 0xc00 +#define ROFDM0_LSTF 0xc00 -#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRXPATHENABLE 0xc04 #define ROFDM0_TRMUXPAR 0xc08 -#define ROFDM0_TRSWISOLATION 0xc0c +#define ROFDM0_TRSWISOLATION 0xc0c #define ROFDM0_XARXAFE 0xc10 -#define ROFDM0_XARXIQIMBALANCE 0xc14 -#define ROFDM0_XBRXAFE 0xc18 -#define ROFDM0_XBRXIQIMBALANCE 0xc1c -#define ROFDM0_XCRXAFE 0xc20 -#define ROFDM0_XCRXIQIMBANLANCE 0xc24 -#define ROFDM0_XDRXAFE 0xc28 -#define ROFDM0_XDRXIQIMBALANCE 0xc2c +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c #define ROFDM0_RXDETECTOR1 0xc30 #define ROFDM0_RXDETECTOR2 0xc34 @@ -1334,8 +1357,8 @@ #define ROFDM0_RXDSP 0xc40 #define ROFDM0_CFOANDDAGC 0xc44 -#define ROFDM0_CCADROPTHRESHOLD 0xc48 -#define ROFDM0_ECCATHRESHOLD 0xc4c +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c #define ROFDM0_XAAGCCORE1 0xc50 #define ROFDM0_XAAGCCORE2 0xc54 @@ -1346,24 +1369,24 @@ #define ROFDM0_XDAGCCORE1 0xc68 #define 
ROFDM0_XDAGCCORE2 0xc6c -#define ROFDM0_AGCPARAMETER1 0xc70 -#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 #define ROFDM0_AGCRSSITABLE 0xc78 #define ROFDM0_HTSTFAGC 0xc7c -#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXIQIMBALANCE 0xc80 #define ROFDM0_XATXAFE 0xc84 -#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 #define ROFDM0_XBTXAFE 0xc8c -#define ROFDM0_XCTXIQIMBALANCE 0xc90 -#define ROFDM0_XCTXAFE 0xc94 -#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 #define ROFDM0_XDTXAFE 0xc9c #define ROFDM0_RXIQEXTANTA 0xca0 -#define ROFDM0_RXHPPARAMETER 0xce0 -#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 #define ROFDM0_FRAMESYNC 0xcf0 #define ROFDM0_DFSREPORT 0xcf4 #define ROFDM0_TXCOEFF1 0xca4 @@ -1373,19 +1396,19 @@ #define ROFDM0_TXCOEFF5 0xcb4 #define ROFDM0_TXCOEFF6 0xcb8 -#define ROFDM1_LSTF 0xd00 -#define ROFDM1_TRXPATHENABLE 0xd04 +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 -#define ROFDM1_CF0 0xd08 -#define ROFDM1_CSI1 0xd10 -#define ROFDM1_SBD 0xd14 -#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 #define ROFDM1_CFOTRACKING 0xd2c #define ROFDM1_TRXMESAURE1 0xd34 #define ROFDM1_INTFDET 0xd3c -#define ROFDM1_PSEUDONOISESTATEAB 0xd50 -#define ROFDM1_PSEUDONOISESTATECD 0xd54 -#define ROFDM1_RXPSEUDONOISEWGT 0xd58 +#define ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 #define ROFDM_PHYCOUNTER1 0xda0 #define ROFDM_PHYCOUNTER2 0xda4 @@ -1397,35 +1420,35 @@ #define ROFDM_LONGCFOCD 0xdb8 #define ROFDM_TAILCF0AB 0xdbc #define ROFDM_TAILCF0CD 0xdc0 -#define ROFDM_PWMEASURE1 0xdc4 -#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 #define ROFDM_BWREPORT 0xdcc #define ROFDM_AGCREPORT 0xdd0 -#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXSNR 0xdd4 #define ROFDM_RXEVMCSI 0xdd8 #define ROFDM_SIGREPORT 0xddc #define RTXAGC_A_RATE18_06 0xe00 #define RTXAGC_A_RATE54_24 0xe04 #define RTXAGC_A_CCK1_MCS32 0xe08 -#define RTXAGC_A_MCS03_MCS00 0xe10 -#define RTXAGC_A_MCS07_MCS04 0xe14 -#define RTXAGC_A_MCS11_MCS08 0xe18 -#define RTXAGC_A_MCS15_MCS12 0xe1c +#define RTXAGC_A_MCS03_MCS00 0xe10 +#define RTXAGC_A_MCS07_MCS04 0xe14 +#define RTXAGC_A_MCS11_MCS08 0xe18 +#define RTXAGC_A_MCS15_MCS12 0xe1c #define RTXAGC_B_RATE18_06 0x830 #define RTXAGC_B_RATE54_24 0x834 -#define RTXAGC_B_CCK1_55_MCS32 0x838 -#define RTXAGC_B_MCS03_MCS00 0x83c -#define RTXAGC_B_MCS07_MCS04 0x848 -#define RTXAGC_B_MCS11_MCS08 0x84c -#define RTXAGC_B_MCS15_MCS12 0x868 -#define RTXAGC_B_CCK11_A_CCK2_11 0x86c +#define RTXAGC_B_CCK1_55_MCS32 0x838 +#define RTXAGC_B_MCS03_MCS00 0x83c +#define RTXAGC_B_MCS07_MCS04 0x848 +#define RTXAGC_B_MCS11_MCS08 0x84c +#define RTXAGC_B_MCS15_MCS12 0x868 +#define RTXAGC_B_CCK11_A_CCK2_11 0x86c #define RZEBRA1_HSSIENABLE 0x0 #define RZEBRA1_TRXENABLE1 0x1 #define RZEBRA1_TRXENABLE2 0x2 -#define RZEBRA1_AGC 0x4 +#define RZEBRA1_AGC 0x4 #define RZEBRA1_CHARGEPUMP 0x5 #define RZEBRA1_CHANNEL 0x7 @@ -1434,649 +1457,664 @@ #define RZEBRA1_RXLPF 0xb #define RZEBRA1_RXHPFCORNER 0xc -#define RGLOBALCTRL 0 +#define RGLOBALCTRL 0 #define RRTL8256_TXLPF 19 #define RRTL8256_RXLPF 11 #define RRTL8258_TXLPF 0x11 #define RRTL8258_RXLPF 0x13 #define 
RRTL8258_RSSILPF 0xa -#define RF_AC 0x00 +#define RF_AC 0x00 -#define RF_IQADJ_G1 0x01 -#define RF_IQADJ_G2 0x02 -#define RF_POW_TRSW 0x05 +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 -#define RF_GAIN_RX 0x06 -#define RF_GAIN_TX 0x07 +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 -#define RF_TXM_IDAC 0x08 -#define RF_BS_IQGEN 0x0F +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F -#define RF_MODE1 0x10 -#define RF_MODE2 0x11 +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 #define RF_RX_AGC_HP 0x12 -#define RF_TX_AGC 0x13 -#define RF_BIAS 0x14 -#define RF_IPA 0x15 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 #define RF_POW_ABILITY 0x17 -#define RF_MODE_AG 0x18 -#define RRFCHANNEL 0x18 -#define RF_CHNLBW 0x18 -#define RF_TOP 0x19 - -#define RF_RX_G1 0x1A -#define RF_RX_G2 0x1B - -#define RF_RX_BB2 0x1C -#define RF_RX_BB1 0x1D - -#define RF_RCK1 0x1E -#define RF_RCK2 0x1F - -#define RF_TX_G1 0x20 -#define RF_TX_G2 0x21 -#define RF_TX_G3 0x22 - -#define RF_TX_BB1 0x23 -#define RF_T_METER 0x24 - -#define RF_SYN_G1 0x25 -#define RF_SYN_G2 0x26 -#define RF_SYN_G3 0x27 -#define RF_SYN_G4 0x28 -#define RF_SYN_G5 0x29 -#define RF_SYN_G6 0x2A -#define RF_SYN_G7 0x2B -#define RF_SYN_G8 0x2C - -#define RF_RCK_OS 0x30 -#define RF_TXPA_G1 0x31 -#define RF_TXPA_G2 0x32 -#define RF_TXPA_G3 0x33 - -#define BBBRESETB 0x100 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x24 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define BBBRESETB 0x100 #define BGLOBALRESETB 0x200 #define BOFDMTXSTART 0x4 -#define BCCKTXSTART 0x8 -#define BCRC32DEBUG 0x100 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 #define BPMACLOOPBACK 0x10 -#define BTXLSIG 0xffffff -#define BOFDMTXRATE 0xf +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf #define BOFDMTXRESERVED 0x10 #define BOFDMTXLENGTH 0x1ffe0 #define BOFDMTXPARITY 0x20000 -#define BTXHTSIG1 0xffffff +#define BTXHTSIG1 0xffffff #define BTXHTMCSRATE 0x7f -#define BTXHTBW 0x80 -#define BTXHTLENGTH 0xffff00 -#define BTXHTSIG2 0xffffff +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff #define BTXHTSMOOTHING 0x1 #define BTXHTSOUNDING 0x2 #define BTXHTRESERVED 0x4 #define BTXHTAGGREATION 0x8 -#define BTXHTSTBC 0x30 +#define BTXHTSTBC 0x30 #define BTXHTADVANCECODING 0x40 #define BTXHTSHORTGI 0x80 #define BTXHTNUMBERHT_LTF 0x300 -#define BTXHTCRC8 0x3fc00 +#define BTXHTCRC8 0x3fc00 #define BCOUNTERRESET 0x10000 #define BNUMOFOFDMTX 0xffff -#define BNUMOFCCKTX 0xffff0000 +#define BNUMOFCCKTX 0xffff0000 #define BTXIDLEINTERVAL 0xffff #define BOFDMSERVICE 0xffff0000 #define BTXMACHEADER 0xffffffff -#define BTXDATAINIT 0xff -#define BTXHTMODE 0x100 -#define BTXDATATYPE 0x30000 +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 #define BTXRANDOMSEED 0xffffffff #define BCCKTXPREAMBLE 0x1 -#define BCCKTXSFD 0xffff0000 -#define BCCKTXSIG 0xff +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff #define BCCKTXSERVICE 
0xff00 #define BCCKLENGTHEXT 0x8000 #define BCCKTXLENGHT 0xffff0000 -#define BCCKTXCRC16 0xffff +#define BCCKTXCRC16 0xffff #define BCCKTXSTATUS 0x1 #define BOFDMTXSTATUS 0x2 -#define IS_BB_REG_OFFSET_92S(_Offset) \ - ((_Offset >= 0x800) && (_Offset <= 0xfff)) - -#define BRFMOD 0x1 -#define BJAPANMODE 0x2 -#define BCCKTXSC 0x30 -#define BCCKEN 0x1000000 -#define BOFDMEN 0x2000000 - -#define BOFDMRXADCPHASE 0x10000 -#define BOFDMTXDACPHASE 0x40000 -#define BXATXAGC 0x3f - -#define BXBTXAGC 0xf00 -#define BXCTXAGC 0xf000 -#define BXDTXAGC 0xf0000 - -#define BPASTART 0xf0000000 -#define BTRSTART 0x00f00000 -#define BRFSTART 0x0000f000 -#define BBBSTART 0x000000f0 -#define BBBCCKSTART 0x0000000f -#define BPAEND 0xf -#define BTREND 0x0f000000 -#define BRFEND 0x000f0000 -#define BCCAMASK 0x000000f0 -#define BR2RCCAMASK 0x00000f00 -#define BHSSI_R2TDELAY 0xf8000000 -#define BHSSI_T2RDELAY 0xf80000 -#define BCONTXHSSI 0x400 -#define BIGFROMCCK 0x200 -#define BAGCADDRESS 0x3f -#define BRXHPTX 0x7000 -#define BRXHP2RX 0x38000 -#define BRXHPCCKINI 0xc0000 -#define BAGCTXCODE 0xc00000 -#define BAGCRXCODE 0x300000 - -#define B3WIREDATALENGTH 0x800 -#define B3WIREADDREAALENGTH 0x400 - -#define B3WIRERFPOWERDOWN 0x1 -#define B5GPAPEPOLARITY 0x40000000 -#define B2GPAPEPOLARITY 0x80000000 -#define BRFSW_TXDEFAULTANT 0x3 -#define BRFSW_TXOPTIONANT 0x30 -#define BRFSW_RXDEFAULTANT 0x300 -#define BRFSW_RXOPTIONANT 0x3000 -#define BRFSI_3WIREDATA 0x1 -#define BRFSI_3WIRECLOCK 0x2 -#define BRFSI_3WIRELOAD 0x4 -#define BRFSI_3WIRERW 0x8 -#define BRFSI_3WIRE 0xf - -#define BRFSI_RFENV 0x10 - -#define BRFSI_TRSW 0x20 -#define BRFSI_TRSWB 0x40 -#define BRFSI_ANTSW 0x100 -#define BRFSI_ANTSWB 0x200 -#define BRFSI_PAPE 0x400 -#define BRFSI_PAPE5G 0x800 -#define BBANDSELECT 0x1 -#define BHTSIG2_GI 0x80 -#define BHTSIG2_SMOOTHING 0x01 -#define BHTSIG2_SOUNDING 0x02 -#define BHTSIG2_AGGREATON 0x08 -#define BHTSIG2_STBC 0x30 -#define BHTSIG2_ADVCODING 0x40 -#define BHTSIG2_NUMOFHTLTF 0x300 -#define BHTSIG2_CRC8 0x3fc -#define BHTSIG1_MCS 0x7f -#define BHTSIG1_BANDWIDTH 0x80 -#define BHTSIG1_HTLENGTH 0xffff -#define BLSIG_RATE 0xf -#define BLSIG_RESERVED 0x10 -#define BLSIG_LENGTH 0x1fffe -#define BLSIG_PARITY 0x20 -#define BCCKRXPHASE 0x4 - -#define BLSSIREADADDRESS 0x7f800000 -#define BLSSIREADEDGE 0x80000000 - -#define BLSSIREADBACKDATA 0xfffff - -#define BLSSIREADOKFLAG 0x1000 -#define BCCKSAMPLERATE 0x8 -#define BREGULATOR0STANDBY 0x1 -#define BREGULATORPLLSTANDBY 0x2 -#define BREGULATOR1STANDBY 0x4 -#define BPLLPOWERUP 0x8 -#define BDPLLPOWERUP 0x10 -#define BDA10POWERUP 0x20 -#define BAD7POWERUP 0x200 -#define BDA6POWERUP 0x2000 -#define BXTALPOWERUP 0x4000 -#define B40MDCLKPOWERUP 0x8000 -#define BDA6DEBUGMODE 0x20000 -#define BDA6SWING 0x380000 - -#define BADCLKPHASE 0x4000000 -#define B80MCLKDELAY 0x18000000 -#define BAFEWATCHDOGENABLE 0x20000000 - -#define BXTALCAP01 0xc0000000 -#define BXTALCAP23 0x3 -#define BXTALCAP92X 0x0f000000 -#define BXTALCAP 0x0f000000 - -#define BINTDIFCLKENABLE 0x400 -#define BEXTSIGCLKENABLE 0x800 -#define BBANDGAP_MBIAS_POWERUP 0x10000 -#define BAD11SH_GAIN 0xc0000 -#define BAD11NPUT_RANGE 0x700000 -#define BAD110P_CURRENT 0x3800000 -#define BLPATH_LOOPBACK 0x4000000 -#define BQPATH_LOOPBACK 0x8000000 -#define BAFE_LOOPBACK 0x10000000 -#define BDA10_SWING 0x7e0 -#define BDA10_REVERSE 0x800 -#define BDA_CLK_SOURCE 0x1000 -#define BDA7INPUT_RANGE 0x6000 -#define BDA7_GAIN 0x38000 -#define BDA7OUTPUT_CM_MODE 0x40000 -#define BDA7INPUT_CM_MODE 0x380000 -#define BDA7CURRENT 0xc00000 
-#define BREGULATOR_ADJUST 0x7000000 -#define BAD11POWERUP_ATTX 0x1 -#define BDA10PS_ATTX 0x10 -#define BAD11POWERUP_ATRX 0x100 -#define BDA10PS_ATRX 0x1000 -#define BCCKRX_AGC_FORMAT 0x200 -#define BPSDFFT_SAMPLE_POINT 0xc000 -#define BPSD_AVERAGE_NUM 0x3000 -#define BIQPATH_CONTROL 0xc00 -#define BPSD_FREQ 0x3ff -#define BPSD_ANTENNA_PATH 0x30 -#define BPSD_IQ_SWITCH 0x40 -#define BPSD_RX_TRIGGER 0x400000 -#define BPSD_TX_TRIGGER 0x80000000 -#define BPSD_SINE_TONE_SCALE 0x7f000000 -#define BPSD_REPORT 0xffff - -#define BOFDM_TXSC 0x30000000 -#define BCCK_TXON 0x1 -#define BOFDM_TXON 0x2 -#define BDEBUG_PAGE 0xfff -#define BDEBUG_ITEM 0xff -#define BANTL 0x10 -#define BANT_NONHT 0x100 -#define BANT_HT1 0x1000 -#define BANT_HT2 0x10000 -#define BANT_HT1S1 0x100000 -#define BANT_NONHTS1 0x1000000 - -#define BCCK_BBMODE 0x3 -#define BCCK_TXPOWERSAVING 0x80 -#define BCCK_RXPOWERSAVING 0x40 - -#define BCCK_SIDEBAND 0x10 - -#define BCCK_SCRAMBLE 0x8 -#define BCCK_ANTDIVERSITY 0x8000 -#define BCCK_CARRIER_RECOVERY 0x4000 -#define BCCK_TXRATE 0x3000 -#define BCCK_DCCANCEL 0x0800 -#define BCCK_ISICANCEL 0x0400 -#define BCCK_MATCH_FILTER 0x0200 -#define BCCK_EQUALIZER 0x0100 -#define BCCK_PREAMBLE_DETECT 0x800000 -#define BCCK_FAST_FALSECCAi 0x400000 -#define BCCK_CH_ESTSTARTi 0x300000 -#define BCCK_CCA_COUNTi 0x080000 -#define BCCK_CS_LIM 0x070000 -#define BCCK_BIST_MODEi 0x80000000 -#define BCCK_CCAMASK 0x40000000 -#define BCCK_TX_DAC_PHASE 0x4 -#define BCCK_RX_ADC_PHASE 0x20000000 -#define BCCKR_CP_MODE 0x0100 -#define BCCK_TXDC_OFFSET 0xf0 -#define BCCK_RXDC_OFFSET 0xf -#define BCCK_CCA_MODE 0xc000 -#define BCCK_FALSECS_LIM 0x3f00 -#define BCCK_CS_RATIO 0xc00000 -#define BCCK_CORGBIT_SEL 0x300000 -#define BCCK_PD_LIM 0x0f0000 -#define BCCK_NEWCCA 0x80000000 -#define BCCK_RXHP_OF_IG 0x8000 -#define BCCK_RXIG 0x7f00 -#define BCCK_LNA_POLARITY 0x800000 -#define BCCK_RX1ST_BAIN 0x7f0000 -#define BCCK_RF_EXTEND 0x20000000 -#define BCCK_RXAGC_SATLEVEL 0x1f000000 -#define BCCK_RXAGC_SATCOUNT 0xe0 -#define bCCKRxRFSettle 0x1f -#define BCCK_FIXED_RXAGC 0x8000 -#define BCCK_ANTENNA_POLARITY 0x2000 -#define BCCK_TXFILTER_TYPE 0x0c00 -#define BCCK_RXAGC_REPORTTYPE 0x0300 -#define BCCK_RXDAGC_EN 0x80000000 -#define BCCK_RXDAGC_PERIOD 0x20000000 -#define BCCK_RXDAGC_SATLEVEL 0x1f000000 -#define BCCK_TIMING_RECOVERY 0x800000 -#define BCCK_TXC0 0x3f0000 -#define BCCK_TXC1 0x3f000000 -#define BCCK_TXC2 0x3f -#define BCCK_TXC3 0x3f00 -#define BCCK_TXC4 0x3f0000 -#define BCCK_TXC5 0x3f000000 -#define BCCK_TXC6 0x3f -#define BCCK_TXC7 0x3f00 -#define BCCK_DEBUGPORT 0xff0000 -#define BCCK_DAC_DEBUG 0x0f000000 -#define BCCK_FALSEALARM_ENABLE 0x8000 -#define BCCK_FALSEALARM_READ 0x4000 -#define BCCK_TRSSI 0x7f -#define BCCK_RXAGC_REPORT 0xfe -#define BCCK_RXREPORT_ANTSEL 0x80000000 -#define BCCK_RXREPORT_MFOFF 0x40000000 -#define BCCK_RXREPORT_SQLOSS 0x20000000 -#define BCCK_RXREPORT_PKTLOSS 0x10000000 -#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 -#define BCCK_RXREPORT_RATEERROR 0x04000000 -#define BCCK_RXREPORT_RXRATE 0x03000000 -#define BCCK_RXFA_COUNTER_LOWER 0xff -#define BCCK_RXFA_COUNTER_UPPER 0xff000000 -#define BCCK_RXHPAGC_START 0xe000 -#define BCCK_RXHPAGC_FINAL 0x1c00 -#define BCCK_RXFALSEALARM_ENABLE 0x8000 -#define BCCK_FACOUNTER_FREEZE 0x4000 -#define BCCK_TXPATH_SEL 0x10000000 -#define BCCK_DEFAULT_RXPATH 0xc000000 -#define BCCK_OPTION_RXPATH 0x3000000 - -#define BNUM_OFSTF 0x3 -#define BSHIFT_L 0xc0 -#define BGI_TH 0xc -#define BRXPATH_A 0x1 -#define BRXPATH_B 0x2 -#define BRXPATH_C 0x4 -#define 
BRXPATH_D 0x8 -#define BTXPATH_A 0x1 -#define BTXPATH_B 0x2 -#define BTXPATH_C 0x4 -#define BTXPATH_D 0x8 -#define BTRSSI_FREQ 0x200 -#define BADC_BACKOFF 0x3000 -#define BDFIR_BACKOFF 0xc000 -#define BTRSSI_LATCH_PHASE 0x10000 -#define BRX_LDC_OFFSET 0xff -#define BRX_QDC_OFFSET 0xff00 -#define BRX_DFIR_MODE 0x1800000 -#define BRX_DCNF_TYPE 0xe000000 -#define BRXIQIMB_A 0x3ff -#define BRXIQIMB_B 0xfc00 -#define BRXIQIMB_C 0x3f0000 -#define BRXIQIMB_D 0xffc00000 -#define BDC_DC_NOTCH 0x60000 -#define BRXNB_NOTCH 0x1f000000 -#define BPD_TH 0xf -#define BPD_TH_OPT2 0xc000 -#define BPWED_TH 0x700 -#define BIFMF_WIN_L 0x800 -#define BPD_OPTION 0x1000 -#define BMF_WIN_L 0xe000 -#define BBW_SEARCH_L 0x30000 -#define BWIN_ENH_L 0xc0000 -#define BBW_TH 0x700000 -#define BED_TH2 0x3800000 -#define BBW_OPTION 0x4000000 -#define BRADIO_TH 0x18000000 -#define BWINDOW_L 0xe0000000 -#define BSBD_OPTION 0x1 -#define BFRAME_TH 0x1c -#define BFS_OPTION 0x60 -#define BDC_SLOPE_CHECK 0x80 -#define BFGUARD_COUNTER_DC_L 0xe00 -#define BFRAME_WEIGHT_SHORT 0x7000 -#define BSUB_TUNE 0xe00000 -#define BFRAME_DC_LENGTH 0xe000000 -#define BSBD_START_OFFSET 0x30000000 -#define BFRAME_TH_2 0x7 -#define BFRAME_GI2_TH 0x38 -#define BGI2_SYNC_EN 0x40 -#define BSARCH_SHORT_EARLY 0x300 -#define BSARCH_SHORT_LATE 0xc00 -#define BSARCH_GI2_LATE 0x70000 -#define BCFOANTSUM 0x1 -#define BCFOACC 0x2 -#define BCFOSTARTOFFSET 0xc -#define BCFOLOOPBACK 0x70 -#define BCFOSUMWEIGHT 0x80 -#define BDAGCENABLE 0x10000 -#define BTXIQIMB_A 0x3ff -#define BTXIQIMB_b 0xfc00 -#define BTXIQIMB_C 0x3f0000 -#define BTXIQIMB_D 0xffc00000 -#define BTXIDCOFFSET 0xff -#define BTXIQDCOFFSET 0xff00 -#define BTXDFIRMODE 0x10000 -#define BTXPESUDO_NOISEON 0x4000000 -#define BTXPESUDO_NOISE_A 0xff -#define BTXPESUDO_NOISE_B 0xff00 -#define BTXPESUDO_NOISE_C 0xff0000 -#define BTXPESUDO_NOISE_D 0xff000000 -#define BCCA_DROPOPTION 0x20000 -#define BCCA_DROPTHRES 0xfff00000 -#define BEDCCA_H 0xf -#define BEDCCA_L 0xf0 -#define BLAMBDA_ED 0x300 -#define BRX_INITIALGAIN 0x7f -#define BRX_ANTDIV_EN 0x80 -#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 -#define BRX_HIGHPOWER_FLOW 0x8000 -#define BRX_AGC_FREEZE_THRES 0xc0000 -#define BRX_FREEZESTEP_AGC1 0x300000 -#define BRX_FREEZESTEP_AGC2 0xc00000 -#define BRX_FREEZESTEP_AGC3 0x3000000 -#define BRX_FREEZESTEP_AGC0 0xc000000 -#define BRXRSSI_CMP_EN 0x10000000 -#define BRXQUICK_AGCEN 0x20000000 -#define BRXAGC_FREEZE_THRES_MODE 0x40000000 -#define BRX_OVERFLOW_CHECKTYPE 0x80000000 -#define BRX_AGCSHIFT 0x7f -#define BTRSW_TRI_ONLY 0x80 -#define BPOWER_THRES 0x300 -#define BRXAGC_EN 0x1 -#define BRXAGC_TOGETHER_EN 0x2 -#define BRXAGC_MIN 0x4 -#define BRXHP_INI 0x7 -#define BRXHP_TRLNA 0x70 -#define BRXHP_RSSI 0x700 -#define BRXHP_BBP1 0x7000 -#define BRXHP_BBP2 0x70000 -#define BRXHP_BBP3 0x700000 -#define BRSSI_H 0x7f0000 -#define BRSSI_GEN 0x7f000000 -#define BRXSETTLE_TRSW 0x7 -#define BRXSETTLE_LNA 0x38 -#define BRXSETTLE_RSSI 0x1c0 -#define BRXSETTLE_BBP 0xe00 -#define BRXSETTLE_RXHP 0x7000 -#define BRXSETTLE_ANTSW_RSSI 0x38000 -#define BRXSETTLE_ANTSW 0xc0000 -#define BRXPROCESS_TIME_DAGC 0x300000 -#define BRXSETTLE_HSSI 0x400000 -#define BRXPROCESS_TIME_BBPPW 0x800000 -#define BRXANTENNA_POWER_SHIFT 0x3000000 -#define BRSSI_TABLE_SELECT 0xc000000 -#define BRXHP_FINAL 0x7000000 -#define BRXHPSETTLE_BBP 0x7 -#define BRXHTSETTLE_HSSI 0x8 -#define BRXHTSETTLE_RXHP 0x70 -#define BRXHTSETTLE_BBPPW 0x80 -#define BRXHTSETTLE_IDLE 0x300 -#define BRXHTSETTLE_RESERVED 0x1c00 -#define BRXHT_RXHP_EN 0x8000 -#define 
BRXAGC_FREEZE_THRES 0x30000 -#define BRXAGC_TOGETHEREN 0x40000 -#define BRXHTAGC_MIN 0x80000 -#define BRXHTAGC_EN 0x100000 -#define BRXHTDAGC_EN 0x200000 -#define BRXHT_RXHP_BBP 0x1c00000 -#define BRXHT_RXHP_FINAL 0xe0000000 -#define BRXPW_RADIO_TH 0x3 -#define BRXPW_RADIO_EN 0x4 -#define BRXMF_HOLD 0x3800 -#define BRXPD_DELAY_TH1 0x38 -#define BRXPD_DELAY_TH2 0x1c0 -#define BRXPD_DC_COUNT_MAX 0x600 -#define BRXPD_DELAY_TH 0x8000 -#define BRXPROCESS_DELAY 0xf0000 -#define BRXSEARCHRANGE_GI2_EARLY 0x700000 -#define BRXFRAME_FUARD_COUNTER_L 0x3800000 -#define BRXSGI_GUARD_L 0xc000000 -#define BRXSGI_SEARCH_L 0x30000000 -#define BRXSGI_TH 0xc0000000 -#define BDFSCNT0 0xff -#define BDFSCNT1 0xff00 -#define BDFSFLAG 0xf0000 -#define BMF_WEIGHT_SUM 0x300000 -#define BMINIDX_TH 0x7f000000 -#define BDAFORMAT 0x40000 -#define BTXCH_EMU_ENABLE 0x01000000 -#define BTRSW_ISOLATION_A 0x7f -#define BTRSW_ISOLATION_B 0x7f00 -#define BTRSW_ISOLATION_C 0x7f0000 -#define BTRSW_ISOLATION_D 0x7f000000 -#define BEXT_LNA_GAIN 0x7c00 - -#define BSTBC_EN 0x4 -#define BANTENNA_MAPPING 0x10 -#define BNSS 0x20 -#define BCFO_ANTSUM_ID 0x200 -#define BPHY_COUNTER_RESET 0x8000000 -#define BCFO_REPORT_GET 0x4000000 -#define BOFDM_CONTINUE_TX 0x10000000 -#define BOFDM_SINGLE_CARRIER 0x20000000 -#define BOFDM_SINGLE_TONE 0x40000000 -#define BHT_DETECT 0x100 -#define BCFOEN 0x10000 -#define BCFOVALUE 0xfff00000 -#define BSIGTONE_RE 0x3f -#define BSIGTONE_IM 0x7f00 -#define BCOUNTER_CCA 0xffff -#define BCOUNTER_PARITYFAIL 0xffff0000 -#define BCOUNTER_RATEILLEGAL 0xffff -#define BCOUNTER_CRC8FAIL 0xffff0000 -#define BCOUNTER_MCSNOSUPPORT 0xffff -#define BCOUNTER_FASTSYNC 0xffff -#define BSHORTCFO 0xfff -#define BSHORTCFOT_LENGTH 12 -#define BSHORTCFOF_LENGTH 11 -#define BLONGCFO 0x7ff -#define BLONGCFOT_LENGTH 11 -#define BLONGCFOF_LENGTH 11 -#define BTAILCFO 0x1fff -#define BTAILCFOT_LENGTH 13 -#define BTAILCFOF_LENGTH 12 -#define BNOISE_EN_PWDB 0xffff -#define BCC_POWER_DB 0xffff0000 -#define BMOISE_PWDB 0xffff -#define BPOWERMEAST_LENGTH 10 -#define BPOWERMEASF_LENGTH 3 -#define BRX_HT_BW 0x1 -#define BRXSC 0x6 -#define BRX_HT 0x8 -#define BNB_INTF_DET_ON 0x1 -#define BINTF_WIN_LEN_CFG 0x30 -#define BNB_INTF_TH_CFG 0x1c0 -#define BRFGAIN 0x3f -#define BTABLESEL 0x40 -#define BTRSW 0x80 -#define BRXSNR_A 0xff -#define BRXSNR_B 0xff00 -#define BRXSNR_C 0xff0000 -#define BRXSNR_D 0xff000000 -#define BSNR_EVMT_LENGTH 8 -#define BSNR_EVMF_LENGTH 1 -#define BCSI1ST 0xff -#define BCSI2ND 0xff00 -#define BRXEVM1ST 0xff0000 -#define BRXEVM2ND 0xff000000 -#define BSIGEVM 0xff -#define BPWDB 0xff00 -#define BSGIEN 0x10000 - -#define BSFACTOR_QMA1 0xf -#define BSFACTOR_QMA2 0xf0 -#define BSFACTOR_QMA3 0xf00 -#define BSFACTOR_QMA4 0xf000 -#define BSFACTOR_QMA5 0xf0000 -#define BSFACTOR_QMA6 0xf0000 -#define BSFACTOR_QMA7 0xf00000 -#define BSFACTOR_QMA8 0xf000000 -#define BSFACTOR_QMA9 0xf0000000 -#define BCSI_SCHEME 0x100000 - -#define BNOISE_LVL_TOP_SET 0x3 -#define BCHSMOOTH 0x4 -#define BCHSMOOTH_CFG1 0x38 -#define BCHSMOOTH_CFG2 0x1c0 -#define BCHSMOOTH_CFG3 0xe00 -#define BCHSMOOTH_CFG4 0x7000 -#define BMRCMODE 0x800000 -#define BTHEVMCFG 0x7000000 - -#define BLOOP_FIT_TYPE 0x1 -#define BUPD_CFO 0x40 -#define BUPD_CFO_OFFDATA 0x80 -#define BADV_UPD_CFO 0x100 -#define BADV_TIME_CTRL 0x800 -#define BUPD_CLKO 0x1000 -#define BFC 0x6000 -#define BTRACKING_MODE 0x8000 -#define BPHCMP_ENABLE 0x10000 -#define BUPD_CLKO_LTF 0x20000 -#define BCOM_CH_CFO 0x40000 -#define BCSI_ESTI_MODE 0x80000 -#define BADV_UPD_EQZ 0x100000 -#define 
BUCHCFG 0x7000000 -#define BUPDEQZ 0x8000000 - -#define BRX_PESUDO_NOISE_ON 0x20000000 -#define BRX_PESUDO_NOISE_A 0xff -#define BRX_PESUDO_NOISE_B 0xff00 -#define BRX_PESUDO_NOISE_C 0xff0000 -#define BRX_PESUDO_NOISE_D 0xff000000 -#define BRX_PESUDO_NOISESTATE_A 0xffff -#define BRX_PESUDO_NOISESTATE_B 0xffff0000 -#define BRX_PESUDO_NOISESTATE_C 0xffff -#define BRX_PESUDO_NOISESTATE_D 0xffff0000 - -#define BZEBRA1_HSSIENABLE 0x8 -#define BZEBRA1_TRXCONTROL 0xc00 -#define BZEBRA1_TRXGAINSETTING 0x07f -#define BZEBRA1_RXCOUNTER 0xc00 -#define BZEBRA1_TXCHANGEPUMP 0x38 -#define BZEBRA1_RXCHANGEPUMP 0x7 -#define BZEBRA1_CHANNEL_NUM 0xf80 -#define BZEBRA1_TXLPFBW 0x400 -#define BZEBRA1_RXLPFBW 0x600 - -#define BRTL8256REG_MODE_CTRL1 0x100 -#define BRTL8256REG_MODE_CTRL0 0x40 -#define BRTL8256REG_TXLPFBW 0x18 -#define BRTL8256REG_RXLPFBW 0x600 - -#define BRTL8258_TXLPFBW 0xc -#define BRTL8258_RXLPFBW 0xc00 -#define BRTL8258_RSSILPFBW 0xc0 - -#define BBYTE0 0x1 -#define BBYTE1 0x2 -#define BBYTE2 0x4 -#define BBYTE3 0x8 -#define BWORD0 0x3 -#define BWORD1 0xc -#define BWORD 0xf - -#define BENABLE 0x1 -#define BDISABLE 0x0 - -#define LEFT_ANTENNA 0x0 -#define RIGHT_ANTENNA 0x1 - -#define TCHECK_TXSTATUS 500 -#define TUPDATE_RXCOUNTER 100 +#define IS_BB_REG_OFFSET_92S(_offset) \ + ((_offset >= 0x800) && (_offset <= 0xfff)) + +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +#define BCCKEN 0x1000000 +#define BOFDMEN 0x2000000 + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 +#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define 
BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 + +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 +#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define BCCKRXRFSETTLE 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define 
BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define 
BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 
0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define TUPDATE_RXCOUNTER 100 /* 2 EFUSE_TEST (For RTL8723 partially) */ -#define EFUSE_SEL(x) (((x) & 0x3) << 8) +#define EFUSE_SEL(x) (((x) & 0x3) << 8) #define EFUSE_SEL_MASK 0x300 -#define EFUSE_WIFI_SEL_0 0x0 - +#define EFUSE_WIFI_SEL_0 0x0 /* Enable GPIO[9] as WiFi HW PDn source*/ -#define WL_HWPDN_EN BIT(0) +#define WL_HWPDN_EN BIT(0) /* WiFi HW PDn polarity control*/ -#define WL_HWPDN_SL BIT(1) +#define WL_HWPDN_SL BIT(1) #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c index 50dd2fb2c93d53a7912e33d414486b4e645f39f4..9ebc8281ff99bdd2708410b10683b043d8537151 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
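The MASKBYTE*/MASKHWORD/MASKDWORD constants and the B* field masks defined above select bit fields inside 32-bit baseband registers; calls visible in this patch, such as rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval), pass one of these masks and let the accessor shift the value into the masked field. A minimal standalone sketch of that mask-and-shift pattern in plain C (the helper names field_write/field_read and the sample register value are invented for this example, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define MASKBYTE3	0xff000000
	#define EFUSE_SEL_MASK	0x300

	/* Shift a field value into the bit positions selected by 'mask'
	 * (read-modify-write), mirroring what a set_bbreg-style helper does. */
	static uint32_t field_write(uint32_t reg, uint32_t mask, uint32_t val)
	{
		uint32_t shift = 0;

		while (!((mask >> shift) & 1))
			shift++;
		return (reg & ~mask) | ((val << shift) & mask);
	}

	static uint32_t field_read(uint32_t reg, uint32_t mask)
	{
		uint32_t shift = 0;

		while (!((mask >> shift) & 1))
			shift++;
		return (reg & mask) >> shift;
	}

	int main(void)
	{
		uint32_t reg = 0x12345678;	/* arbitrary sample register word */

		reg = field_write(reg, MASKBYTE3, 0x3f);	/* top byte := 0x3f */
		reg = field_write(reg, EFUSE_SEL_MASK, 0x2);	/* EFUSE_SEL(2)     */
		printf("reg=0x%08x sel=%u\n", reg, field_read(reg, EFUSE_SEL_MASK));
		return 0;
	}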
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -34,10 +30,12 @@ #include "rf.h" #include "dm.h" -void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; switch (bandwidth) { case HT_CHANNEL_WIDTH_20: @@ -59,11 +57,11 @@ void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) } } -void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel) +void rtl8723e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 tx_agc[2] = {0, 0}, tmpval; @@ -79,7 +77,8 @@ void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, tx_agc[RF90_PATH_B] = 0x3f3f3f3f; if (turbo_scanoff) { - for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; + idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | @@ -89,24 +88,27 @@ void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, } else { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | - (ppowerlevel[idx1] << 8) | - (ppowerlevel[idx1] << 16) | - (ppowerlevel[idx1] << 24); + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { - tmpval = (rtlphy->mcs_offset[0][6]) + - (rtlphy->mcs_offset[0][7] << 8); + tmpval = + (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << + 8); tx_agc[RF90_PATH_A] += tmpval; - tmpval = (rtlphy->mcs_offset[0][14]) + - (rtlphy->mcs_offset[0][15] << 24); + tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << + 24); tx_agc[RF90_PATH_B] += tmpval; } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - ptr = (u8 *) (&(tx_agc[idx1])); + ptr = (u8 *)&tx_agc[idx1]; for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; @@ -119,7 +121,7 @@ void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, - RTXAGC_A_CCK1_MCS32); + RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; @@ -129,100 +131,99 @@ void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, - RTXAGC_B_CCK11_A_CCK2_11); + RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, - RTXAGC_B_CCK11_A_CCK2_11); + RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, 
tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, - RTXAGC_B_CCK1_55_MCS32); + RTXAGC_B_CCK1_55_MCS32); } -static void rtl8723ae_phy_get_power_base(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel, - u32 *ofdmbase, u32 *mcsbase) +static void rtl8723e_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel, + u32 *ofdmbase, u32 *mcsbase) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 powerBase0, powerBase1; + u32 powerbase0, powerbase1; u8 legacy_pwrdiff, ht20_pwrdiff; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; - powerBase0 = powerlevel[i] + legacy_pwrdiff; + powerbase0 = powerlevel[i] + legacy_pwrdiff; - powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | - (powerBase0 << 8) | powerBase0; - *(ofdmbase + i) = powerBase0; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", - ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)); + ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { - ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; + ht20_pwrdiff = + rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } - powerBase1 = powerlevel[i]; - powerBase1 = (powerBase1 << 24) | - (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | + (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; - *(mcsbase + i) = powerBase1; + *(mcsbase + i) = powerbase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", - ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); + ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); } } -static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw, - u8 channel, u8 index, - u32 *powerBase0, - u32 *powerBase1, - u32 *p_outwriteval) +static void get_txpower_writeval_by_reg(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerbase0, + u32 *powerbase1, + u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; - u32 writeVal, customer_limit, rf; + u32 writeval, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; - writeVal = rtlphy->mcs_offset[chnlgroup] - [index + (rf ? 8 : 0)] + - ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index + + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, " - "writeVal(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeVal); + "RTK better performance, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; case 1: if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { - writeVal = ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + writeval = ((index < 2) ? powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 40MHz, " - "writeVal(%c) = 0x%x\n", - ((rf == 0) ? 
'A' : 'B'), writeVal); + "Realtek regulatory, 40MHz, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); } else { if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; @@ -234,29 +235,30 @@ static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw, else if (channel > 9) chnlgroup = 2; if (rtlphy->current_chan_bw == - HT_CHANNEL_WIDTH_20) + HT_CHANNEL_WIDTH_20) chnlgroup++; else chnlgroup += 4; } - writeVal = rtlphy->mcs_offset[chnlgroup] + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? - powerBase0[rf] : - powerBase1[rf]); + powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeVal); + "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); } break; case 2: - writeVal = - ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + writeval = + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Better regulatory, writeVal(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeVal); + "Better regulatory, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; case 3: chnlgroup = 0; @@ -265,18 +267,21 @@ static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHz rf(%c) = 0x%x\n", ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht40[rf][channel-1]); + rtlefuse->pwrgroup_ht40[rf][channel - + 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht20[rf][channel-1]); + rtlefuse->pwrgroup_ht20[rf][channel - + 1]); } for (i = 0; i < 4; i++) { pwr_diff_limit[i] = - (u8) ((rtlphy->mcs_offset - [chnlgroup][index + (rf ? 8 : 0)] & - (0x7f << (i * 8))) >> (i * 8)); + (u8)((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + + (rf ? 8 : 0)] & (0x7f << + (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { @@ -302,41 +307,42 @@ static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), customer_limit); + ((rf == 0) ? 'A' : 'B'), customer_limit); - writeVal = customer_limit + - ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + writeval = customer_limit + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer, writeVal rf(%c)= 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeVal); + "Customer, writeval rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; default: chnlgroup = 0; - writeVal = rtlphy->mcs_offset[chnlgroup][index + - (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeVal rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeVal); + "RTK better performance, writeval rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) - writeVal = writeVal - 0x06060606; + writeval = writeval - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) - writeVal = writeVal - 0x0c0c0c0c; - *(p_outwriteval + rf) = writeVal; + writeval = writeval - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeval; } } -static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw, - u8 index, u32 *pValue) +static void _rtl8723e_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *pvalue) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u16 regoffset_a[6] = { RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, @@ -349,29 +355,29 @@ static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; - u32 writeVal; + u32 writeval; u16 regoffset; for (rf = 0; rf < 2; rf++) { - writeVal = pValue[rf]; + writeval = pvalue[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8) ((writeVal & (0x7f << - (i * 8))) >> (i * 8)); + pwr_val[i] = (u8)((writeval & (0x7f << + (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } - writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; - rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Set 0x%x = %08x\n", regoffset, writeVal); + "Set 0x%x = %08x\n", regoffset, writeval); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || @@ -380,7 +386,7 @@ static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw, (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { - writeVal = pwr_val[3]; + writeval = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; @@ -389,37 +395,49 @@ static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw, regoffset = 0xc98; for (i = 0; i < 3; i++) { - writeVal = (writeVal > 6) ? (writeVal - 6) : 0; + writeval = (writeval > 6) ? 
(writeval - 6) : 0; rtl_write_byte(rtlpriv, (u32) (regoffset + i), - (u8) writeVal); + (u8)writeval); } } } } -void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel) +void rtl8723e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel) { - u32 writeVal[2], powerBase0[2], powerBase1[2]; + u32 writeval[2], powerbase0[2], powerbase1[2]; u8 index; - rtl8723ae_phy_get_power_base(hw, ppowerlevel, - channel, &powerBase0[0], &powerBase1[0]); + rtl8723e_phy_get_power_base(hw, ppowerlevel, + channel, &powerbase0[0], &powerbase1[0]); for (index = 0; index < 6; index++) { - rtl8723ae_get_txpwr_val_by_reg(hw, channel, index, - &powerBase0[0], - &powerBase1[0], - &writeVal[0]); + get_txpower_writeval_by_reg(hw, channel, index, &powerbase0[0], + &powerbase1[0], + &writeval[0]); - _rtl8723ae_write_ofdm_power_reg(hw, index, &writeVal[0]); + _rtl8723e_write_ofdm_power_reg(hw, index, &writeval[0]); } } -static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +bool rtl8723e_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl8723e_phy_rf6052_config_parafile(hw); +} + +static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; @@ -457,11 +475,12 @@ static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) switch (rfpath) { case RF90_PATH_A: - rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw, + rtstatus = rtl8723e_phy_config_rf_with_headerfile(hw, (enum radio_path)rfpath); break; case RF90_PATH_B: - rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw, + rtstatus = + rtl8723e_phy_config_rf_with_headerfile(hw, (enum radio_path)rfpath); break; case RF90_PATH_C: @@ -469,6 +488,7 @@ static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) case RF90_PATH_D: break; } + switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: @@ -481,25 +501,14 @@ static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) BRFSI_RFENV << 16, u4_regvalue); break; } + if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio[%d] Fail!!", rfpath); return false; } } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n"); - return rtstatus; -} - -bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - if (rtlphy->rf_type == RF_1T1R) - rtlphy->num_total_rfpath = 1; - else - rtlphy->num_total_rfpath = 2; - - return _rtl8723ae_phy_rf6052_config_parafile(hw); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); + return rtstatus; } diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h index 57f1933ee663e48b2433ef887a0b361a107e9770..f3f45b16361f3b98f1a374356290ee7283952903 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
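The _rtl8723e_write_ofdm_power_reg() path above splits each 32-bit writeval into four 7-bit per-rate power bytes, clamps each byte to RF6052_MAX_TX_PWR (0x3F), and repacks them before programming the TXAGC register. A self-contained sketch of just that clamp-and-repack arithmetic (clamp_txagc_word is an invented name for the example; the sample input is illustrative and the actual register write is omitted):

	#include <stdint.h>
	#include <stdio.h>

	#define RF6052_MAX_TX_PWR 0x3F

	/* Clamp each of the four 7-bit power bytes packed in 'writeval' to the
	 * RF6052 limit and repack them, as done per TXAGC register above. */
	static uint32_t clamp_txagc_word(uint32_t writeval)
	{
		uint8_t pwr_val[4];
		int i;

		for (i = 0; i < 4; i++) {
			pwr_val[i] = (uint8_t)((writeval & (0x7f << (i * 8))) >> (i * 8));
			if (pwr_val[i] > RF6052_MAX_TX_PWR)
				pwr_val[i] = RF6052_MAX_TX_PWR;
		}
		return (pwr_val[3] << 24) | (pwr_val[2] << 16) |
		       (pwr_val[1] << 8) | pwr_val[0];
	}

	int main(void)
	{
		/* 0x48 exceeds the 0x3f cap; the other bytes pass through. */
		printf("0x%08x\n", clamp_txagc_word(0x48303f2a)); /* -> 0x3f303f2a */
		return 0;
	}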
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -31,12 +27,14 @@ #define __RTL8723E_RF_H__ #define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F -void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); -void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel); -void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel); -bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw); +void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +void rtl8723e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +void rtl8723e_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); +bool rtl8723e_phy_rf6052_config(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c index 73cba1eec8cf9ce331afde11c69e6fdc7f75e59d..8280bab43df4ce47755a1ab545e8dc2cf5aad086 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -28,34 +24,35 @@ *****************************************************************************/ #include "../wifi.h" -#include -#include - #include "../core.h" #include "../pci.h" -#include "../base.h" #include "reg.h" #include "def.h" #include "phy.h" -#include "../rtl8723com/phy_common.h" #include "dm.h" -#include "hw.h" #include "fw.h" #include "../rtl8723com/fw_common.h" +#include "hw.h" #include "sw.h" #include "trx.h" #include "led.h" #include "table.h" #include "hal_btc.h" +#include "../btcoexist/rtl_btc.h" +#include "../rtl8723com/phy_common.h" -static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw) +#include +#include + +static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*close ASPM for AMD defaultly */ rtlpci->const_amdpci_aspm = 0; - /* ASPM PS mode. + /** + * ASPM PS mode. * 0 - Disable ASPM, * 1 - Enable ASPM without Clock Req, * 2 - Enable ASPM with Clock Req, @@ -71,7 +68,8 @@ static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw) /*Setting for PCI-E bridge */ rtlpci->const_hostpci_aspm_setting = 0x02; - /* In Hw/Sw Radio Off situation. + /** + * In Hw/Sw Radio Off situation. * 0 - Default, * 1 - From ASPM setting without low Mac Pwr, * 2 - From ASPM setting with low Mac Pwr, @@ -80,7 +78,8 @@ static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw) */ rtlpci->const_hwsw_rfoff_d3 = 0; - /* This setting works for those device with + /** + * This setting works for those device with * backdoor ASPM setting such as EPHY setting. 
* 0 - Not support ASPM, * 1 - Support ASPM, @@ -89,14 +88,17 @@ static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw) rtlpci->const_support_pciaspm = 1; } -int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw) +int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - int err; + int err = 0; + + rtl8723e_bt_reg_init(hw); + + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); - rtl8723ae_bt_reg_init(hw); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; @@ -138,7 +140,9 @@ int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw) PHIMR_PSTIMEOUT | 0); - rtlpci->irq_mask[1] = (u32)(PHIMR_RXFOVW | 0); + rtlpci->irq_mask[1] = + (u32)(PHIMR_RXFOVW | + 0); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; @@ -146,12 +150,11 @@ int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + if (rtlpriv->cfg->mod_params->disable_watchdog) + pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; - /* for ASPM, you can close aspm through - * set const_support_pciaspm = 0 - */ - rtl8723ae_init_aspm_vars(hw); + rtl8723e_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; @@ -161,7 +164,7 @@ int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /* for firmware buf */ - rtlpriv->rtlhal.pfirmware = vmalloc(0x6000); + rtlpriv->rtlhal.pfirmware = vzalloc(0x6000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); @@ -186,7 +189,7 @@ int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw) return 0; } -void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw) +void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -196,59 +199,69 @@ void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw) } } -static bool is_fw_header(struct rtl92c_firmware_header *hdr) +/* get bt coexist status */ +bool rtl8723e_get_btc_status(void) +{ + return true; +} + +static bool is_fw_header(struct rtl8723e_firmware_header *hdr) { return (hdr->signature & 0xfff0) == 0x2300; } -static struct rtl_hal_ops rtl8723ae_hal_ops = { - .init_sw_vars = rtl8723ae_init_sw_vars, - .deinit_sw_vars = rtl8723ae_deinit_sw_vars, - .read_eeprom_info = rtl8723ae_read_eeprom_info, - .interrupt_recognized = rtl8723ae_interrupt_recognized, - .hw_init = rtl8723ae_hw_init, - .hw_disable = rtl8723ae_card_disable, - .hw_suspend = rtl8723ae_suspend, - .hw_resume = rtl8723ae_resume, - .enable_interrupt = rtl8723ae_enable_interrupt, - .disable_interrupt = rtl8723ae_disable_interrupt, - .set_network_type = rtl8723ae_set_network_type, - .set_chk_bssid = rtl8723ae_set_check_bssid, - .set_qos = rtl8723ae_set_qos, - .set_bcn_reg = rtl8723ae_set_beacon_related_registers, - .set_bcn_intv = rtl8723ae_set_beacon_interval, - .update_interrupt_mask = rtl8723ae_update_interrupt_mask, - .get_hw_reg = rtl8723ae_get_hw_reg, - .set_hw_reg = rtl8723ae_set_hw_reg, - .update_rate_tbl = rtl8723ae_update_hal_rate_tbl, - .fill_tx_desc = rtl8723ae_tx_fill_desc, - .fill_tx_cmddesc = rtl8723ae_tx_fill_cmddesc, - .query_rx_desc = 
rtl8723ae_rx_query_desc, - .set_channel_access = rtl8723ae_update_channel_access_setting, - .radio_onoff_checking = rtl8723ae_gpio_radio_on_off_checking, - .set_bw_mode = rtl8723ae_phy_set_bw_mode, - .switch_channel = rtl8723ae_phy_sw_chnl, - .dm_watchdog = rtl8723ae_dm_watchdog, - .scan_operation_backup = rtl_phy_scan_operation_backup, - .set_rf_power_state = rtl8723ae_phy_set_rf_power_state, - .led_control = rtl8723ae_led_control, - .set_desc = rtl8723ae_set_desc, - .get_desc = rtl8723ae_get_desc, - .tx_polling = rtl8723ae_tx_polling, - .enable_hw_sec = rtl8723ae_enable_hw_security_config, - .set_key = rtl8723ae_set_key, - .init_sw_leds = rtl8723ae_init_sw_leds, +static struct rtl_hal_ops rtl8723e_hal_ops = { + .init_sw_vars = rtl8723e_init_sw_vars, + .deinit_sw_vars = rtl8723e_deinit_sw_vars, + .read_eeprom_info = rtl8723e_read_eeprom_info, + .interrupt_recognized = rtl8723e_interrupt_recognized, + .hw_init = rtl8723e_hw_init, + .hw_disable = rtl8723e_card_disable, + .hw_suspend = rtl8723e_suspend, + .hw_resume = rtl8723e_resume, + .enable_interrupt = rtl8723e_enable_interrupt, + .disable_interrupt = rtl8723e_disable_interrupt, + .set_network_type = rtl8723e_set_network_type, + .set_chk_bssid = rtl8723e_set_check_bssid, + .set_qos = rtl8723e_set_qos, + .set_bcn_reg = rtl8723e_set_beacon_related_registers, + .set_bcn_intv = rtl8723e_set_beacon_interval, + .update_interrupt_mask = rtl8723e_update_interrupt_mask, + .get_hw_reg = rtl8723e_get_hw_reg, + .set_hw_reg = rtl8723e_set_hw_reg, + .update_rate_tbl = rtl8723e_update_hal_rate_tbl, + .fill_tx_desc = rtl8723e_tx_fill_desc, + .fill_tx_cmddesc = rtl8723e_tx_fill_cmddesc, + .query_rx_desc = rtl8723e_rx_query_desc, + .set_channel_access = rtl8723e_update_channel_access_setting, + .radio_onoff_checking = rtl8723e_gpio_radio_on_off_checking, + .set_bw_mode = rtl8723e_phy_set_bw_mode, + .switch_channel = rtl8723e_phy_sw_chnl, + .dm_watchdog = rtl8723e_dm_watchdog, + .scan_operation_backup = rtl8723e_phy_scan_operation_backup, + .set_rf_power_state = rtl8723e_phy_set_rf_power_state, + .led_control = rtl8723e_led_control, + .set_desc = rtl8723e_set_desc, + .get_desc = rtl8723e_get_desc, + .is_tx_desc_closed = rtl8723e_is_tx_desc_closed, + .tx_polling = rtl8723e_tx_polling, + .enable_hw_sec = rtl8723e_enable_hw_security_config, + .set_key = rtl8723e_set_key, + .init_sw_leds = rtl8723e_init_sw_leds, .get_bbreg = rtl8723_phy_query_bb_reg, .set_bbreg = rtl8723_phy_set_bb_reg, - .get_rfreg = rtl8723ae_phy_query_rf_reg, - .set_rfreg = rtl8723ae_phy_set_rf_reg, + .get_rfreg = rtl8723e_phy_query_rf_reg, + .set_rfreg = rtl8723e_phy_set_rf_reg, .c2h_command_handle = rtl_8723e_c2h_command_handle, .bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify, - .bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps, + .bt_coex_off_before_lps = + rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps, + .get_btc_status = rtl8723e_get_btc_status, + .rx_command_packet = rtl8723e_rx_command_packet, .is_fw_header = is_fw_header, }; -static struct rtl_mod_params rtl8723ae_mod_params = { +static struct rtl_mod_params rtl8723e_mod_params = { .sw_crypto = false, .inactiveps = true, .swctrl_lps = false, @@ -256,13 +269,13 @@ static struct rtl_mod_params rtl8723ae_mod_params = { .debug = DBG_EMERG, }; -static struct rtl_hal_cfg rtl8723ae_hal_cfg = { +static struct rtl_hal_cfg rtl8723e_hal_cfg = { .bar_id = 2, .write_readback = true, - .name = "rtl8723ae_pci", - .fw_name = "rtlwifi/rtl8723fw.bin", - .ops = &rtl8723ae_hal_ops, - .mod_params = 
&rtl8723ae_mod_params, + .name = "rtl8723e_pci", + .fw_name = "rtlwifi/rtl8723efw.bin", + .ops = &rtl8723e_hal_ops, + .mod_params = &rtl8723e_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = REG_SYS_CLKR, @@ -271,6 +284,8 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = { .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_CLK] = 0, @@ -328,62 +343,63 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = { .maps[RTL_IMR_VIDOK] = PHIMR_VIDOK, .maps[RTL_IMR_VODOK] = PHIMR_VODOK, .maps[RTL_IMR_ROK] = PHIMR_ROK, - .maps[RTL_IBSS_INT_MASKS] = (PHIMR_BCNDMAINT0 | - PHIMR_TXBCNOK | PHIMR_TXBCNERR), + .maps[RTL_IBSS_INT_MASKS] = + (PHIMR_BCNDMAINT0 | PHIMR_TXBCNOK | PHIMR_TXBCNERR), .maps[RTL_IMR_C2HCMD] = PHIMR_C2HCMD, - .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, - .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, - .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, - .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, - .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, - .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, - .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, - .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, - .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, - .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, - .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, - .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, - - .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, - .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, + .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723ae_pci_ids[] = { - {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)}, +static struct pci_device_id rtl8723e_pci_ids[] = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723e_hal_cfg)}, {}, }; -MODULE_DEVICE_TABLE(pci, rtl8723ae_pci_ids); +MODULE_DEVICE_TABLE(pci, rtl8723e_pci_ids); MODULE_AUTHOR("lizhaoming "); MODULE_AUTHOR("Realtek WlanFAE "); -MODULE_AUTHOR("Larry Finger "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); -MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin"); -MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin"); - -module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444); -module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444); -module_param_named(ips, rtl8723ae_mod_params.inactiveps, bool, 0444); -module_param_named(swlps, rtl8723ae_mod_params.swctrl_lps, bool, 0444); -module_param_named(fwlps, rtl8723ae_mod_params.fwctrl_lps, bool, 0444); +MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin"); + +module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444); +module_param_named(debug, rtl8723e_mod_params.debug, int, 0444); +module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444); +module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, 
bool, 0444); +module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444); +module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog, + bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); -static struct pci_driver rtl8723ae_driver = { +static struct pci_driver rtl8723e_driver = { .name = KBUILD_MODNAME, - .id_table = rtl8723ae_pci_ids, + .id_table = rtl8723e_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, .driver.pm = &rtlwifi_pm_ops, }; -module_pci_driver(rtl8723ae_driver); +module_pci_driver(rtl8723e_driver); diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h index fc4fde5e3eb557bdb0f6141642142bfc18ed28e3..46478780d262bd875546efefa22403782b54a545 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,8 +26,10 @@ #ifndef __RTL8723E_SW_H__ #define __RTL8723E_SW_H__ -int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw); -void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw); -void rtl8723ae_init_var_map(struct ieee80211_hw *hw); +int rtl8723e_init_sw_vars(struct ieee80211_hw *hw); +void rtl8723e_deinit_sw_vars(struct ieee80211_hw *hw); +void rtl8723e_init_var_map(struct ieee80211_hw *hw); +bool rtl8723e_get_btc_status(void); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c index 9b0b50cc4ade0b3c00027210172bc6fd603c8c2b..61e86045f15ca2b16c49aa26c4144b9a0b8a3311 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -335,7 +331,7 @@ u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH] = { 0x868, 0xffffffff, 0x00000000, }; -u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH] = { +u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, @@ -479,12 +475,10 @@ u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH] = { 0x000, 0x00030159, }; - u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = { 0x0, }; - u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = { 0x420, 0x00000080, 0x423, 0x00000000, diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h index f5ce71375c207f8e6c32c06f3bc783c100076a32..57a548ceba7d16898b8ac5ebd876392681b56037 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -38,8 +34,8 @@ extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH]; #define RTL8723E_PHY_REG_ARRAY_PGLENGTH 336 extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH]; -#define Rtl8723ERADIOA_1TARRAYLENGTH 282 -extern u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH]; +#define RTL8723ERADIOA_1TARRAYLENGTH 282 +extern u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH]; #define RTL8723E_RADIOB_1TARRAYLENGTH 1 extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH]; #define RTL8723E_MACARRAYLENGTH 172 diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c index 10b7577b6ae534906102ec29edff266dbdee12d4..d372ccaf3465e062e173ef531e2e5501e58eab51 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
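In the trx.c changes that follow, _rtl8723e_query_rxphystatus() converts each RX path's reported gain to dBm with ((gain_trsw[i] & 0x3f) * 2) - 110 and then feeds the result to rtl_query_rxpwrpercentage() for the 0-100 signal figure. A short worked check of that conversion (gain_to_dbm is an invented helper name and the input gain 0x2e is an arbitrary example value, not real hardware output):

	#include <stdio.h>

	/* dBm conversion used per RX path in the hunk below: two dB per
	 * 6-bit gain step, offset so that gain 0 corresponds to -110 dBm. */
	static int gain_to_dbm(unsigned char gain_trsw)
	{
		return ((gain_trsw & 0x3f) * 2) - 110;
	}

	int main(void)
	{
		/* e.g. a reported gain of 0x2e gives (46 * 2) - 110 = -18 dBm */
		printf("%d dBm\n", gain_to_dbm(0x2e));
		return 0;
	}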
* @@ -37,7 +33,7 @@ #include "trx.h" #include "led.h" -static u8 _rtl8723ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) +static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) { __le16 fc = rtl_get_fc(skb); @@ -49,16 +45,174 @@ static u8 _rtl8723ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) return skb->priority; } -static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, - struct rtl_stats *pstatus, u8 *pdesc, - struct rx_fwinfo_8723e *p_drvinfo, - bool bpacket_match_bssid, - bool bpacket_toself, bool packet_beacon) +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl8723e_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate) +{ + int rate_idx; + + if (!isht) { + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC92C_RATE6M: + rate_idx = 0; + break; + case DESC92C_RATE9M: + rate_idx = 1; + break; + case DESC92C_RATE12M: + rate_idx = 2; + break; + case DESC92C_RATE18M: + rate_idx = 3; + break; + case DESC92C_RATE24M: + rate_idx = 4; + break; + case DESC92C_RATE36M: + rate_idx = 5; + break; + case DESC92C_RATE48M: + rate_idx = 6; + break; + case DESC92C_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch (desc_rate) { + case DESC92C_RATEMCS0: + rate_idx = 0; + break; + case DESC92C_RATEMCS1: + rate_idx = 1; + break; + case DESC92C_RATEMCS2: + rate_idx = 2; + break; + case DESC92C_RATEMCS3: + rate_idx = 3; + break; + case DESC92C_RATEMCS4: + rate_idx = 4; + break; + case DESC92C_RATEMCS5: + rate_idx = 5; + break; + case DESC92C_RATEMCS6: + rate_idx = 6; + break; + case DESC92C_RATEMCS7: + rate_idx = 7; + break; + case DESC92C_RATEMCS8: + rate_idx = 8; + break; + case DESC92C_RATEMCS9: + rate_idx = 9; + break; + case DESC92C_RATEMCS10: + rate_idx = 10; + break; + case DESC92C_RATEMCS11: + rate_idx = 11; + break; + case DESC92C_RATEMCS12: + rate_idx = 12; + break; + case DESC92C_RATEMCS13: + rate_idx = 13; + break; + case DESC92C_RATEMCS14: + rate_idx = 14; + break; + case DESC92C_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + +static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8723e *p_drvinfo, + bool 
bpacket_match_bssid, + bool bpacket_toself, bool packet_beacon) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); struct phy_sts_cck_8723e_t *cck_buf; - s8 rx_pwr_all, rx_pwr[4]; + s8 rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, pwdb_all; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; @@ -68,8 +222,8 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, pstatus->packet_matchbssid = bpacket_match_bssid; pstatus->packet_toself = bpacket_toself; pstatus->packet_beacon = packet_beacon; - pstatus->rx_mimo_sig_qual[0] = -1; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; if (is_cck) { u8 report, cck_highpwr; @@ -77,14 +231,14 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, /* CCK Driver info Structure is not the same as OFDM packet. */ cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo; - /* (1)Hardware does not provide RSSI for CCK - * (2)PWDB, Average PWDB cacluated by + /* (1)Hardware does not provide RSSI for CCK */ + /* (2)PWDB, Average PWDB cacluated by * hardware (for rate adaptive) */ if (ppsc->rfpwr_state == ERFON) - cck_highpwr = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, - BIT(9)); + cck_highpwr = (u8)rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + BIT(9)); else cck_highpwr = false; @@ -127,8 +281,9 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, } pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); - /* CCK gain is smaller than OFDM/MCS gain, - * so we add gain diff. From experience, the val is 6 + /* CCK gain is smaller than OFDM/MCS gain, */ + /* so we add gain diff by experiences, + * the val is 6 */ pwdb_all += 6; if (pwdb_all > 100) @@ -152,9 +307,9 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, if (bpacket_match_bssid) { u8 sq; - if (pstatus->rx_pwdb_all > 40) { + if (pstatus->rx_pwdb_all > 40) sq = 100; - } else { + else { sq = cck_buf->sq_rpt; if (sq > 64) sq = 0; @@ -165,8 +320,8 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, } pstatus->signalquality = sq; - pstatus->rx_mimo_sig_qual[0] = sq; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; } } else { rtlpriv->dm.rfpath_rxenable[0] = @@ -179,18 +334,20 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, if (rtlpriv->dm.rfpath_rxenable[i]) rf_rx_num++; - rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110; + rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & + 0x3f) * 2) - 110; /* Translate DBM to percentage. 
*/ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); total_rssi += rssi; /* Get Rx snr value in DB */ - rtlpriv->stats.rx_snr_db[i] = (p_drvinfo->rxsnr[i] / 2); + rtlpriv->stats.rx_snr_db[i] = + (long)(p_drvinfo->rxsnr[i] / 2); /* Record Signal Strength for next packet */ if (bpacket_match_bssid) - pstatus->rx_mimo_signalstrength[i] = (u8) rssi; + pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } /* (2)PWDB, Average PWDB cacluated by @@ -204,8 +361,8 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, pstatus->recvsignalpower = rx_pwr_all; /* (3)EVM of HT rate */ - if (pstatus->is_ht && pstatus->rate >= DESC92_RATEMCS8 && - pstatus->rate <= DESC92_RATEMCS15) + if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 && + pstatus->rate <= DESC92C_RATEMCS15) max_spatial_stream = 2; else max_spatial_stream = 1; @@ -218,8 +375,10 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, * spatial stream only */ if (i == 0) - pstatus->signalquality = (evm & 0xff); - pstatus->rx_mimo_sig_qual[i] = (evm & 0xff); + pstatus->signalquality = + (u8)(evm & 0xff); + pstatus->rx_mimo_signalquality[i] = + (u8)(evm & 0xff); } } } @@ -235,78 +394,83 @@ static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw, total_rssi /= rf_rx_num)); } -static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw, - struct sk_buff *skb, struct rtl_stats *pstatus, - u8 *pdesc, struct rx_fwinfo_8723e *p_drvinfo) +static void translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8723e *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct ieee80211_hdr *hdr; u8 *tmp_buf; u8 *praddr; - __le16 fc; - u16 type; - bool packet_matchbssid, packet_toself, packet_beacon = false; + /*u8 *psaddr;*/ + u16 fc, type; + bool packet_matchbssid, packet_toself, packet_beacon; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; - fc = hdr->frame_control; - type = WLAN_FC_GET_TYPE(fc); + fc = le16_to_cpu(hdr->frame_control); + type = WLAN_FC_GET_TYPE(hdr->frame_control); praddr = hdr->addr1; - packet_matchbssid = - ((IEEE80211_FTYPE_CTL != type) && - ether_addr_equal(mac->bssid, - (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? hdr->addr1 : - (le16_to_cpu(fc) & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : - hdr->addr3) && - (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv)); + packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && + (ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? 
+ hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && + (!pstatus->crc) && (!pstatus->icv)); - packet_toself = (packet_matchbssid && - ether_addr_equal(praddr, rtlefuse->dev_addr)); + packet_toself = packet_matchbssid && + (ether_addr_equal(praddr, rtlefuse->dev_addr)); - if (ieee80211_is_beacon(fc)) + if (ieee80211_is_beacon(hdr->frame_control)) packet_beacon = true; + else + packet_beacon = false; - _rtl8723ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, - packet_matchbssid, packet_toself, - packet_beacon); + _rtl8723e_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); rtl_process_phyinfo(hw, tmp_buf, pstatus); } -bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw, - struct rtl_stats *status, - struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb) +bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb) { struct rx_fwinfo_8723e *p_drvinfo; struct ieee80211_hdr *hdr; u32 phystatus = GET_RX_DESC_PHYST(pdesc); - status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * - RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); - status->icv = (u16) GET_RX_DESC_ICV(pdesc); - status->crc = (u16) GET_RX_DESC_CRC32(pdesc); + status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->icv = (u16)GET_RX_DESC_ICV(pdesc); + status->crc = (u16)GET_RX_DESC_CRC32(pdesc); status->hwerror = (status->crc | status->icv); status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8) GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); - status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) - && (GET_RX_DESC_FAGGR(pdesc) == 1)); + status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); + status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); + status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); + status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && + (GET_RX_DESC_FAGGR(pdesc) == 1)); status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc); status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); - status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate); + status->is_cck = RX_HAL_IS_CCK_RATE(status->rate); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; + hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + + status->rx_bufshift); + if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -320,69 +484,62 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw, /* hw will set status->decrypted true, if it finds the * frame is open data frame or mgmt frame. 
- * Thus hw will not decrypt a robust managment frame + * So hw will not decryption robust managment frame * for IEEE80211w but still set status->decrypted * true, so here we should set it back to undecrypted * for IEEE80211w frame, and mac80211 sw will help * to decrypt it */ if (status->decrypted) { - hdr = (struct ieee80211_hdr *)(skb->data + - status->rx_drvinfo_size + status->rx_bufshift); - - if (!hdr) { - /* during testing, hdr could be NULL here */ - return false; - } - if ((_ieee80211_is_robust_mgmt_frame(hdr)) && - (ieee80211_has_protected(hdr->frame_control))) - rx_status->flag &= ~RX_FLAG_DECRYPTED; - else + if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && + (ieee80211_has_protected(hdr->frame_control))) rx_status->flag |= RX_FLAG_DECRYPTED; + else + rx_status->flag &= ~RX_FLAG_DECRYPTED; } /* rate_idx: index of data rate into band's * supported rates or MCS index if HT rates * are use (RX_FLAG_HT) + * Notice: this is diff with windows define */ - rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht, - status->rate, false); + rx_status->rate_idx = _rtl8723e_rate_mapping(hw, + status->is_ht, status->rate); rx_status->mactime = status->timestamp_low; if (phystatus == true) { p_drvinfo = (struct rx_fwinfo_8723e *)(skb->data + - status->rx_bufshift); + status->rx_bufshift); - _rtl8723ae_translate_rx_signal_stuff(hw, - skb, status, pdesc, p_drvinfo); + translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo); } - - /*rx_status->qual = status->signal; */ rx_status->signal = status->recvsignalpower + 10; - return true; } -void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc_tx, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, - struct sk_buff *skb, u8 hw_queue, - struct rtl_tcb_desc *ptcdesc) +void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + u8 *txbd, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - bool defaultadapter = true; - u8 *pdesc = pdesc_tx; + bool b_defaultadapter = true; + /* bool b_trigger_ac = false; */ + u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; - u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue); + u8 fw_qsel = _rtl8723e_map_hwqueue_to_fwqueue(skb, hw_queue); bool firstseg = ((hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool lastseg = ((hdr->frame_control & - cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); @@ -398,12 +555,13 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + bw_40 = sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; - rtl_get_tcb_desc(hw, info, sta, skb, ptcdesc); + rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723e)); @@ -415,9 +573,9 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, if (firstseg) { SET_TX_DESC_OFFSET(pdesc, 
USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, ptcdesc->hw_rate); + SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); - if (ptcdesc->use_shortgi || ptcdesc->use_shortpreamble) + if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble) SET_TX_DESC_DATA_SHORTGI(pdesc, 1); if (info->flags & IEEE80211_TX_CTL_AMPDU) { @@ -426,31 +584,33 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, } SET_TX_DESC_SEQ(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcdesc->rts_enable && - !ptcdesc-> - cts_enable) ? 1 : 0)); + SET_TX_DESC_RTS_ENABLE(pdesc, + ((ptcb_desc->rts_enable && + !ptcb_desc->cts_enable) ? 1 : 0)); SET_TX_DESC_HW_RTS_ENABLE(pdesc, - ((ptcdesc->rts_enable - || ptcdesc->cts_enable) ? 1 : 0)); - SET_TX_DESC_CTS2SELF(pdesc, ((ptcdesc->cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(pdesc, ((ptcdesc->rts_stbc) ? 1 : 0)); - - SET_TX_DESC_RTS_RATE(pdesc, ptcdesc->rts_rate); + ((ptcb_desc->rts_enable || + ptcb_desc->cts_enable) ? 1 : 0)); + SET_TX_DESC_CTS2SELF(pdesc, + ((ptcb_desc->cts_enable) ? 1 : 0)); + SET_TX_DESC_RTS_STBC(pdesc, + ((ptcb_desc->rts_stbc) ? 1 : 0)); + + SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); SET_TX_DESC_RTS_BW(pdesc, 0); - SET_TX_DESC_RTS_SC(pdesc, ptcdesc->rts_sc); + SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); SET_TX_DESC_RTS_SHORT(pdesc, - ((ptcdesc->rts_rate <= DESC92_RATE54M) ? - (ptcdesc->rts_use_shortpreamble ? 1 : 0) - : (ptcdesc->rts_use_shortgi ? 1 : 0))); + ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? + (ptcb_desc->rts_use_shortpreamble ? 1 : 0) + : (ptcb_desc->rts_use_shortgi ? 1 : 0))); if (bw_40) { - if (ptcdesc->packet_bw) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { SET_TX_DESC_DATA_BW(pdesc, 1); SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); } else { SET_TX_DESC_DATA_BW(pdesc, 0); SET_TX_DESC_TX_SUB_CARRIER(pdesc, - mac->cur_40_prime_sc); + mac->cur_40_prime_sc); } } else { SET_TX_DESC_DATA_BW(pdesc, 0); @@ -481,6 +641,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, default: SET_TX_DESC_SEC_TYPE(pdesc, 0x0); break; + } } @@ -490,7 +651,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); SET_TX_DESC_DISABLE_FB(pdesc, 0); - SET_TX_DESC_USE_RATE(pdesc, ptcdesc->use_driver_rate ? 1 : 0); + SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { @@ -510,18 +671,21 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); if (rtlpriv->dm.useramask) { - SET_TX_DESC_RATE_ID(pdesc, ptcdesc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcdesc->mac_id); + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); } else { - SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcdesc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcdesc->ratr_index); + SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index); } if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) { SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1); + /* SET_TX_DESC_HWSEQ_EN(pdesc, 1); */ + /* SET_TX_DESC_PKT_ID(pdesc, 8); */ - if (!defaultadapter) + if (!b_defaultadapter) SET_TX_DESC_HWSEQ_SEL_8723(pdesc, 1); + /* SET_TX_DESC_QOS(pdesc, 1); */ } SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); @@ -534,17 +698,19 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, +void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); u8 fw_queue = QSLT_BEACON; + dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { @@ -557,7 +723,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, if (firstseg) SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); - SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M); + SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); SET_TX_DESC_SEQ(pdesc, 0); @@ -577,7 +743,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, SET_TX_DESC_OWN(pdesc, 1); - SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len)); + SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); SET_TX_DESC_FIRST_SEG(pdesc, 1); SET_TX_DESC_LAST_SEG(pdesc, 1); @@ -597,8 +763,8 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, pdesc, TX_DESC_SIZE); } -void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val) +void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val) { if (istx == true) { switch (desc_name) { @@ -635,7 +801,7 @@ void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, } } -u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) +u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) { u32 ret = 0; @@ -660,6 +826,9 @@ u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(pdesc); + break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); @@ -669,7 +838,25 @@ u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) return ret; } -void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) +bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl8723e_get_desc(entry, true, HW_DESC_OWN); + + /** + *beacon packet will only use the first + *descriptor defautly,and the own may not + *be cleared by the hardware + */ + if (own) + return false; + return true; +} + +void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (hw_queue == BEACON_QUEUE) { @@ -679,3 +866,10 @@ void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } + +u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb) +{ + return 0; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h index 4380b7d3a91ac39c05e192816314bfaa4d4a6bc8..017da7e194d8385754ae1e3de5ab09c630078e53 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -30,77 +26,77 @@ #ifndef __RTL8723E_TRX_H__ #define __RTL8723E_TRX_H__ -#define TX_DESC_SIZE 64 +#define TX_DESC_SIZE 64 #define TX_DESC_AGGR_SUBFRAME_SIZE 32 -#define RX_DESC_SIZE 32 +#define RX_DESC_SIZE 32 #define RX_DRV_INFO_SIZE_UNIT 8 #define TX_DESC_NEXT_DESC_OFFSET 40 #define USB_HWDESC_HEADER_LEN 32 -#define CRCLENGTH 4 +#define CRCLENGTH 4 #define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) -#define SET_TX_DESC_OFFSET(__pdesc, __val) \ +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) -#define SET_TX_DESC_BMC(__pdesc, __val) \ +#define SET_TX_DESC_BMC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) -#define SET_TX_DESC_HTC(__pdesc, __val) \ +#define SET_TX_DESC_HTC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) #define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) #define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) -#define SET_TX_DESC_LINIP(__pdesc, __val) \ +#define SET_TX_DESC_LINIP(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) -#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) -#define SET_TX_DESC_GF(__pdesc, __val) \ +#define SET_TX_DESC_GF(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) -#define SET_TX_DESC_OWN(__pdesc, __val) \ +#define SET_TX_DESC_OWN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) -#define GET_TX_DESC_PKT_SIZE(__pdesc) \ +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 0, 16) -#define GET_TX_DESC_OFFSET(__pdesc) \ +#define GET_TX_DESC_OFFSET(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 16, 8) -#define GET_TX_DESC_BMC(__pdesc) \ +#define GET_TX_DESC_BMC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 24, 1) -#define GET_TX_DESC_HTC(__pdesc) \ +#define GET_TX_DESC_HTC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 25, 1) -#define GET_TX_DESC_LAST_SEG(__pdesc) \ +#define GET_TX_DESC_LAST_SEG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_TX_DESC_FIRST_SEG(__pdesc) \ +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_TX_DESC_LINIP(__pdesc) \ +#define GET_TX_DESC_LINIP(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_TX_DESC_NO_ACM(__pdesc) \ +#define GET_TX_DESC_NO_ACM(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_TX_DESC_GF(__pdesc) \ +#define GET_TX_DESC_GF(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_TX_DESC_OWN(__pdesc) \ +#define GET_TX_DESC_OWN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 31, 1) -#define SET_TX_DESC_MACID(__pdesc, __val) \ +#define SET_TX_DESC_MACID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val) #define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val) -#define SET_TX_DESC_BK(__pdesc, __val) \ +#define SET_TX_DESC_BK(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val) #define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val) #define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 
8, 5, __val) -#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ +#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) #define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) -#define SET_TX_DESC_PIFS(__pdesc, __val) \ +#define SET_TX_DESC_PIFS(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) #define SET_TX_DESC_RATE_ID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) -#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) #define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) @@ -109,34 +105,34 @@ #define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) -#define GET_TX_DESC_MACID(__pdesc) \ +#define GET_TX_DESC_MACID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) -#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ +#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) -#define GET_TX_DESC_AGG_BREAK(__pdesc) \ +#define GET_TX_DESC_AGG_BREAK(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) -#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ +#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) -#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ +#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) -#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ +#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) -#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ +#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_TX_DESC_PIFS(__pdesc) \ +#define GET_TX_DESC_PIFS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) -#define GET_TX_DESC_RATE_ID(__pdesc) \ +#define GET_TX_DESC_RATE_ID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) -#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ +#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) -#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ +#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) -#define GET_TX_DESC_SEC_TYPE(__pdesc) \ +#define GET_TX_DESC_SEC_TYPE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) -#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ +#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) -#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ +#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) #define SET_TX_DESC_DATA_RC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) @@ -144,9 +140,9 @@ SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) #define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) -#define SET_TX_DESC_RAW(__pdesc, __val) \ +#define SET_TX_DESC_RAW(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) -#define SET_TX_DESC_CCX(__pdesc, __val) \ +#define SET_TX_DESC_CCX(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) #define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) @@ -161,62 +157,62 @@ #define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) -#define GET_TX_DESC_RTS_RC(__pdesc) \ +#define GET_TX_DESC_RTS_RC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) -#define GET_TX_DESC_DATA_RC(__pdesc) \ +#define GET_TX_DESC_DATA_RC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) -#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ +#define 
GET_TX_DESC_BAR_RTY_TH(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) -#define GET_TX_DESC_MORE_FRAG(__pdesc) \ +#define GET_TX_DESC_MORE_FRAG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) -#define GET_TX_DESC_RAW(__pdesc) \ +#define GET_TX_DESC_RAW(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) -#define GET_TX_DESC_CCX(__pdesc) \ +#define GET_TX_DESC_CCX(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) -#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ +#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) -#define GET_TX_DESC_ANTSEL_A(__pdesc) \ +#define GET_TX_DESC_ANTSEL_A(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) -#define GET_TX_DESC_ANTSEL_B(__pdesc) \ +#define GET_TX_DESC_ANTSEL_B(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) -#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ +#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) -#define GET_TX_DESC_TX_ANTL(__pdesc) \ +#define GET_TX_DESC_TX_ANTL(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) -#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ +#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) #define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) #define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) -#define SET_TX_DESC_SEQ(__pdesc, __val) \ +#define SET_TX_DESC_SEQ(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) -#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ +#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val) #define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) -#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ +#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) -#define GET_TX_DESC_SEQ(__pdesc) \ +#define GET_TX_DESC_SEQ(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) -#define GET_TX_DESC_PKT_ID(__pdesc) \ +#define GET_TX_DESC_PKT_ID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 28, 4) /* For RTL8723 */ #define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val) -#define SET_TX_DESC_HWSEQ_EN_8723(__pdesc, __val) \ +#define SET_TX_DESC_HWSEQ_EN_8723(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val) -#define SET_TX_DESC_HWSEQ_SEL_8723(__pTxDesc, __Value) \ - SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 6, 2, __Value) +#define SET_TX_DESC_HWSEQ_SEL_8723(__txdesc, __value) \ + SET_BITS_TO_LE_4BYTE(__txdesc+16, 6, 2, __value) #define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) #define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) -#define SET_TX_DESC_QOS(__pdesc, __val) \ +#define SET_TX_DESC_QOS(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) #define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) @@ -248,54 +244,54 @@ SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) #define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) -#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ +#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) -#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ +#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) #define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) -#define GET_TX_DESC_RTS_RATE(__pdesc) \ +#define GET_TX_DESC_RTS_RATE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 0, 
5) -#define GET_TX_DESC_AP_DCFE(__pdesc) \ +#define GET_TX_DESC_AP_DCFE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) -#define GET_TX_DESC_QOS(__pdesc) \ +#define GET_TX_DESC_QOS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) -#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ +#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) -#define GET_TX_DESC_USE_RATE(__pdesc) \ +#define GET_TX_DESC_USE_RATE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) #define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) -#define GET_TX_DESC_DISABLE_FB(__pdesc) \ +#define GET_TX_DESC_DISABLE_FB(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) -#define GET_TX_DESC_CTS2SELF(__pdesc) \ +#define GET_TX_DESC_CTS2SELF(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) -#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ +#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) -#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ +#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) -#define GET_TX_DESC_PORT_ID(__pdesc) \ +#define GET_TX_DESC_PORT_ID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) -#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ +#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) -#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ +#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) #define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) -#define GET_TX_DESC_TX_STBC(__pdesc) \ +#define GET_TX_DESC_TX_STBC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) -#define GET_TX_DESC_DATA_SHORT(__pdesc) \ +#define GET_TX_DESC_DATA_SHORT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) -#define GET_TX_DESC_DATA_BW(__pdesc) \ +#define GET_TX_DESC_DATA_BW(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) -#define GET_TX_DESC_RTS_SHORT(__pdesc) \ +#define GET_TX_DESC_RTS_SHORT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) -#define GET_TX_DESC_RTS_BW(__pdesc) \ +#define GET_TX_DESC_RTS_BW(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) -#define GET_TX_DESC_RTS_SC(__pdesc) \ +#define GET_TX_DESC_RTS_SC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) -#define GET_TX_DESC_RTS_STBC(__pdesc) \ +#define GET_TX_DESC_RTS_STBC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) #define SET_TX_DESC_TX_RATE(__pdesc, __val) \ @@ -315,17 +311,17 @@ #define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) -#define GET_TX_DESC_TX_RATE(__pdesc) \ +#define GET_TX_DESC_TX_RATE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) -#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ +#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) -#define GET_TX_DESC_CCX_TAG(__pdesc) \ +#define GET_TX_DESC_CCX_TAG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) -#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ +#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) #define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) -#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ +#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) #define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) @@ -336,9 +332,9 @@ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) #define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) -#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ +#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, 
__val) -#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ +#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) #define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) @@ -349,19 +345,19 @@ #define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)\ SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) -#define GET_TX_DESC_TXAGC_A(__pdesc) \ +#define GET_TX_DESC_TXAGC_A(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) -#define GET_TX_DESC_TXAGC_B(__pdesc) \ +#define GET_TX_DESC_TXAGC_B(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) -#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) -#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ +#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) -#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) -#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) -#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) #define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) @@ -379,11 +375,11 @@ #define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) -#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 16, 4) -#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 20, 4) -#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ +#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 24, 4) #define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 28, 4) @@ -395,7 +391,7 @@ #define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) -#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ +#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) #define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ @@ -410,97 +406,97 @@ #define GET_RX_DESC_PKT_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 0, 14) -#define GET_RX_DESC_CRC32(__pdesc) \ +#define GET_RX_DESC_CRC32(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 14, 1) -#define GET_RX_DESC_ICV(__pdesc) \ +#define GET_RX_DESC_ICV(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 15, 1) #define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 16, 4) #define GET_RX_DESC_SECURITY(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 20, 3) -#define GET_RX_DESC_QOS(__pdesc) \ +#define GET_RX_DESC_QOS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 23, 1) -#define GET_RX_DESC_SHIFT(__pdesc) \ +#define GET_RX_DESC_SHIFT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 24, 2) -#define GET_RX_DESC_PHYST(__pdesc) \ +#define GET_RX_DESC_PHYST(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 26, 1) -#define GET_RX_DESC_SWDEC(__pdesc) \ +#define GET_RX_DESC_SWDEC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 27, 1) -#define GET_RX_DESC_LS(__pdesc) \ +#define GET_RX_DESC_LS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 28, 1) -#define GET_RX_DESC_FS(__pdesc) \ +#define GET_RX_DESC_FS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 29, 1) -#define GET_RX_DESC_EOR(__pdesc) \ +#define GET_RX_DESC_EOR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 30, 1) -#define GET_RX_DESC_OWN(__pdesc) \ +#define GET_RX_DESC_OWN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc, 31, 1) -#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ 
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) #define SET_RX_DESC_EOR(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) #define SET_RX_DESC_OWN(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) -#define GET_RX_DESC_MACID(__pdesc) \ +#define GET_RX_DESC_MACID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) -#define GET_RX_DESC_TID(__pdesc) \ +#define GET_RX_DESC_TID(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 5, 4) #define GET_RX_DESC_HWRSVD(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 9, 5) -#define GET_RX_DESC_PAGGR(__pdesc) \ +#define GET_RX_DESC_PAGGR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) -#define GET_RX_DESC_FAGGR(__pdesc) \ +#define GET_RX_DESC_FAGGR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) #define GET_RX_DESC_A1_FIT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) #define GET_RX_DESC_A2_FIT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) -#define GET_RX_DESC_PAM(__pdesc) \ +#define GET_RX_DESC_PAM(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) -#define GET_RX_DESC_PWR(__pdesc) \ +#define GET_RX_DESC_PWR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) -#define GET_RX_DESC_MD(__pdesc) \ +#define GET_RX_DESC_MD(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) -#define GET_RX_DESC_MF(__pdesc) \ +#define GET_RX_DESC_MF(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) -#define GET_RX_DESC_TYPE(__pdesc) \ +#define GET_RX_DESC_TYPE(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) -#define GET_RX_DESC_MC(__pdesc) \ +#define GET_RX_DESC_MC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) -#define GET_RX_DESC_BC(__pdesc) \ +#define GET_RX_DESC_BC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) -#define GET_RX_DESC_SEQ(__pdesc) \ +#define GET_RX_DESC_SEQ(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) -#define GET_RX_DESC_FRAG(__pdesc) \ +#define GET_RX_DESC_FRAG(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) #define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 16, 14) #define GET_RX_DESC_NEXT_IND(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 30, 1) -#define GET_RX_DESC_RSVD(__pdesc) \ +#define GET_RX_DESC_RSVD(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+8, 31, 1) -#define GET_RX_DESC_RXMCS(__pdesc) \ +#define GET_RX_DESC_RXMCS(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) -#define GET_RX_DESC_RXHT(__pdesc) \ +#define GET_RX_DESC_RXHT(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) -#define GET_RX_DESC_SPLCP(__pdesc) \ +#define GET_RX_DESC_SPLCP(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) -#define GET_RX_DESC_BW(__pdesc) \ +#define GET_RX_DESC_BW(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) -#define GET_RX_DESC_HTC(__pdesc) \ +#define GET_RX_DESC_HTC(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) #define GET_RX_DESC_HWPC_ERR(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 14, 1) #define GET_RX_DESC_HWPC_IND(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 15, 1) -#define GET_RX_DESC_IV0(__pdesc) \ +#define GET_RX_DESC_IV0(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+12, 16, 16) -#define GET_RX_DESC_IV1(__pdesc) \ +#define GET_RX_DESC_IV1(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) -#define GET_RX_DESC_TSFL(__pdesc) \ +#define GET_RX_DESC_TSFL(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) #define GET_RX_DESC_BUFF_ADDR(__pdesc) \ @@ -508,17 +504,17 @@ #define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) -#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ +#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) -#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ +#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ 
SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) -#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ -do { \ - if (_size > TX_DESC_NEXT_DESC_OFFSET) \ +#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ +do { \ + if (_size > TX_DESC_NEXT_DESC_OFFSET) \ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ - else \ - memset(__pdesc, 0, _size); \ + else \ + memset(__pdesc, 0, _size); \ } while (0) struct rx_fwinfo_8723e { @@ -699,22 +695,27 @@ struct rx_desc_8723e { } __packed; -void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, - struct sk_buff *skb, u8 hw_queue, - struct rtl_tcb_desc *ptcb_desc); -bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw, - struct rtl_stats *status, - struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb); -void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val); -u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name); -void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); -void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, +void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, + u8 *pdesc, u8 *txbd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, + struct rtl_tcb_desc *ptcb_desc); +bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl8723e_set_desc(struct ieee80211_hw *hw, + u8 *pdesc, bool istx, u8 desc_name, u8 *val); +u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); +void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); +void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool firstseg, bool lastseg, + struct sk_buff *skb); +u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, struct sk_buff *skb); - #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/rtlwifi/rtl8723be/def.h index 3c30b74e983d128933abd8ade986e82f6a9844d5..025ea5c0f3f6cd2947ce04e74e7b135d600538a7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/def.h @@ -26,158 +26,24 @@ #ifndef __RTL8723BE_DEF_H__ #define __RTL8723BE_DEF_H__ -#define HAL_RETRY_LIMIT_INFRA 48 -#define HAL_RETRY_LIMIT_AP_ADHOC 7 - -#define RESET_DELAY_8185 20 - -#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) -#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) - -#define NUM_OF_FIRMWARE_QUEUE 10 -#define NUM_OF_PAGES_IN_FW 0x100 -#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 -#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 -#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 -#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 - -#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 -#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 -#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 - -#define MAX_LINES_HWCONFIG_TXT 1000 -#define 
MAX_BYTES_LINE_HWCONFIG_TXT 256 - -#define SW_THREE_WIRE 0 -#define HW_THREE_WIRE 2 - -#define BT_DEMO_BOARD 0 -#define BT_QA_BOARD 1 -#define BT_FPGA 2 - #define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 #define HAL_PRIME_CHNL_OFFSET_LOWER 1 #define HAL_PRIME_CHNL_OFFSET_UPPER 2 -#define MAX_H2C_QUEUE_NUM 10 #define RX_MPDU_QUEUE 0 -#define RX_CMD_QUEUE 1 -#define RX_MAX_QUEUE 2 -#define AC2QUEUEID(_AC) (_AC) - -#define C2H_RX_CMD_HDR_LEN 8 -#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 0, 16) -#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 16, 8) -#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 24, 7) -#define GET_C2H_CMD_CONTINUE(__prxhdr) \ - LE_BITS_TO_4BYTE((__prxhdr), 31, 1) -#define GET_C2H_CMD_CONTENT(__prxhdr) \ - ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) - -#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) -#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) -#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) -#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) -#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) -#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ - LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) - -#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) -#define CHIP_BONDING_92C_1T2R 0x1 - -#define CHIP_8723 BIT(0) -#define CHIP_8723B (BIT(1) | BIT(2)) -#define NORMAL_CHIP BIT(3) -#define RF_TYPE_1T1R (~(BIT(4) | BIT(5) | BIT(6))) -#define RF_TYPE_1T2R BIT(4) -#define RF_TYPE_2T2R BIT(5) -#define CHIP_VENDOR_UMC BIT(7) -#define B_CUT_VERSION BIT(12) -#define C_CUT_VERSION BIT(13) -#define D_CUT_VERSION ((BIT(12) | BIT(13))) -#define E_CUT_VERSION BIT(14) -#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28)) - -/* MASK */ -#define IC_TYPE_MASK (BIT(0) | BIT(1) | BIT(2)) -#define CHIP_TYPE_MASK BIT(3) -#define RF_TYPE_MASK (BIT(4) | BIT(5) | BIT(6)) -#define MANUFACTUER_MASK BIT(7) -#define ROM_VERSION_MASK (BIT(11) | BIT(10) | BIT(9) | BIT(8)) -#define CUT_VERSION_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12)) - -/* Get element */ -#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK) -#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK) -#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK) -#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK) -#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) -#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) - -#define IS_92C_SERIAL(version) ((IS_81XXC(version) && IS_2T2R(version)) ?\ - true : false) -#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\ - true : false) -#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\ - true : false) -#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true) -#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\ - ? true : false) -#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\ - ? 
true : false) -enum rf_optype { - RF_OP_BY_SW_3WIRE = 0, - RF_OP_BY_FW, - RF_OP_MAX -}; - -enum rf_power_state { - RF_ON, - RF_OFF, - RF_SLEEP, - RF_SHUT_DOWN, -}; - -enum power_save_mode { - POWER_SAVE_MODE_ACTIVE, - POWER_SAVE_MODE_SAVE, -}; +#define CHIP_8723B (BIT(1) | BIT(2)) +#define NORMAL_CHIP BIT(3) +#define CHIP_VENDOR_SMIC BIT(8) +/* Currently only for RTL8723B */ +#define EXT_VENDOR_ID (BIT(18) | BIT(19)) -enum power_polocy_config { - POWERCFG_MAX_POWER_SAVINGS, - POWERCFG_GLOBAL_POWER_SAVINGS, - POWERCFG_LOCAL_POWER_SAVINGS, - POWERCFG_LENOVO, -}; - -enum interface_select_pci { - INTF_SEL1_MINICARD = 0, - INTF_SEL0_PCIE = 1, - INTF_SEL2_RSV = 2, - INTF_SEL3_RSV = 3, +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, }; enum rtl_desc_qsel { @@ -222,27 +88,5 @@ enum rtl_desc8723e_rate { DESC92C_RATEMCS13 = 0x19, DESC92C_RATEMCS14 = 0x1a, DESC92C_RATEMCS15 = 0x1b, - DESC92C_RATEMCS15_SG = 0x1c, - DESC92C_RATEMCS32 = 0x20, }; - -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, -}; - -struct phy_sts_cck_8723e_t { - u8 adc_pwdb_X[4]; - u8 sq_rpt; - u8 cck_agc_rpt; -}; - -struct h2c_cmd_8723e { - u8 element_id; - u32 cmd_len; - u8 *p_cmdbuffer; -}; - #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c index 13d53a1df78988bbe931c49eeb8d07b770b9c1e8..dd7eb4371f498910476b53635b98a9847c9ac1f6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c @@ -32,7 +32,6 @@ #include "dm.h" #include "../rtl8723com/dm_common.h" #include "fw.h" -#include "../rtl8723com/fw_common.h" #include "trx.h" #include "../btcoexist/rtl_btc.h" @@ -209,7 +208,7 @@ void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, pwr_val = TXPWRTRACK_MAX_IDX; *poutwrite_val = pwr_val | (pwr_val << 8) | - (pwr_val << 16) | (pwr_val << 24); + (pwr_val << 16) | (pwr_val << 24); } static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) @@ -218,8 +217,7 @@ static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) struct dig_t *dm_digtable = &rtlpriv->dm_digtable; dm_digtable->dig_enable_flag = true; - dm_digtable->cur_igvalue = rtl_get_bbreg(hw, - ROFDM0_XAAGCCORE1, 0x7f); + dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; @@ -234,8 +232,8 @@ static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) dm_digtable->forbidden_igi = DM_DIG_MIN; dm_digtable->large_fa_hit = 0; dm_digtable->recover_cnt = 0; - dm_digtable->dig_min_0 = DM_DIG_MIN; - dm_digtable->dig_min_1 = DM_DIG_MIN; + dm_digtable->dig_dynamic_min = DM_DIG_MIN; + dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN; dm_digtable->media_connect_0 = false; dm_digtable->media_connect_1 = false; rtlpriv->dm.dm_initialgain_enable = true; @@ -245,18 +243,18 @@ static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *ra = &(rtlpriv->ra); + struct rate_adaptive *p_ra = &rtlpriv->ra; - ra->ratr_state = DM_RATR_STA_INIT; - ra->pre_ratr_state = DM_RATR_STA_INIT; + p_ra->ratr_state = DM_RATR_STA_INIT; + p_ra->pre_ratr_state = DM_RATR_STA_INIT; if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; - ra->high_rssi_thresh_for_ra = 50; - 
ra->low_rssi_thresh_for_ra40m = 20; + p_ra->high_rssi_thresh_for_ra = 50; + p_ra->low_rssi_thresh_for_ra40m = 20; } static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw) @@ -279,7 +277,7 @@ static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, " rtlpriv->dm.txpower_tracking = %d\n", - rtlpriv->dm.txpower_tracking); + rtlpriv->dm.txpower_tracking); } static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) @@ -287,6 +285,7 @@ static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap; + rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800); rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL; } @@ -308,7 +307,7 @@ void rtl8723be_dm_init(struct ieee80211_hw *hw) static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable); + struct dig_t *rtl_dm_dig = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtlpriv); /* Determine the minimum RSSI */ @@ -325,20 +324,20 @@ static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw) rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "AP Client PWDB = 0x%lx\n", - rtlpriv->dm.entry_min_undec_sm_pwdb); + rtlpriv->dm.entry_min_undec_sm_pwdb); } else { rtl_dm_dig->min_undec_pwdb_for_dm = rtlpriv->dm.undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "STA Default Port PWDB = 0x%x\n", - rtl_dm_dig->min_undec_pwdb_for_dm); + rtl_dm_dig->min_undec_pwdb_for_dm); } } else { rtl_dm_dig->min_undec_pwdb_for_dm = rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, "AP Ext Port or disconnet PWDB = 0x%x\n", - rtl_dm_dig->min_undec_pwdb_for_dm); + rtl_dm_dig->min_undec_pwdb_for_dm); } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", rtl_dm_dig->min_undec_pwdb_for_dm); @@ -347,6 +346,7 @@ static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw) static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_sta_info *drv_priv; u8 h2c_parameter[3] = { 0 }; long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; @@ -367,69 +367,78 @@ static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw) /* If associated entry is found */ if (tmp_entry_max_pwdb != 0) { - rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "EntryMaxPWDB = 0x%lx(%ld)\n", + rtlpriv->dm.entry_max_undec_sm_pwdb = + tmp_entry_max_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMaxPWDB = 0x%lx(%ld)\n", tmp_entry_max_pwdb, tmp_entry_max_pwdb); } else { rtlpriv->dm.entry_max_undec_sm_pwdb = 0; } /* If associated entry is found */ if (tmp_entry_min_pwdb != 0xff) { - rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "EntryMinPWDB = 0x%lx(%ld)\n", + rtlpriv->dm.entry_min_undec_sm_pwdb = + tmp_entry_min_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMinPWDB = 0x%lx(%ld)\n", tmp_entry_min_pwdb, tmp_entry_min_pwdb); } else { rtlpriv->dm.entry_min_undec_sm_pwdb = 0; } /* Indicate Rx signal strength to FW. 
*/ if (rtlpriv->dm.useramask) { - h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF); + h2c_parameter[2] = + (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF); h2c_parameter[1] = 0x20; h2c_parameter[0] = 0; - rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); + rtl8723be_fill_h2c_cmd(hw, H2C_RSSIBE_REPORT, 3, h2c_parameter); } else { - rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); + rtl_write_byte(rtlpriv, 0x4fe, + rtlpriv->dm.undec_sm_pwdb); } rtl8723be_dm_find_minimum_rssi(hw); - rtlpriv->dm_digtable.rssi_val_min = - rtlpriv->dm_digtable.min_undec_pwdb_for_dm; + dm_digtable->rssi_val_min = + rtlpriv->dm_digtable.min_undec_pwdb_for_dm; } void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - if (rtlpriv->dm_digtable.cur_igvalue != current_igi) { + if (dm_digtable->stop_dig) + return; + + if (dm_digtable->cur_igvalue != current_igi) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi); if (rtlpriv->phy.rf_type != RF_1T1R) - rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi); + rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, + 0x7f, current_igi); } - rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue; - rtlpriv->dm_digtable.cur_igvalue = current_igi; + dm_digtable->pre_igvalue = dm_digtable->cur_igvalue; + dm_digtable->cur_igvalue = current_igi; } static void rtl8723be_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct dig_t *dm_digtable = &(rtlpriv->dm_digtable); u8 dig_dynamic_min, dig_maxofmin; - bool firstconnect, firstdisconnect; + bool bfirstconnect, bfirstdisconnect; u8 dm_dig_max, dm_dig_min; u8 current_igi = dm_digtable->cur_igvalue; u8 offset; - /* AP, BT */ + /* AP,BT */ if (mac->act_scanning) return; - dig_dynamic_min = dm_digtable->dig_min_0; - firstconnect = (mac->link_state >= MAC80211_LINKED) && + dig_dynamic_min = dm_digtable->dig_dynamic_min; + bfirstconnect = (mac->link_state >= MAC80211_LINKED) && !dm_digtable->media_connect_0; - firstdisconnect = (mac->link_state < MAC80211_LINKED) && - dm_digtable->media_connect_0; + bfirstdisconnect = (mac->link_state < MAC80211_LINKED) && + (dm_digtable->media_connect_0); dm_dig_max = 0x5a; dm_dig_min = DM_DIG_MIN; @@ -457,6 +466,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw) } else { dig_dynamic_min = dm_dig_min; } + } else { dm_digtable->rx_gain_max = dm_dig_max; dig_dynamic_min = dm_dig_min; @@ -506,7 +516,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw) dm_digtable->rx_gain_min = dm_digtable->rx_gain_max; if (mac->link_state >= MAC80211_LINKED) { - if (firstconnect) { + if (bfirstconnect) { if (dm_digtable->rssi_val_min <= dig_maxofmin) current_igi = dm_digtable->rssi_val_min; else @@ -522,7 +532,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw) current_igi -= 2; } } else { - if (firstdisconnect) { + if (bfirstdisconnect) { current_igi = dm_digtable->rx_gain_min; } else { if (rtlpriv->falsealm_cnt.cnt_all > 10000) @@ -542,14 +552,15 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw) rtl8723be_dm_write_dig(hw, current_igi); dm_digtable->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? 
true : false); - dm_digtable->dig_min_0 = dig_dynamic_min; + dm_digtable->dig_dynamic_min = dig_dynamic_min; } -static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +static void rtl8723be_dm_false_alarm_counter_statistics( + struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); @@ -615,16 +626,14 @@ static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "cnt_parity_fail = %d, cnt_rate_illegal = %d, " - "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", falsealm_cnt->cnt_parity_fail, falsealm_cnt->cnt_rate_illegal, falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "cnt_ofdm_fail = %x, cnt_cck_fail = %x," - " cnt_all = %x\n", + "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", falsealm_cnt->cnt_ofdm_fail, falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); @@ -690,7 +699,7 @@ static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw, u8 rfpath, u8 idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); u8 swing_idx_ofdm_limit = 36; @@ -762,7 +771,8 @@ static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw, } } -static void txpwr_track_cb_therm(struct ieee80211_hw *hw) +static void rtl8723be_dm_txpower_tracking_callback_thermalmeter( + struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -773,30 +783,29 @@ static void txpwr_track_cb_therm(struct ieee80211_hw *hw) int i = 0; u8 ofdm_min_index = 6; - u8 index = 0; + u8 index_for_channel = 0; - char delta_swing_table_idx_tup_a[] = { + char delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15}; - char delta_swing_table_idx_tdown_a[] = { + char delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15}; - /*Initilization ( 7 steps in total)*/ + /*Initilization ( 7 steps in total )*/ rtlpriv->dm.txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "rtl8723be_dm_txpower_tracking" - "_callback_thermalmeter\n"); + "rtl8723be_dm_txpower_tracking_callback_thermalmeter\n"); - thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00); + thermalvalue = (u8)rtl_get_rfreg(hw, + RF90_PATH_A, RF_T_METER, 0xfc00); if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 || rtlefuse->eeprom_thermalmeter == 0xFF) return; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Readback Thermal Meter = 0x%x pre thermal meter 0x%x " - "eeprom_thermalmeter 0x%x\n", + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", thermalvalue, rtldm->thermalvalue, rtlefuse->eeprom_thermalmeter); /*3 Initialize ThermalValues of RFCalibrateInfo*/ @@ -833,9 +842,7 @@ static void txpwr_track_cb_therm(struct ieee80211_hw *hw) 
(rtlpriv->dm.thermalvalue_iqk - thermalvalue); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Readback Thermal Meter = 0x%x pre thermal meter 0x%x " - "eeprom_thermalmeter 0x%x delta 0x%x " - "delta_lck 0x%x delta_iqk 0x%x\n", + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); /* 6 If necessary, do LCK.*/ @@ -905,10 +912,10 @@ static void txpwr_track_cb_therm(struct ieee80211_hw *hw) rtldm->done_txpower = true; if (thermalvalue > rtlefuse->eeprom_thermalmeter) rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, - index); + index_for_channel); else rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, - index); + index_for_channel); rtldm->swing_idx_cck_base = rtldm->swing_idx_cck; rtldm->swing_idx_ofdm_base[RF90_PATH_A] = @@ -923,6 +930,7 @@ static void txpwr_track_cb_therm(struct ieee80211_hw *hw) rtldm->txpowercount = 0; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n"); + } void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw) @@ -943,7 +951,7 @@ void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw) } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking !!\n"); - txpwr_track_cb_therm(hw); + rtl8723be_dm_txpower_tracking_callback_thermalmeter(hw); tm_trigger = 0; } } @@ -953,11 +961,11 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rate_adaptive *ra = &(rtlpriv->ra); - struct ieee80211_sta *sta = NULL; - u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m; - u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra; + struct rate_adaptive *p_ra = &rtlpriv->ra; + u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra40m; + u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra; u8 go_up_gap = 5; + struct ieee80211_sta *sta = NULL; if (is_hal_stop(rtlhal)) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, @@ -972,8 +980,8 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) } if (mac->link_state == MAC80211_LINKED && - mac->opmode == NL80211_IFTYPE_STATION) { - switch (ra->pre_ratr_state) { + mac->opmode == NL80211_IFTYPE_STATION) { + switch (p_ra->pre_ratr_state) { case DM_RATR_STA_MIDDLE: high_rssithresh_for_ra += go_up_gap; break; @@ -987,31 +995,31 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssithresh_for_ra) - ra->ratr_state = DM_RATR_STA_HIGH; + p_ra->ratr_state = DM_RATR_STA_HIGH; else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssithresh_for_ra) - ra->ratr_state = DM_RATR_STA_MIDDLE; + p_ra->ratr_state = DM_RATR_STA_MIDDLE; else - ra->ratr_state = DM_RATR_STA_LOW; + p_ra->ratr_state = DM_RATR_STA_LOW; - if (ra->pre_ratr_state != ra->ratr_state) { + if (p_ra->pre_ratr_state != p_ra->ratr_state) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n", rtlpriv->dm.undec_sm_pwdb); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - "RSSI_LEVEL = %d\n", ra->ratr_state); + "RSSI_LEVEL = %d\n", p_ra->ratr_state); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "PreState = %d, CurState = %d\n", - ra->pre_ratr_state, ra->ratr_state); + p_ra->pre_ratr_state, p_ra->ratr_state); rcu_read_lock(); sta = rtl_find_sta(hw, mac->bssid); if (sta) rtlpriv->cfg->ops->update_rate_tbl(hw, sta, - 
ra->ratr_state); + p_ra->ratr_state); rcu_read_unlock(); - ra->pre_ratr_state = ra->ratr_state; + p_ra->pre_ratr_state = p_ra->ratr_state; } } } @@ -1020,10 +1028,6 @@ static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - if (rtlpriv->cfg->ops->get_btc_status()) { - if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv)) - return true; - } if (rtlpriv->mac80211.mode == WIRELESS_MODE_B) return true; @@ -1034,6 +1038,7 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + static u64 last_txok_cnt; static u64 last_rxok_cnt; u64 cur_txok_cnt = 0; @@ -1042,22 +1047,22 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) u32 edca_be_dl = 0x6ea42b;/*not sure*/ u32 edca_be = 0x5ea42b; u32 iot_peer = 0; - bool is_cur_rdlstate; - bool last_is_cur_rdlstate = false; - bool bias_on_rx = false; - bool edca_turbo_on = false; + bool b_is_cur_rdlstate; + bool b_last_is_cur_rdlstate = false; + bool b_bias_on_rx = false; + bool b_edca_turbo_on = false; - last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate; + b_last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate; cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; iot_peer = rtlpriv->mac80211.vendor; - bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ? - true : false; - edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) && - (!rtlpriv->dm.disable_framebursting)) ? - true : false; + b_bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ? + true : false; + b_edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting)) ? + true : false; if ((iot_peer == PEER_CISCO) && (mac->mode == WIRELESS_MODE_N_24G)) { @@ -1067,23 +1072,23 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) if (rtl8723be_dm_is_edca_turbo_disable(hw)) goto exit; - if (edca_turbo_on) { - if (bias_on_rx) - is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ? - false : true; + if (b_edca_turbo_on) { + if (b_bias_on_rx) + b_is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ? + false : true; else - is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ? - true : false; + b_is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ? + true : false; - edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul; + edca_be = (b_is_cur_rdlstate) ? 
edca_be_dl : edca_be_ul; rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be); - rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate; + rtlpriv->dm.is_cur_rdlstate = b_is_cur_rdlstate; rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, - &tmp); + (u8 *)(&tmp)); } rtlpriv->dm.current_turbo_edca = false; } @@ -1097,13 +1102,14 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; u8 cur_cck_cca_thresh; if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { - if (rtlpriv->dm_digtable.rssi_val_min > 25) { + if (dm_digtable->rssi_val_min > 25) { cur_cck_cca_thresh = 0xcd; - } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) && - (rtlpriv->dm_digtable.rssi_val_min > 10)) { + } else if ((dm_digtable->rssi_val_min <= 25) && + (dm_digtable->rssi_val_min > 10)) { cur_cck_cca_thresh = 0x83; } else { if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) @@ -1118,14 +1124,13 @@ static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) cur_cck_cca_thresh = 0x40; } - if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) + if (dm_digtable->cur_cck_cca_thres != cur_cck_cca_thresh) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh); - rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres; - rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh; + dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres; + dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "CCK cca thresh hold =%x\n", - rtlpriv->dm_digtable.cur_cck_cca_thres); + "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres); } static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw) @@ -1173,8 +1178,7 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) if (rtlpriv->cfg->ops->get_btc_status()) { if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "odm_DynamicATCSwitch(): Disable" - " CFO tracking for BT!!\n"); + "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n"); return; } } @@ -1207,9 +1211,8 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { rtldm->large_cfo_hit = 1; return; - } else { + } else rtldm->large_cfo_hit = 0; - } rtldm->cfo_ave_pre = cfo_ave; @@ -1263,20 +1266,20 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_sta_info *drv_priv; u8 cnt = 0; + struct rtl_sta_info *drv_priv; rtlpriv->dm.one_entry_only = false; if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION && - rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + rtlpriv->mac80211.link_state >= MAC80211_LINKED) { rtlpriv->dm.one_entry_only = true; return; } if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || - rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || - rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { + rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { cnt++; @@ -1305,8 +1308,8 
@@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw) fw_ps_awake = false; if ((ppsc->rfpwr_state == ERFON) && - ((!fw_current_inpsmode) && fw_ps_awake) && - (!ppsc->rfchange_inprogress)) { + ((!fw_current_inpsmode) && fw_ps_awake) && + (!ppsc->rfchange_inprogress)) { rtl8723be_dm_common_info_self_update(hw); rtl8723be_dm_false_alarm_counter_statistics(hw); rtl8723be_dm_check_rssi_monitor(hw); @@ -1318,8 +1321,6 @@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw) rtl8723be_dm_dynamic_atc_switch(hw); rtl8723be_dm_check_txpower_tracking(hw); rtl8723be_dm_dynamic_txpower(hw); - if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); } rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; } diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h index c6c2f2a78a6606cbc768282be49148ff3dff3f51..e4c0e8ae6f4700385c3e3bec888b7e0c29fd7236 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h @@ -141,7 +141,7 @@ #define DM_REG_TX_CCK_BBON_11N 0xE78 #define DM_REG_OFDM_RFON_11N 0xE7C #define DM_REG_OFDM_BBON_11N 0xE80 -#define DM_REG_TX2RX_11N 0xE84 +#define DM_REG_TX2RX_11N 0xE84 #define DM_REG_TX2TX_11N 0xE88 #define DM_REG_RX_CCK_11N 0xE8C #define DM_REG_RX_OFDM_11N 0xED0 @@ -202,6 +202,7 @@ #define DM_DIG_BACKOFF_MIN -4 #define DM_DIG_BACKOFF_DEFAULT 10 +#define RXPATHSELECTION_SS_TH_LOW 30 #define RXPATHSELECTION_DIFF_TH 18 #define DM_RATR_STA_INIT 0 @@ -212,6 +213,8 @@ #define CTS2SELF_THVAL 30 #define REGC38_TH 20 +#define WAIOTTHVAL 25 + #define TXHIGHPWRLEVEL_NORMAL 0 #define TXHIGHPWRLEVEL_LEVEL1 1 #define TXHIGHPWRLEVEL_LEVEL2 2 @@ -231,22 +234,6 @@ #define CFO_THRESHOLD_XTAL 10 /* kHz */ #define CFO_THRESHOLD_ATC 80 /* kHz */ -enum FAT_STATE { - FAT_NORMAL_STATE = 0, - FAT_TRAINING_STATE = 1, -}; - -enum tag_dynamic_init_gain_operation_type_definition { - DIG_TYPE_THRESH_HIGH = 0, - DIG_TYPE_THRESH_LOW = 1, - DIG_TYPE_BACKOFF = 2, - DIG_TYPE_RX_GAIN_MIN = 3, - DIG_TYPE_RX_GAIN_MAX = 4, - DIG_TYPE_ENABLE = 5, - DIG_TYPE_DISABLE = 6, - DIG_OP_TYPE_MAX -}; - enum dm_1r_cca_e { CCA_1R = 0, CCA_2R = 1, @@ -292,12 +279,17 @@ enum pwr_track_control_method { #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) #define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) #define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) +#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ + ((((struct rtl_priv *)(_priv))->mac80211.opmode == \ + NL80211_IFTYPE_ADHOC) ? 
\ + (((struct rtl_priv *)(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) :\ + (((struct rtl_priv *)(_priv))->dm.undecorated_smoothed_pwdb)) void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc, u32 mac_id); void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux, u32 mac_id, u32 rx_pwdb_all); -void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data); +void rtl8723be_dm_fast_antenna_training_callback(unsigned long data); void rtl8723be_dm_init(struct ieee80211_hw *hw); void rtl8723be_dm_watchdog(struct ieee80211_hw *hw); void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi); @@ -305,6 +297,4 @@ void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw); void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, u8 *pdirection, u32 *poutwrite_val); -void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw); - #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c index f856be6fc1386533e389b39a9841a463225f5425..69d4f0fc1af1c1ed9ec459cb97e461e0f7adf880 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c @@ -26,6 +26,7 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" +#include "../core.h" #include "reg.h" #include "def.h" #include "fw.h" @@ -55,8 +56,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, bool isfw_read = false; u8 buf_index = 0; bool bwrite_sucess = false; - u8 wait_h2c_limit = 100; - u8 wait_writeh2c_limit = 100; + u8 wait_h2c_limmit = 100; + u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[4]; u32 h2c_waitcounter = 0; unsigned long flag; @@ -68,8 +69,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); if (rtlhal->h2c_setinprogress) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "H2C set in progress! Wait to set.." - "element_id(%d).\n", element_id); + "H2C set in progress! Wait to set..element_id(%d).\n", + element_id); while (rtlhal->h2c_setinprogress) { spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, @@ -92,14 +93,15 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, break; } } + while (!bwrite_sucess) { - wait_writeh2c_limit--; - if (wait_writeh2c_limit == 0) { + wait_writeh2c_limmit--; + if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Write H2C fail because no trigger " - "for FW INT!\n"); + "Write H2C fail because no trigger for FW INT!\n"); break; } + boxnum = rtlhal->last_hmeboxnum; switch (boxnum) { case 0: @@ -120,39 +122,43 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); + "switch case not process\n"); break; } + isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum); while (!isfw_read) { - wait_h2c_limit--; - if (wait_h2c_limit == 0) { + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Wating too long for FW read " - "clear HMEBox(%d)!\n", boxnum); + "Waiting too long for FW read clear HMEBox(%d)!\n", + boxnum); break; } + udelay(10); isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum); u1b_tmp = rtl_read_byte(rtlpriv, 0x130); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Wating for FW read clear HMEBox(%d)!!! 
0x130 = %2x\n", + "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n", boxnum, u1b_tmp); } + if (!isfw_read) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, - "Write H2C register BOX[%d] fail!!!!! " - "Fw do not read.\n", boxnum); + "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n", + boxnum); break; } + memset(boxcontent, 0, sizeof(boxcontent)); memset(boxextcontent, 0, sizeof(boxextcontent)); boxcontent[0] = element_id; RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "Write element_id box_reg(%4x) = %2x\n", - box_reg, element_id); + box_reg, element_id); switch (cmd_len) { case 1: @@ -181,6 +187,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, rtl_write_byte(rtlpriv, box_extreg + idx, boxextcontent[idx]); } + for (idx = 0; idx < 4; idx++) { rtl_write_byte(rtlpriv, box_reg + idx, boxcontent[idx]); @@ -191,6 +198,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, "switch case not process\n"); break; } + bwrite_sucess = true; rtlhal->last_hmeboxnum = boxnum + 1; @@ -199,8 +207,9 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "pHalData->last_hmeboxnum = %d\n", - rtlhal->last_hmeboxnum); + rtlhal->last_hmeboxnum); } + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); rtlhal->h2c_setinprogress = false; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); @@ -219,6 +228,7 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, "return H2C cmd because of Fw download fail!!!\n"); return; } + memset(tmp_cmdbuf, 0, 8); memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len); _rtl8723be_fill_h2c_command(hw, element_id, cmd_len, @@ -229,17 +239,17 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 }; + u8 u1_h2c_set_pwrmode[H2C_PWEMODE_LENGTH] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 rlbm, power_state = 0; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); - rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM = 2.*/ + rlbm = 0;/*YJ,temp,120316. FW now not support RLBM=2.*/ SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, (rtlpriv->mac80211.p2p) ? 
- ppsc->smart_ps : 1); + ppsc->smart_ps : 1); SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); @@ -251,44 +261,26 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n", - u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH); - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE, - H2C_8723BE_PWEMODE_LENGTH, + u1_h2c_set_pwrmode, H2C_PWEMODE_LENGTH); + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_SETPWRMODE, H2C_PWEMODE_LENGTH, u1_h2c_set_pwrmode); } -static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) +void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - struct sk_buff *pskb = NULL; - u8 own; - unsigned long flags; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); - - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN); - - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; + u8 parm[3] = { 0, 0, 0 }; + /* parm[0]: bit0=0-->Disconnect, bit0=1-->Connect + * bit1=0-->update Media Status to MACID + * bit1=1-->update Media Status from MACID to MACID_End + * parm[1]: MACID, if this is INFRA_STA, MacID = 0 + * parm[2]: MACID_End + */ + SET_H2CCMD_MSRRPT_PARM_OPMODE(parm, mstatus); + SET_H2CCMD_MSRRPT_PARM_MACID_IND(parm, 0); + + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_MSRRPT, 3, parm); } + #define BEACON_PG 0 /* ->1 */ #define PSPOLL_PG 2 #define NULL_PG 3 @@ -407,7 +399,7 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { }; void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, - bool dl_finished) + bool b_dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -416,7 +408,7 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u32 totalpacketlen; bool rtstatus; u8 u1rsvdpageloc[5] = { 0 }; - bool dlok = false; + bool b_dlok = false; u8 *beacon; u8 *p_pspoll; @@ -466,43 +458,40 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, - "rtl8723be_set_fw_rsvdpagepkt(): " - "HW_VAR_SET_TX_CMD: ALL\n", + "rtl8723be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl8723be_set_fw_rsvdpagepkt(): " - "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3); - + "rtl8723be_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", + u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - rtstatus = _rtl8723be_cmd_send_packet(hw, skb); + rtstatus = rtl_cmd_send_packet(hw, skb); if (rtstatus) - dlok = true; + b_dlok = true; - if (dlok) { + if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n", u1rsvdpageloc, 3); - 
rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE, + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE, sizeof(u1rsvdpageloc), u1rsvdpageloc); - } else { + } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!.\n"); - } } /*Should check FW support p2p or not.*/ static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) { - u8 u1_ctwindow_period[1] = {ctwindow}; + u8 u1_ctwindow_period[1] = { ctwindow}; - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1, + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); } @@ -521,7 +510,7 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, switch (p2p_ps_state) { case P2P_PS_DISABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); - memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t)); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); break; case P2P_PS_ENABLE: RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); @@ -532,7 +521,7 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow); } /* hw only support 2 set of NoA */ - for (i = 0; i < p2pinfo->noa_num; i++) { + for (i = 0 ; i < p2pinfo->noa_num ; i++) { /* To control the register setting * for which NOA */ @@ -563,6 +552,7 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, rtl_write_dword(rtlpriv, 0x5EC, p2pinfo->noa_count_type[i]); } + if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { /* rst p2p circuit */ @@ -591,30 +581,60 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, default: break; } - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1, + + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } -void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) +static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, + u8 c2h_cmd_id, + u8 c2h_cmd_len, u8 *tmp_buf) { - u8 u1_joinbssrpt_parm[1] = { 0 }; - - SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); + struct rtl_priv *rtlpriv = rtl_priv(hw); - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1, - u1_joinbssrpt_parm); + switch (c2h_cmd_id) { + case C2H_8723B_DBG: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_DBG!!\n"); + break; + case C2H_8723B_TX_REPORT: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_TX_REPORT!\n"); + break; + case C2H_8723B_BT_INFO: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_BT_INFO!!\n"); + rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); + break; + case C2H_8723B_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8723BE_BT_MP!!\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], Unkown packet!! 
CmdId(%#X)!\n", c2h_cmd_id); + break; + } } -void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, - u8 ap_offload_enable) +void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) { - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; + u8 *tmp_buf = NULL; + + c2h_cmd_id = buffer[0]; + c2h_cmd_seq = buffer[1]; + c2h_cmd_len = len - 2; + tmp_buf = buffer + 2; + + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", + c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable); - SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid); - SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0); + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD, - H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm); + _rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); } diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h index 31eec281e4468c8f511afd5bd40d310b1d3895d3..067429669bdad6f3382136a87db11033d9d0eb6e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h @@ -30,50 +30,23 @@ #define FW_8192C_END_ADDRESS 0x5FFF #define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 -#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 -#define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x5300) #define USE_OLD_WOWLAN_DEBUG_FW 0 -#define H2C_8723BE_RSVDPAGE_LOC_LEN 5 -#define H2C_8723BE_PWEMODE_LENGTH 5 -#define H2C_8723BE_JOINBSSRPT_LENGTH 1 -#define H2C_8723BE_AP_OFFLOAD_LENGTH 3 -#define H2C_8723BE_WOWLAN_LENGTH 3 -#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH 3 -#if (USE_OLD_WOWLAN_DEBUG_FW == 0) -#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 1 -#else -#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 3 -#endif -#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN 2 -#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN 7 - +#define H2C_PWEMODE_LENGTH 5 /* Fw PS state for RPWM. 
*BIT[2:0] = HW state *BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state *BIT[4] = sub-state */ -#define FW_PS_GO_ON BIT(0) -#define FW_PS_TX_NULL BIT(1) #define FW_PS_RF_ON BIT(2) #define FW_PS_REGISTER_ACTIVE BIT(3) -#define FW_PS_DPS BIT(0) -#define FW_PS_LCLK (FW_PS_DPS) -#define FW_PS_RF_OFF BIT(1) -#define FW_PS_ALL_ON BIT(2) -#define FW_PS_ST_ACTIVE BIT(3) -#define FW_PS_ISR_ENABLE BIT(4) -#define FW_PS_IMR_ENABLE BIT(5) - - #define FW_PS_ACK BIT(6) #define FW_PS_TOGGLE BIT(7) - /* 88E RPWM value*/ + /* 8723BE RPWM value*/ /* BIT[0] = 1: 32k, 0: 40M*/ #define FW_PS_CLOCK_OFF BIT(0) /* 32k*/ #define FW_PS_CLOCK_ON 0 /*40M*/ @@ -83,75 +56,61 @@ /*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/ #define FW_PS_STATE_INT_MASK (0x3F) -#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) -#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x)) -#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x)) -#define FW_PS_ISR_VAL(x) ((x) & 0x70) -#define FW_PS_IMR_MASK(x) ((x) & 0xDF) -#define FW_PS_KEEP_IMR(x) ((x) & 0x20) - - -#define FW_PS_STATE_S0 (FW_PS_DPS) -#define FW_PS_STATE_S1 (FW_PS_LCLK) -#define FW_PS_STATE_S2 (FW_PS_RF_OFF) -#define FW_PS_STATE_S3 (FW_PS_ALL_ON) -#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON)) +#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/ -#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_ALL_ON (FW_PS_CLOCK_ON) /* (FW_PS_RF_ON)*/ -#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_RF_ON (FW_PS_CLOCK_ON) /* 0x0*/ -#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON) +#define FW_PS_STATE_RF_OFF (FW_PS_CLOCK_ON) /* (FW_PS_STATE_RF_OFF)*/ -#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF) +#define FW_PS_STATE_RF_OFF_LOW_PWR (FW_PS_CLOCK_OFF) -#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4) -#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3) -#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2) -#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1) - -/* For 88E H2C PwrMode Cmd ID 5.*/ +/* For 8723BE H2C PwrMode Cmd ID 5.*/ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK) -#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON)) -#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON)) -#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE)) -#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40) - -#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0))) +#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK) -#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \ - (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF) +#define IS_IN_LOW_POWER_STATE(__fwpsstate) \ + (FW_PS_STATE(__fwpsstate) == FW_PS_CLOCK_OFF) #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) - -#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val) -#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) -#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +enum rtl8723b_h2c_cmd { + H2C_8723B_RSVDPAGE = 0, + H2C_8723B_MSRRPT = 1, + H2C_8723B_SCAN = 2, + H2C_8723B_KEEP_ALIVE_CTRL = 3, + H2C_8723B_DISCONNECT_DECISION = 4, + H2C_8723B_BCN_RSVDPAGE = 9, + H2C_8723B_PROBERSP_RSVDPAGE = 10, + + H2C_8723B_SETPWRMODE = 0x20, + H2C_8723B_PS_LPS_PARA = 0x23, + H2C_8723B_P2P_PS_OFFLOAD = 0x24, + + H2C_8723B_RA_MASK = 0x40, + H2C_RSSIBE_REPORT = 0x42, + /*Not defined CTW CMD for P2P yet*/ + H2C_8723B_P2P_PS_CTW_CMD, + MAX_8723B_H2CCMD +}; + +enum rtl8723b_c2h_evt { + C2H_8723B_DBG = 0, + C2H_8723B_LB = 1, + C2H_8723B_TXBF = 2, + C2H_8723B_TX_REPORT = 3, + C2H_8723B_BT_INFO = 9, + C2H_8723B_BT_MP = 11, + MAX_8723B_C2HEVENT +}; + +#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ @@ -169,8 +128,11 @@ #define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \ LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8) -#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) +#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) + #define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) #define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ @@ -178,71 +140,13 @@ #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -/* AP_OFFLOAD */ -#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) -#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) -#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) -/* Keep Alive Control*/ -#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) -#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) -#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) - -/*REMOTE_WAKE_CTRL */ -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) -#if (USE_OLD_WOWLAN_DEBUG_FW == 0) -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val) -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val) -#else -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) -#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -#endif - -/* GTK_OFFLOAD */ -#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) -#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) - -/* AOAC_RSVDPAGE_LOC */ -#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\ - SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val) -#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) -#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) -#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val) \ - SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val) - -void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); -void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, - u8 ap_offload_enable); void rtl8723be_fill_h2c_cmd(struct 
ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); -void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); -void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, - bool dl_finished); -void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); -int rtl8723be_download_fw(struct ieee80211_hw *hw, - bool buse_wake_on_wlan_fw); -void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, - u8 p2p_ps_state); +void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); +void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c index 3cd286930fe0088d73b2046a970f3349a4531a3b..6dad28e77bbb6c6f07f53ff2ea38e39996e9cd6e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -33,12 +33,14 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8723com/phy_common.h" #include "dm.h" #include "../rtl8723com/dm_common.h" #include "fw.h" #include "../rtl8723com/fw_common.h" #include "led.h" #include "hw.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" #include "../btcoexist/rtl_btc.h" @@ -49,7 +51,9 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + unsigned long flags; + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb = __skb_dequeue(&ring->queue); @@ -61,6 +65,7 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw) kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); } static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw, @@ -72,7 +77,7 @@ static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw, rtlpci->reg_bcn_ctrl_val |= set_bits; rtlpci->reg_bcn_ctrl_val &= ~clear_bits; - rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); } static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw) @@ -112,15 +117,15 @@ static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw) } static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val, - bool need_turn_off_ckk) + bool b_need_turn_off_ckk) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool support_remote_wake_up; + bool b_support_remote_wake_up; u32 count = 0, isr_regaddr, content; - bool schedule_timer = need_turn_off_ckk; + bool b_schedule_timer = b_need_turn_off_ckk; rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, - (u8 *)(&support_remote_wake_up)); + (u8 *)(&b_support_remote_wake_up)); if (!rtlhal->fw_ready) return; @@ -145,9 +150,10 @@ static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val, break; } } - if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) { + + if (IS_IN_LOW_POWER_STATE(rtlhal->fw_ps_state)) { rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, - &rpwm_val); + 
(u8 *)(&rpwm_val)); if (FW_PS_IS_ACK(rpwm_val)) { isr_regaddr = REG_HISR; content = rtl_read_dword(rtlpriv, isr_regaddr); @@ -159,20 +165,19 @@ static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val, if (content & IMR_CPWM) { rtl_write_word(rtlpriv, isr_regaddr, 0x0100); - rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E; + rtlhal->fw_ps_state = FW_PS_STATE_RF_ON; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, - "Receive CPWM INT!!! Set " - "pHalData->FwPSState = %X\n", + "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n", rtlhal->fw_ps_state); } } + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); rtlhal->fw_clk_change_in_progress = false; spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); - if (schedule_timer) { + if (b_schedule_timer) mod_timer(&rtlpriv->works.fw_clockoff_timer, jiffies + MSECS(10)); - } } else { spin_lock_bh(&rtlpriv->locks.fw_ps_lock); rtlhal->fw_clk_change_in_progress = false; @@ -187,7 +192,7 @@ static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring; enum rf_pwrstate rtstate; - bool schedule_timer = false; + bool b_schedule_timer = false; u8 queue; if (!rtlhal->fw_ready) @@ -203,17 +208,18 @@ static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) { ring = &rtlpci->tx_ring[queue]; if (skb_queue_len(&ring->queue)) { - schedule_timer = true; + b_schedule_timer = true; break; } } - if (schedule_timer) { + + if (b_schedule_timer) { mod_timer(&rtlpriv->works.fw_clockoff_timer, jiffies + MSECS(10)); return; } - if (FW_PS_STATE(rtlhal->fw_ps_state) != - FW_PS_STATE_RF_OFF_LOW_PWR_88E) { + + if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR) { spin_lock_bh(&rtlpriv->locks.fw_ps_lock); if (!rtlhal->fw_clk_change_in_progress) { rtlhal->fw_clk_change_in_progress = true; @@ -221,7 +227,7 @@ static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); rtl_write_word(rtlpriv, REG_HISR, 0x0100); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, - &rpwm_val); + (u8 *)(&rpwm_val)); spin_lock_bh(&rtlpriv->locks.fw_ps_lock); rtlhal->fw_clk_change_in_progress = false; spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); @@ -231,12 +237,13 @@ static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) jiffies + MSECS(10)); } } + } static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw) { u8 rpwm_val = 0; - rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK); + rpwm_val |= (FW_PS_STATE_RF_OFF | FW_PS_ACK); _rtl8723be_set_fw_clock_on(hw, rpwm_val, true); } @@ -249,21 +256,23 @@ static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw) u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE; if (ppsc->low_power_enable) { - rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */ + rpwm_val = (FW_PS_STATE_ALL_ON | FW_PS_ACK);/* RF on */ _rtl8723be_set_fw_clock_on(hw, rpwm_val, false); rtlhal->allow_sw_to_change_hwclc = false; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, - &fw_pwrmode); + (u8 *)(&fw_pwrmode)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); } else { - rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */ - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val); + rpwm_val = FW_PS_STATE_ALL_ON; /* RF on */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, - &fw_pwrmode); + 
(u8 *)(&fw_pwrmode)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); } + } static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw) @@ -275,22 +284,23 @@ static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw) u8 rpwm_val; if (ppsc->low_power_enable) { - rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */ + rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR; /* RF off */ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, - &ppsc->fwctrl_psmode); + (u8 *)(&ppsc->fwctrl_psmode)); rtlhal->allow_sw_to_change_hwclc = true; _rtl8723be_set_fw_clock_off(hw, rpwm_val); - } else { - rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */ + rpwm_val = FW_PS_STATE_RF_OFF; /* RF off */ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, - &ppsc->fwctrl_psmode); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val); + (u8 *)(&ppsc->fwctrl_psmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); } + } void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) @@ -306,13 +316,13 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_RF_STATE: *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; - case HW_VAR_FWLPS_RF_ON: { - enum rf_pwrstate rfstate; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfState; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *)(&rfstate)); - if (rfstate == ERFOFF) { + (u8 *)(&rfState)); + if (rfState == ERFOFF) { *((bool *)(val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); @@ -322,11 +332,12 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) else *((bool *)(val)) = true; } - break; } + } + break; case HW_VAR_FW_PSMODE_STATUS: *((bool *)(val)) = ppsc->fw_current_inpsmode; break; - case HW_VAR_CORRECT_TSF: { + case HW_VAR_CORRECT_TSF:{ u64 tsf; u32 *ptsf_low = (u32 *)&tsf; u32 *ptsf_high = ((u32 *)&tsf) + 1; @@ -335,15 +346,65 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); *((u64 *)(val)) = tsf; - - break; } + } + break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process %x\n", variable); break; } } +static void _rtl8723be_download_rsvd_page(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp_regcr, tmp_reg422, bcnvalid_reg; + u8 count = 0, dlbcn_count = 0; + bool b_recover = false; + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr | BIT(0))); + + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0); + + tmp_reg422 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); + if (tmp_reg422 & BIT(6)) + b_recover = true; + + do { + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL + 2); + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, + (bcnvalid_reg | BIT(0))); + _rtl8723be_return_beacon_queue_skb(hw); + + rtl8723be_set_fw_rsvdpagepkt(hw, 0); + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL + 2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count < 20) { + count++; + udelay(10); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + } + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && 
dlbcn_count < 5); + + if (bcnvalid_reg & BIT(0)) + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, BIT(0)); + + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4)); + + if (b_recover) + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422); + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, (tmp_regcr & ~(BIT(0)))); +} + void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -358,22 +419,24 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) for (idx = 0; idx < ETH_ALEN; idx++) rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]); break; - case HW_VAR_BASIC_RATE: { - u16 rate_cfg = ((u16 *)val)[0]; + case HW_VAR_BASIC_RATE:{ + u16 b_rate_cfg = ((u16 *)val)[0]; u8 rate_index = 0; - rate_cfg = rate_cfg & 0x15f; - rate_cfg |= 0x01; - rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); - rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff); - while (rate_cfg > 0x1) { - rate_cfg = (rate_cfg >> 1); + b_rate_cfg = b_rate_cfg & 0x15f; + b_rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, (b_rate_cfg >> 8) & 0xff); + while (b_rate_cfg > 0x1) { + b_rate_cfg = (b_rate_cfg >> 1); rate_index++; } rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index); - break; } + } + break; case HW_VAR_BSSID: for (idx = 0; idx < ETH_ALEN; idx++) rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]); + break; case HW_VAR_SIFS: rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); @@ -388,7 +451,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, *((u16 *)val)); break; - case HW_VAR_SLOT_TIME: { + case HW_VAR_SLOT_TIME:{ u8 e_aci; RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, @@ -398,12 +461,13 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) for (e_aci = 0; e_aci < AC_MAX; e_aci++) { rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, - &e_aci); + (u8 *)(&e_aci)); } - break; } - case HW_VAR_ACK_PREAMBLE: { + } + break; + case HW_VAR_ACK_PREAMBLE:{ u8 reg_tmp; - u8 short_preamble = (bool)*val; + u8 short_preamble = (bool)(*(u8 *)val); reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2); if (short_preamble) { reg_tmp |= 0x02; @@ -412,15 +476,16 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) reg_tmp &= 0xFD; rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); } - break; } + } + break; case HW_VAR_WPA_CONFIG: - rtl_write_byte(rtlpriv, REG_SECCFG, *val); + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); break; - case HW_VAR_AMPDU_MIN_SPACE: { + case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; u8 sec_min_space; - min_spacing_to_set = *val; + min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { sec_min_space = 0; @@ -434,26 +499,28 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", - mac->min_space_cfg); + mac->min_space_cfg); rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, mac->min_space_cfg); } - break; } - case HW_VAR_SHORTGI_DENSITY: { + } + break; + case HW_VAR_SHORTGI_DENSITY:{ u8 density_to_set; - density_to_set = *val; + density_to_set = *((u8 *)val); mac->min_space_cfg |= (density_to_set << 3); RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_SHORTGI_DENSITY: %#x\n", - mac->min_space_cfg); + mac->min_space_cfg); rtl_write_byte(rtlpriv, 
REG_AMPDU_MIN_SPACE, mac->min_space_cfg); - break; } - case HW_VAR_AMPDU_FACTOR: { + } + break; + case HW_VAR_AMPDU_FACTOR:{ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9}; u8 factor_toset; u8 *p_regtoset = NULL; @@ -461,7 +528,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) p_regtoset = regtoset_normal; - factor_toset = *val; + factor_toset = *((u8 *)val); if (factor_toset <= 3) { factor_toset = (1 << (factor_toset + 2)); if (factor_toset > 0xf) @@ -482,22 +549,26 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_byte(rtlpriv, (REG_AGGLEN_LMT + index), p_regtoset[index]); + } + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_AMPDU_FACTOR: %#x\n", - factor_toset); + factor_toset); } - break; } - case HW_VAR_AC_PARAM: { - u8 e_aci = *val; + } + break; + case HW_VAR_AC_PARAM:{ + u8 e_aci = *((u8 *)val); rtl8723_dm_init_edca_turbo(hw); if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, - &e_aci); - break; } - case HW_VAR_ACM_CTRL: { - u8 e_aci = *val; + (u8 *)(&e_aci)); + } + break; + case HW_VAR_ACM_CTRL:{ + u8 e_aci = *((u8 *)val); union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(mac->ac[0].aifs)); u8 acm = p_aci_aifsn->f.acm; @@ -519,8 +590,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "HW_VAR_ACM_CTRL acm set " - "failed: eACI is %d\n", acm); + "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", + acm); break; } } else { @@ -535,27 +606,30 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) acm_ctrl &= (~ACMHW_BEQEN); break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } } + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, - "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] " - "Write 0x%X\n", acm_ctrl); + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", + acm_ctrl); rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); - break; } + } + break; case HW_VAR_RCR: rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]); rtlpci->receive_config = ((u32 *)(val))[0]; break; - case HW_VAR_RETRY_LIMIT: { - u8 retry_limit = *val; + case HW_VAR_RETRY_LIMIT:{ + u8 retry_limit = ((u8 *)(val))[0]; rtl_write_word(rtlpriv, REG_RL, retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT); - break; } + } + break; case HW_VAR_DUAL_TSF_RST: rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); break; @@ -563,25 +637,27 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtlefuse->efuse_usedbytes = *((u16 *)val); break; case HW_VAR_EFUSE_USAGE: - rtlefuse->efuse_usedpercentage = *val; + rtlefuse->efuse_usedpercentage = *((u8 *)val); break; case HW_VAR_IO_CMD: rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val)); break; - case HW_VAR_SET_RPWM: { + case HW_VAR_SET_RPWM:{ u8 rpwm_val; rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); udelay(1); if (rpwm_val & BIT(7)) { - rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val); + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, (*(u8 *)val)); } else { - rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7)); + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *)val) | BIT(7))); } - break; } + } + break; case HW_VAR_H2C_FW_PWRMODE: - rtl8723be_set_fw_pwrmode_cmd(hw, *val); + rtl8723be_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); break; case HW_VAR_FW_PSMODE_STATUS: ppsc->fw_current_inpsmode = *((bool *)val); @@ -589,85 +665,38 @@ void 
rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_RESUME_CLK_ON: _rtl8723be_set_fw_ps_rf_on(hw); break; - case HW_VAR_FW_LPS_ACTION: { - bool enter_fwlps = *((bool *)val); + case HW_VAR_FW_LPS_ACTION:{ + bool b_enter_fwlps = *((bool *)val); - if (enter_fwlps) + if (b_enter_fwlps) _rtl8723be_fwlps_enter(hw); else _rtl8723be_fwlps_leave(hw); - - break; } - case HW_VAR_H2C_FW_JOINBSSRPT: { - u8 mstatus = *val; - u8 tmp_regcr, tmp_reg422, bcnvalid_reg; - u8 count = 0, dlbcn_count = 0; - bool recover = false; + } + break; + case HW_VAR_H2C_FW_JOINBSSRPT:{ + u8 mstatus = (*(u8 *)val); if (mstatus == RT_MEDIA_CONNECT) { rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); - - tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); - rtl_write_byte(rtlpriv, REG_CR + 1, - (tmp_regcr | BIT(0))); - - _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3)); - _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0); - - tmp_reg422 = rtl_read_byte(rtlpriv, - REG_FWHW_TXQ_CTRL + 2); - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, - tmp_reg422 & (~BIT(6))); - if (tmp_reg422 & BIT(6)) - recover = true; - - do { - bcnvalid_reg = rtl_read_byte(rtlpriv, - REG_TDECTRL + 2); - rtl_write_byte(rtlpriv, REG_TDECTRL + 2, - (bcnvalid_reg | BIT(0))); - _rtl8723be_return_beacon_queue_skb(hw); - - rtl8723be_set_fw_rsvdpagepkt(hw, 0); - bcnvalid_reg = rtl_read_byte(rtlpriv, - REG_TDECTRL + 2); - count = 0; - while (!(bcnvalid_reg & BIT(0)) && count < 20) { - count++; - udelay(10); - bcnvalid_reg = rtl_read_byte(rtlpriv, - REG_TDECTRL + 2); - } - dlbcn_count++; - } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); - - if (bcnvalid_reg & BIT(0)) - rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0)); - - _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); - _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4)); - - if (recover) { - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, - tmp_reg422); - } - rtl_write_byte(rtlpriv, REG_CR + 1, - (tmp_regcr & ~(BIT(0)))); + _rtl8723be_download_rsvd_page(hw); + } + rtl8723be_set_fw_media_status_rpt_cmd(hw, mstatus); } - rtl8723be_set_fw_joinbss_report_cmd(hw, *val); - break; } + break; case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: - rtl8723be_set_p2p_ps_offload_cmd(hw, *val); + rtl8723be_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); break; - case HW_VAR_AID: { + case HW_VAR_AID:{ u16 u2btmp; u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); u2btmp &= 0xC000; rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | mac->assoc_id)); - break; } - case HW_VAR_CORRECT_TSF: { - u8 btype_ibss = *val; + } + break; + case HW_VAR_CORRECT_TSF:{ + u8 btype_ibss = ((u8 *)(val))[0]; if (btype_ibss) _rtl8723be_stop_tx_beacon(hw); @@ -683,16 +712,17 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) if (btype_ibss) _rtl8723be_resume_tx_beacon(hw); - break; } - case HW_VAR_KEEP_ALIVE: { + } + break; + case HW_VAR_KEEP_ALIVE:{ u8 array[2]; array[0] = 0xff; - array[1] = *val; - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL, - 2, array); - break; } + array[1] = *((u8 *)val); + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_KEEP_ALIVE_CTRL, 2, array); + } + break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process %x\n", variable); break; @@ -703,7 +733,7 @@ static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); bool status = true; - int count = 0; + long count = 0; u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); @@ -716,8 +746,8 @@ static bool 
_rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) if (count > POLLING_LLT_THRESHOLD) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Failed to polling write LLT done at " - "address %d!\n", address); + "Failed to polling write LLT done at address %d!\n", + address); status = false; break; } @@ -731,10 +761,10 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; - u8 maxpage; + u8 maxPage; bool status; - maxpage = 255; + maxPage = 255; txpktbuf_bndy = 245; rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, @@ -753,17 +783,19 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) if (!status) return status; } + status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); if (!status) return status; - for (i = txpktbuf_bndy; i < maxpage; i++) { + for (i = txpktbuf_bndy; i < maxPage; i++) { status = _rtl8723be_llt_write(hw, i, (i + 1)); if (!status) return status; } - status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy); + + status = _rtl8723be_llt_write(hw, maxPage, txpktbuf_bndy); if (!status) return status; @@ -795,11 +827,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); unsigned char bytetmp; unsigned short wordtmp; - u16 retry = 0; - bool mac_func_enable; rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); @@ -807,12 +837,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7)); rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp); - bytetmp = rtl_read_byte(rtlpriv, REG_CR); - if (bytetmp == 0xFF) - mac_func_enable = true; - else - mac_func_enable = false; - /* HW Power on sequence */ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, @@ -821,6 +845,10 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) "init MAC Fail as power on failure\n"); return false; } + + bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); + rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4); rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp); @@ -837,25 +865,21 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3); if (bytetmp & BIT(0)) { bytetmp = rtl_read_byte(rtlpriv, 0x7c); - bytetmp |= BIT(6); - rtl_write_byte(rtlpriv, 0x7c, bytetmp); + rtl_write_byte(rtlpriv, 0x7c, bytetmp | BIT(6)); } + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR); - bytetmp |= BIT(3); - rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp); + rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp | BIT(3)); bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1); - bytetmp &= ~BIT(4); - rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp); - - bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3); - rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77); + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp & (~BIT(4))); rtl_write_word(rtlpriv, REG_CR, 0x2ff); - if (!mac_func_enable) { - if (!_rtl8723be_llt_table_init(hw)) + if (!rtlhal->mac_func_enable) { + if (_rtl8723be_llt_table_init(hw) == false) return false; } + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); @@ -873,8 +897,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF); 
rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config); - rtl_write_byte(rtlpriv, 0x4d0, 0x0); - rtl_write_dword(rtlpriv, REG_BCNQ_DESA, ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) & DMA_BIT_MASK(32)); @@ -901,57 +923,213 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_INT_MIG, 0); - bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); - rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6)); + rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3); - do { - retry++; - bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); - } while ((retry < 200) && (bytetmp & BIT(7))); - - _rtl8723be_gen_refresh_led_state(hw); - - rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); + /* <20130114, Kordan> The following setting is + * only for DPDT and Fixed board type. + * TODO: A better solution is configure it + * according EFUSE during the run-time. + */ + rtl_set_bbreg(hw, 0x64, BIT(20), 0x0);/* 0x66[4]=0 */ + rtl_set_bbreg(hw, 0x64, BIT(24), 0x0);/* 0x66[8]=0 */ + rtl_set_bbreg(hw, 0x40, BIT(4), 0x0)/* 0x40[4]=0 */; + rtl_set_bbreg(hw, 0x40, BIT(3), 0x1)/* 0x40[3]=1 */; + rtl_set_bbreg(hw, 0x4C, BIT(24) | BIT(23), 0x2)/* 0x4C[24:23]=10 */; + rtl_set_bbreg(hw, 0x944, BIT(1) | BIT(0), 0x3)/* 0x944[1:0]=11 */; + rtl_set_bbreg(hw, 0x930, MASKBYTE0, 0x77)/* 0x930[7:0]=77 */; + rtl_set_bbreg(hw, 0x38, BIT(11), 0x1)/* 0x38[11]=1 */; bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); - rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2)); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & (~BIT(2))); + _rtl8723be_gen_refresh_led_state(hw); return true; } static void _rtl8723be_hw_configure(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 reg_bw_opmode; - u32 reg_ratr, reg_prsr; + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rrsr; + + reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + /* Init value for RRSR. */ + rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr); + + /* ARFB table 9 for 11ac 5G 2SS */ + rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000); + + /* ARFB table 10 for 11ac 5G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000); + + /* CF-End setting. */ + rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00); + + /* 0x456 = 0x70, sugguested by Zhilin */ + rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70); + + /* Set retry limit */ + rtl_write_word(rtlpriv, REG_RL, 0x0707); + + /* Set Data / Response auto rate fallack retry count */ + rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); + + rtlpci->reg_bcn_ctrl_val = 0x1d; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val); + + /* TBTT prohibit hold time. Suggested by designer TimChen. */ + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); /* 8 ms */ + + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + + /*For Rx TP. Suggested by SD1 Richard. Added by tynli. 
2010.04.12.*/ + rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); - reg_bw_opmode = BW_OPMODE_20MHZ; - reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | - RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; - reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80); - rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr); - rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); + + rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x1F); +} + +static u8 _rtl8723be_dbi_read(struct rtl_priv *rtlpriv, u16 addr) +{ + u16 read_addr = addr & 0xfffc; + u8 ret = 0, tmp = 0, count = 0; + + rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr); + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } + if (0 == tmp) { + read_addr = REG_DBI_RDATA + addr % 4; + ret = rtl_read_byte(rtlpriv, read_addr); + } + + return ret; +} + +static void _rtl8723be_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data) +{ + u8 tmp = 0, count = 0; + u16 write_addr = 0, remainder = addr % 4; + + /* Write DBI 1Byte Data */ + write_addr = REG_DBI_WDATA + remainder; + rtl_write_byte(rtlpriv, write_addr, data); + + /* Write DBI 2Byte Address & Write Enable */ + write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12)); + rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr); + + /* Write DBI Write Flag */ + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1); + + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } +} + +static u16 _rtl8723be_mdio_read(struct rtl_priv *rtlpriv, u8 addr) +{ + u16 ret = 0; + u8 tmp = 0, count = 0; + + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6); + count++; + } + + if (0 == tmp) + ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA); + + return ret; +} + +static void _rtl8723be_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data) +{ + u8 tmp = 0, count = 0; + + rtl_write_word(rtlpriv, REG_MDIO_WDATA, data); + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5); + count++; + } } static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 tmp8 = 0; + u16 tmp16 = 0; - rtl_write_byte(rtlpriv, 0x34b, 0x93); - rtl_write_word(rtlpriv, 0x350, 0x870c); - rtl_write_byte(rtlpriv, 0x352, 0x1); + /* Overwrite following ePHY parameter for + * some platform compatibility issue, + * especially when CLKReq is enabled, 2012.11.09. 
+ */ + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x01); + if (tmp16 != 0x0663) + _rtl8723be_mdio_write(rtlpriv, 0x01, 0x0663); - if (ppsc->support_backdoor) - rtl_write_byte(rtlpriv, 0x349, 0x1b); - else - rtl_write_byte(rtlpriv, 0x349, 0x03); + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x04); + if (tmp16 != 0x7544) + _rtl8723be_mdio_write(rtlpriv, 0x04, 0x7544); + + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x06); + if (tmp16 != 0xB880) + _rtl8723be_mdio_write(rtlpriv, 0x06, 0xB880); + + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x07); + if (tmp16 != 0x4000) + _rtl8723be_mdio_write(rtlpriv, 0x07, 0x4000); + + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x08); + if (tmp16 != 0x9003) + _rtl8723be_mdio_write(rtlpriv, 0x08, 0x9003); + + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x09); + if (tmp16 != 0x0D03) + _rtl8723be_mdio_write(rtlpriv, 0x09, 0x0D03); + + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x0A); + if (tmp16 != 0x4037) + _rtl8723be_mdio_write(rtlpriv, 0x0A, 0x4037); - rtl_write_word(rtlpriv, 0x350, 0x2718); - rtl_write_byte(rtlpriv, 0x352, 0x1); + tmp16 = _rtl8723be_mdio_read(rtlpriv, 0x0B); + if (tmp16 != 0x0070) + _rtl8723be_mdio_write(rtlpriv, 0x0B, 0x0070); + + /* Configuration Space offset 0x70f BIT7 is used to control L0S */ + tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); + _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + + /* Configuration Space offset 0x719 Bit3 is for L1 + * BIT4 is for clock request + */ + tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x719); + _rtl8723be_dbi_write(rtlpriv, 0x719, tmp8 | BIT(3) | BIT(4)); } void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw) @@ -961,30 +1139,208 @@ void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", - rtlpriv->sec.pairwise_enc_algorithm, - rtlpriv->sec.group_enc_algorithm); + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "not open hw encryption\n"); return; } + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; if (rtlpriv->sec.use_defaultkey) { sec_reg_value |= SCR_TXUSEDK; sec_reg_value |= SCR_RXUSEDK; } + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); - RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n", - sec_reg_value); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "The SECR-value %x\n", sec_reg_value); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); } +static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + rtlhal->mac_func_enable = false; + /* Combo (PCIe + USB) Card and PCIe-MF Card */ + /* 1. Run LPS WL RFOFF flow */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW); + + /* 2. 0x1F[7:0] = 0 */ + /* turn off RF */ + /* rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); */ + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && + rtlhal->fw_ready) { + rtl8723be_firmware_selfreset(hw); + } + + /* Reset MCU. Suggested by Filen. */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); + + /* g. MCUFWDL 0x80[1:0]=0 */ + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + /* HW card disable configuration. 
*/ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW); + + /* Reset MCU IO Wrapper */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); + + /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */ + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); +} + +static bool _rtl8723be_check_pcie_dma_hang(struct rtl_priv *rtlpriv) +{ + u8 tmp; + + /* write reg 0x350 Bit[26]=1. Enable debug port. */ + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3); + if (!(tmp & BIT(2))) { + rtl_write_byte(rtlpriv, REG_DBI_CTRL + 3, (tmp | BIT(2))); + mdelay(100); /* Suggested by DD Justin_tsai. */ + } + + /* read reg 0x350 Bit[25] if 1 : RX hang + * read reg 0x350 Bit[24] if 1 : TX hang + */ + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3); + if ((tmp & BIT(0)) || (tmp & BIT(1))) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CheckPcieDMAHang8723BE(): true!!\n"); + return true; + } + return false; +} + +static void _rtl8723be_reset_pcie_interface_dma(struct rtl_priv *rtlpriv, + bool mac_power_on) +{ + u8 tmp; + bool release_mac_rx_pause; + u8 backup_pcie_dma_pause; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "ResetPcieInterfaceDMA8723BE()\n"); + + /* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03" + * released by SD1 Alan. + * 2013.05.07, by tynli. + */ + + /* 1. disable register write lock + * write 0x1C bit[1:0] = 2'h0 + * write 0xCC bit[2] = 1'b1 + */ + tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL); + tmp &= ~(BIT(1) | BIT(0)); + rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp); + tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2); + tmp |= BIT(2); + rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp); + + /* 2. Check and pause TRX DMA + * write 0x284 bit[18] = 1'b1 + * write 0x301 = 0xFF + */ + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp & BIT(2)) { + /* Already pause before the function for another purpose. */ + release_mac_rx_pause = false; + } else { + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2))); + release_mac_rx_pause = true; + } + + backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1); + if (backup_pcie_dma_pause != 0xFF) + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF); + + if (mac_power_on) { + /* 3. reset TRX function + * write 0x100 = 0x00 + */ + rtl_write_byte(rtlpriv, REG_CR, 0); + } + + /* 4. Reset PCIe DMA + * write 0x003 bit[0] = 0 + */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + tmp &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp); + + /* 5. Enable PCIe DMA + * write 0x003 bit[0] = 1 + */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + tmp |= BIT(0); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp); + + if (mac_power_on) { + /* 6. enable TRX function + * write 0x100 = 0xFF + */ + rtl_write_byte(rtlpriv, REG_CR, 0xFF); + + /* We should init LLT & RQPN and + * prepare Tx/Rx descrptor address later + * because MAC function is reset. + */ + } + + /* 7. Restore PCIe autoload down bit + * write 0xF8 bit[17] = 1'b1 + */ + tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2); + tmp |= BIT(1); + rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp); + + /* In MAC power on state, BB and RF maybe in ON state, + * if we release TRx DMA here + * it will cause packets to be started to Tx/Rx, + * so we release Tx/Rx DMA later. 
+ */ + if (!mac_power_on) { + /* 8. release TRX DMA + * write 0x284 bit[18] = 1'b0 + * write 0x301 = 0x00 + */ + if (release_mac_rx_pause) { + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, + (tmp & (~BIT(2)))); + } + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, + backup_pcie_dma_pause); + } + + /* 9. lock system register + * write 0xCC bit[2] = 1'b0 + */ + tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2); + tmp &= ~(BIT(2)); + rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp); +} + int rtl8723be_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1002,33 +1358,51 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) local_save_flags(flags); local_irq_enable(); + rtlhal->fw_ready = false; rtlpriv->rtlhal.being_init_adapter = true; rtlpriv->intf_ops->disable_aspm(hw); + + tmp_u1b = rtl_read_byte(rtlpriv, REG_CR); + if (tmp_u1b != 0 && tmp_u1b != 0xea) { + rtlhal->mac_func_enable = true; + } else { + rtlhal->mac_func_enable = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON; + } + + if (_rtl8723be_check_pcie_dma_hang(rtlpriv)) { + _rtl8723be_reset_pcie_interface_dma(rtlpriv, + rtlhal->mac_func_enable); + rtlhal->mac_func_enable = false; + } + if (rtlhal->mac_func_enable) { + _rtl8723be_poweroff_adapter(hw); + rtlhal->mac_func_enable = false; + } rtstatus = _rtl8723be_init_mac(hw); if (!rtstatus) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); err = 1; goto exit; } + tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG); - tmp_u1b &= 0x7F; - rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b); + rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b & 0x7F); - err = rtl8723_download_fw(hw, true); + err = rtl8723_download_fw(hw, true, FW_8723B_POLLING_TIMEOUT_COUNT); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. 
Init HW without FW now..\n"); err = 1; - rtlhal->fw_ready = false; goto exit; - } else { - rtlhal->fw_ready = true; } + rtlhal->fw_ready = true; + rtlhal->last_hmeboxnum = 0; rtl8723be_phy_mac_config(hw); /* because last function modify RCR, so we update * rcr var here, or TP will unstable for receive_config - * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx + * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252 */ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR); @@ -1036,7 +1410,6 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); rtl8723be_phy_bb_config(hw); - rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; rtl8723be_phy_rf_config(hw); rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, @@ -1046,10 +1419,8 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) rtlphy->rfreg_chnlval[0] &= 0xFFF03FF; rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11)); - rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); - rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); - rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); _rtl8723be_hw_configure(hw); + rtlhal->mac_func_enable = true; rtl_cam_reset_all_entry(hw); rtl8723be_enable_hw_security_config(hw); @@ -1061,36 +1432,32 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw) rtl8723be_bt_hw_init(hw); - rtl_set_bbreg(hw, 0x64, BIT(20), 0); - rtl_set_bbreg(hw, 0x64, BIT(24), 0); - - rtl_set_bbreg(hw, 0x40, BIT(4), 0); - rtl_set_bbreg(hw, 0x40, BIT(3), 1); - - rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3); - rtl_set_bbreg(hw, 0x930, 0xff, 0x77); - - rtl_set_bbreg(hw, 0x38, BIT(11), 0x1); - - rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000); - if (ppsc->rfpwr_state == ERFON) { + rtl8723be_phy_set_rfpath_switch(hw, 1); + /* when use 1ant NIC, iqk will disturb BT music + * root cause is not clear now, is something + * related with 'mdelay' and Reg[0x948] + */ + if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2 || + !rtlpriv->cfg->ops->get_btc_status()) { + rtl8723be_phy_iq_calibrate(hw, false); + rtlphy->iqk_initialized = true; + } rtl8723be_dm_check_txpower_tracking(hw); rtl8723be_phy_lc_calibrate(hw); } - tmp_u1b = efuse_read_1byte(hw, 0x1FA); - if (!(tmp_u1b & BIT(0))) { - rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05); - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n"); - } - if (!(tmp_u1b & BIT(4))) { - tmp_u1b = rtl_read_byte(rtlpriv, 0x16); - tmp_u1b &= 0x0F; - rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80); - udelay(10); - rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90); - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); + rtl_write_byte(rtlpriv, REG_NAV_UPPER, ((30000 + 127) / 128)); + + /* Release Rx DMA. */ + tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp_u1b & BIT(2)) { + /* Release Rx DMA if needed */ + tmp_u1b &= (~BIT(2)); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b); } + /* Release Tx/Rx PCIE DMA. 
*/ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0); + rtl8723be_dm_init(hw); exit: local_irq_restore(flags); @@ -1103,43 +1470,29 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); enum version_8723e version = VERSION_UNKNOWN; - u8 count = 0; - u8 value8; u32 value32; - rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0); - - value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2); - rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0)); - - value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); - rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0)); - - value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); - while (((value8 & BIT(0))) && (count++ < 100)) { - udelay(10); - value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); - } - count = 0; - value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); - while ((value8 == 0) && (count++ < 50)) { - value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); - mdelay(1); - } value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1); if ((value32 & (CHIP_8723B)) != CHIP_8723B) RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unkown chip version\n"); else - version = (enum version_8723e) VERSION_TEST_CHIP_1T1R_8723B; + version = (enum version_8723e)CHIP_8723B; - rtlphy->rf_type = RF_1T1R; + rtlphy->rf_type = RF_1T1R; + + /* treat rtl8723be chip as MP version in default */ + version = (enum version_8723e)(version | NORMAL_CHIP); + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); + /* cut version */ + version |= (enum version_8723e)(value32 & CHIP_VER_RTL_MASK); + /* Manufacture */ + if (((value32 & EXT_VENDOR_ID) >> 18) == 0x01) + version = (enum version_8723e)(version | CHIP_VENDOR_SMIC); - value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); - if (value8 >= 0x02) - version |= BIT(3); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ? 
- "RF_2T2R" : "RF_1T1R"); + "RF_2T2R" : "RF_1T1R"); return version; } @@ -1150,43 +1503,29 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc; enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + u8 mode = MSR_NOLINK; - rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0); - RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD, - "clear 0x550 when set HW_VAR_MEDIA_STATUS\n"); - - if (type == NL80211_IFTYPE_UNSPECIFIED || - type == NL80211_IFTYPE_STATION) { - _rtl8723be_stop_tx_beacon(hw); - _rtl8723be_enable_bcn_sub_func(hw); - } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { - _rtl8723be_resume_tx_beacon(hw); - _rtl8723be_disable_bcn_sub_func(hw); - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "Set HW_VAR_MEDIA_STATUS: " - "No such media status(%x).\n", type); - } switch (type) { case NL80211_IFTYPE_UNSPECIFIED: - bt_msr |= MSR_NOLINK; - ledaction = LED_CTL_LINK; + mode = MSR_NOLINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to NO LINK!\n"); break; case NL80211_IFTYPE_ADHOC: - bt_msr |= MSR_ADHOC; + case NL80211_IFTYPE_MESH_POINT: + mode = MSR_ADHOC; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to Ad Hoc!\n"); break; case NL80211_IFTYPE_STATION: - bt_msr |= MSR_INFRA; + mode = MSR_INFRA; ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to STA!\n"); break; case NL80211_IFTYPE_AP: - bt_msr |= MSR_AP; + mode = MSR_AP; + ledaction = LED_CTL_LINK; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Set Network type to AP!\n"); break; @@ -1195,9 +1534,33 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw, "Network type %d not support!\n", type); return 1; } - rtl_write_byte(rtlpriv, (MSR), bt_msr); + + /* MSR_INFRA == Link in infrastructure network; + * MSR_ADHOC == Link in ad hoc network; + * Therefore, check link state is necessary. + * + * MSR_AP == AP mode; link state is not cared here. 
+ */ + if (mode != MSR_AP && rtlpriv->mac80211.link_state < MAC80211_LINKED) { + mode = MSR_NOLINK; + ledaction = LED_CTL_NO_LINK; + } + + if (mode == MSR_NOLINK || mode == MSR_INFRA) { + _rtl8723be_stop_tx_beacon(hw); + _rtl8723be_enable_bcn_sub_func(hw); + } else if (mode == MSR_ADHOC || mode == MSR_AP) { + _rtl8723be_resume_tx_beacon(hw); + _rtl8723be_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", + mode); + } + + rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); rtlpriv->cfg->ops->led_control(hw, ledaction); - if ((bt_msr & MSR_MASK) == MSR_AP) + if (mode == MSR_AP) rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); else rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); @@ -1224,6 +1587,7 @@ void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(®_rcr)); } + } int rtl8723be_set_network_type(struct ieee80211_hw *hw, @@ -1240,6 +1604,7 @@ int rtl8723be_set_network_type(struct ieee80211_hw *hw, } else { rtl8723be_set_check_bssid(hw, false); } + return 0; } @@ -1249,6 +1614,7 @@ int rtl8723be_set_network_type(struct ieee80211_hw *hw, void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl8723_dm_init_edca_turbo(hw); switch (aci) { case AC1_BK: @@ -1268,20 +1634,32 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp; + + tmp = rtl_read_dword(rtlpriv, REG_HISR); + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl8723be_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + rtl8723be_clear_interrupt(hw);/*clear it here first*/ + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; - /* there are some C2H CMDs have been sent - * before system interrupt is enabled, e.g., C2H, CPWM. - * So we need to clear all C2H events that FW has notified, - * otherwise FW won't schedule any commands anymore. - */ - rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0); + /*enable system interrupt*/ rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF); } @@ -1294,48 +1672,7 @@ void rtl8723be_disable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); rtlpci->irq_enabled = false; - synchronize_irq(rtlpci->pdev->irq); -} - -static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 u1b_tmp; - - /* Combo (PCIe + USB) Card and PCIe-MF Card */ - /* 1. Run LPS WL RFOFF flow */ - rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW); - - /* 2. 0x1F[7:0] = 0 */ - /* turn off RF */ - rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); - if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && - rtlhal->fw_ready) - rtl8723be_firmware_selfreset(hw); - - /* Reset MCU. Suggested by Filen. 
*/ - u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); - - /* g. MCUFWDL 0x80[1:0]= 0 */ - /* reset MCU ready status */ - rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); - - /* HW card disable configuration. */ - rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW); - - /* Reset MCU IO Wrapper */ - u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); - rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); - u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); - rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); - - /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */ - /* lock ISO/CLK/Power control register */ - rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); + /*synchronize_irq(rtlpci->pdev->irq);*/ } void rtl8723be_card_disable(struct ieee80211_hw *hw) @@ -1442,10 +1779,9 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "hal_ReadPowerValueFromPROM8723BE(): " - "PROMContent[0x%x]= 0x%x\n", + "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x]=0x%x\n", (addr + 1), hwinfo[addr + 1]); - if (0xFF == hwinfo[addr + 1]) /*YJ, add, 120316*/ + if (0xFF == hwinfo[addr + 1]) /*YJ,add,120316*/ autoload_fail = true; if (autoload_fail) { @@ -1453,7 +1789,7 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, "auto load fail : Use Default value!\n"); for (path = 0; path < MAX_RF_PATH; path++) { /* 2.4G default value */ - for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { pw2g->index_cck_base[path][group] = 0x2D; pw2g->index_bw40_base[path][group] = 0x2D; } @@ -1471,12 +1807,14 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, } return; } + for (path = 0; path < MAX_RF_PATH; path++) { /*2.4G default value*/ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { pw2g->index_cck_base[path][group] = hwinfo[addr++]; if (pw2g->index_cck_base[path][group] == 0xFF) pw2g->index_cck_base[path][group] = 0x2D; + } for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) { pw2g->index_bw40_base[path][group] = hwinfo[addr++]; @@ -1493,8 +1831,10 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, (hwinfo[addr] & 0xf0) >> 4; /*bit sign number to 8 bit sign number*/ if (pw2g->bw20_diff[path][cnt] & BIT(3)) - pw2g->bw20_diff[path][cnt] |= 0xF0; + pw2g->bw20_diff[path][cnt] |= + 0xF0; } + if (hwinfo[addr] == 0xFF) { pw2g->ofdm_diff[path][cnt] = 0x04; } else { @@ -1517,6 +1857,7 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, pw2g->bw40_diff[path][cnt] |= 0xF0; } + if (hwinfo[addr] == 0xFF) { pw2g->bw20_diff[path][cnt] = 0xFE; } else { @@ -1537,9 +1878,10 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, pw2g->ofdm_diff[path][cnt] |= 0xF0; } - if (hwinfo[addr] == 0xFF) { + + if (hwinfo[addr] == 0xFF) pw2g->cck_diff[path][cnt] = 0xFE; - } else { + else { pw2g->cck_diff[path][cnt] = (hwinfo[addr] & 0x0f); if (pw2g->cck_diff[path][cnt] & BIT(3)) @@ -1549,12 +1891,14 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, addr++; } } + /*5G default value*/ for (group = 0; group < MAX_CHNL_GROUP_5G; group++) { pw5g->index_bw40_base[path][group] = hwinfo[addr++]; if (pw5g->index_bw40_base[path][group] == 0xFF) pw5g->index_bw40_base[path][group] = 0xFE; } + for (cnt 
= 0; cnt < MAX_TX_COUNT; cnt++) { if (cnt == 0) { pw5g->bw40_diff[path][cnt] = 0; @@ -1568,9 +1912,10 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, pw5g->bw20_diff[path][cnt] |= 0xF0; } - if (hwinfo[addr] == 0xFF) { + + if (hwinfo[addr] == 0xFF) pw5g->ofdm_diff[path][cnt] = 0x04; - } else { + else { pw5g->ofdm_diff[path][0] = (hwinfo[addr] & 0x0f); if (pw5g->ofdm_diff[path][cnt] & BIT(3)) @@ -1587,6 +1932,7 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, if (pw5g->bw40_diff[path][cnt] & BIT(3)) pw5g->bw40_diff[path][cnt] |= 0xF0; } + if (hwinfo[addr] == 0xFF) { pw5g->bw20_diff[path][cnt] = 0xFE; } else { @@ -1598,6 +1944,7 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, addr++; } } + if (hwinfo[addr] == 0xFF) { pw5g->ofdm_diff[path][1] = 0xFE; pw5g->ofdm_diff[path][2] = 0xFE; @@ -1653,14 +2000,16 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pw2g.ofdm_diff[rf_path][i]; } + for (i = 0; i < 14; i++) { - RTPRINT(rtlpriv, FINIT, INIT_EEPROM, - "RF(%d)-Ch(%d) [CCK / HT40_1S ] = " - "[0x%x / 0x%x ]\n", rf_path, i, + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, + "RF(%d)-Ch(%d) [CCK / HT40_1S ] = [0x%x / 0x%x ]\n", + rf_path, i, rtlefuse->txpwrlevel_cck[rf_path][i], rtlefuse->txpwrlevel_ht40_1s[rf_path][i]); } } + if (!autoload_fail) rtlefuse->eeprom_thermalmeter = hwinfo[EEPROM_THERMAL_METER_88E]; @@ -1671,8 +2020,9 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, rtlefuse->apk_thermalmeterignore = true; rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; } + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; - RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); if (!autoload_fail) { @@ -1683,7 +2033,7 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, } else { rtlefuse->eeprom_regulatory = 0; } - RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); } @@ -1743,6 +2093,7 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); rtlefuse->autoload_failflag = false; } + if (rtlefuse->autoload_failflag) return; @@ -1958,100 +2309,10 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw) _rtl8723be_hal_customized_behavior(hw); } -static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 ratr_value; - u8 ratr_index = 0; - u8 nmode = mac->ht_enable; - u8 mimo_ps = IEEE80211_SMPS_OFF; - u16 shortgi_rate; - u32 tmp_ratr_value; - u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
- 1 : 0; - enum wireless_mode wirelessmode = mac->mode; - - if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; - else - ratr_value = sta->supp_rates[0]; - if (mac->opmode == NL80211_IFTYPE_ADHOC) - ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); - switch (wirelessmode) { - case WIRELESS_MODE_B: - if (ratr_value & 0x0000000c) - ratr_value &= 0x0000000d; - else - ratr_value &= 0x0000000f; - break; - case WIRELESS_MODE_G: - ratr_value &= 0x00000FF5; - break; - case WIRELESS_MODE_N_24G: - case WIRELESS_MODE_N_5G: - nmode = 1; - if (mimo_ps == IEEE80211_SMPS_STATIC) { - ratr_value &= 0x0007F005; - } else { - u32 ratr_mask; - - if (get_rf_type(rtlphy) == RF_1T2R || - get_rf_type(rtlphy) == RF_1T1R) - ratr_mask = 0x000ff005; - else - ratr_mask = 0x0f0ff005; - ratr_value &= ratr_mask; - } - break; - default: - if (rtlphy->rf_type == RF_1T2R) - ratr_value &= 0x000ff0ff; - else - ratr_value &= 0x0f0ff0ff; - break; - } - if ((rtlpriv->btcoexist.bt_coexistence) && - (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && - (rtlpriv->btcoexist.bt_cur_state) && - (rtlpriv->btcoexist.bt_ant_isolation) && - ((rtlpriv->btcoexist.bt_service == BT_SCO) || - (rtlpriv->btcoexist.bt_service == BT_BUSY))) - ratr_value &= 0x0fffcfc0; - else - ratr_value &= 0x0FFFFFFF; - - if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || - (!curtxbw_40mhz && curshortgi_20mhz))) { - ratr_value |= 0x10000000; - tmp_ratr_value = (ratr_value >> 12); - - for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { - if ((1 << shortgi_rate) & tmp_ratr_value) - break; - } - shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | - (shortgi_rate << 4) | (shortgi_rate); - } - rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); - - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, - "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); -} - static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index) { u8 ret = 0; - switch (rate_index) { case RATR_INX_WIRELESS_NGB: ret = 1; @@ -2090,16 +2351,15 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, u32 ratr_bitmap; u8 ratr_index; u8 curtxbw_40mhz = (sta->ht_cap.cap & - IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; + IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; + 1 : 0; u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
- 1 : 0; + 1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; u8 rate_mask[7]; u8 macid = 0; - u8 mimo_ps = IEEE80211_SMPS_OFF; sta_entry = (struct rtl_sta_info *)sta->drv_priv; wirelessmode = sta_entry->wireless_mode; @@ -2135,55 +2395,40 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, else ratr_bitmap &= 0x00000ff5; break; - case WIRELESS_MODE_A: - ratr_index = RATR_INX_WIRELESS_A; - ratr_bitmap &= 0x00000ff0; - break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: ratr_index = RATR_INX_WIRELESS_NGB; - - if (mimo_ps == IEEE80211_SMPS_STATIC || - mimo_ps == IEEE80211_SMPS_DYNAMIC) { - if (rssi_level == 1) - ratr_bitmap &= 0x00070000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0007f000; - else - ratr_bitmap &= 0x0007f005; + if (rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } } else { - if (rtlphy->rf_type == RF_1T1R) { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff005; - } + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff015; } else { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x0f8f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f8ff000; - else - ratr_bitmap &= 0x0f8ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x0f8f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f8ff000; - else - ratr_bitmap &= 0x0f8ff005; - } + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff005; } } if ((curtxbw_40mhz && curshortgi_40mhz) || @@ -2203,18 +2448,17 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, ratr_bitmap &= 0x0f0ff0ff; break; } + sta_entry->ratr_index = ratr_index; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n", ratr_bitmap); - *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28); + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); rate_mask[0] = macid; rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) | - (shortgi ? 0x80 : 0x00); + (shortgi ? 
0x80 : 0x00); rate_mask[2] = curtxbw_40mhz; - /* if (prox_priv->proxim_modeinfo->power_output > 0) - * rate_mask[2] |= BIT(6); - */ rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff); rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8); @@ -2228,7 +2472,7 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, rate_mask[2], rate_mask[3], rate_mask[4], rate_mask[5], rate_mask[6]); - rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask); + rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RA_MASK, 7, rate_mask); _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); } @@ -2239,8 +2483,6 @@ void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->dm.useramask) rtl8723be_update_hal_rate_mask(hw, sta, rssi_level); - else - rtl8723be_update_hal_rate_table(hw, sta); } void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw) @@ -2264,7 +2506,7 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) struct rtl_phy *rtlphy = &(rtlpriv->phy); enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u8 u1tmp; - bool actuallyset = false; + bool b_actuallyset = false; if (rtlpriv->rtlhal.being_init_adapter) return false; @@ -2280,6 +2522,7 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) ppsc->rfchange_inprogress = true; spin_unlock(&rtlpriv->locks.rf_ps_lock); } + cur_rfstate = ppsc->rfpwr_state; rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, @@ -2292,24 +2535,23 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) else e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF; - if (ppsc->hwradiooff && - (e_rfpowerstate_toset == ERFON)) { + if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio ON, RF ON\n"); e_rfpowerstate_toset = ERFON; ppsc->hwradiooff = false; - actuallyset = true; - } else if (!ppsc->hwradiooff && - (e_rfpowerstate_toset == ERFOFF)) { + b_actuallyset = true; + } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "GPIOChangeRF - HW Radio OFF, RF OFF\n"); e_rfpowerstate_toset = ERFOFF; ppsc->hwradiooff = true; - actuallyset = true; + b_actuallyset = true; } - if (actuallyset) { + + if (b_actuallyset) { spin_lock(&rtlpriv->locks.rf_ps_lock); ppsc->rfchange_inprogress = false; spin_unlock(&rtlpriv->locks.rf_ps_lock); @@ -2321,8 +2563,10 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) ppsc->rfchange_inprogress = false; spin_unlock(&rtlpriv->locks.rf_ps_lock); } + *valid = 1; return !ppsc->hwradiooff; + } void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, @@ -2363,6 +2607,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, rtlpriv->sec.key_len[idx] = 0; } } + } else { switch (enc_algo) { case WEP40_ENCRYPTION: @@ -2378,7 +2623,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, enc_algo = CAM_AES; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); enc_algo = CAM_TKIP; break; @@ -2398,22 +2643,22 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, if (entry_id >= TOTAL_CAM_ENTRY) { RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, - "Can not find free" - " hw security cam " - "entry\n"); + "Can not find free hw security cam entry\n"); return; } } else { entry_id = CAM_PAIRWISE_KEY_POSITION; } + key_index = PAIRWISE_KEYIDX; is_pairwise = true; } } + if 
(rtlpriv->sec.key_len[key_index] == 0) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "delete one entry, entry_id is %d\n", - entry_id); + entry_id); if (mac->opmode == NL80211_IFTYPE_AP) rtl_cam_del_entry(hw, p_macaddr); rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); @@ -2422,12 +2667,12 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, "add one entry\n"); if (is_pairwise) { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, - "set Pairwise key\n"); + "set Pairwiase key\n"); rtl_cam_add_one_entry(hw, macaddr, key_index, - entry_id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[key_index]); + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); } else { RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "set group key\n"); @@ -2442,10 +2687,11 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, rtlpriv->sec.key_buf [entry_id]); } + rtl_cam_add_one_entry(hw, macaddr, key_index, - entry_id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[entry_id]); + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); } } } @@ -2464,7 +2710,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.btcoexist = 1; else rtlpriv->btcoexist.btc_info.btcoexist = 0; - value = hwinfo[RF_OPTION4]; + value = hwinfo[EEPROM_RF_BT_SETTING_8723B]; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); } else { @@ -2472,6 +2718,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; } + } void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) @@ -2492,6 +2739,7 @@ void rtl8723be_bt_hw_init(struct ieee80211_hw *hw) if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); + } void rtl8723be_suspend(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h index 64c7551af6b797969e98086e600d2ece0240c9f4..eae863d08de832b825ef639eaf601b4728a8ba3b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h @@ -59,4 +59,5 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw); void rtl8723be_bt_hw_init(struct ieee80211_hw *hw); void rtl8723be_suspend(struct ieee80211_hw *hw); void rtl8723be_resume(struct ieee80211_hw *hw); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/rtlwifi/rtl8723be/led.c index cb931a38dc48a3ec381fd042fed99edaf5d388ac..4196efb723a224a5c27cb9523558e3809916c9e8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.c @@ -42,7 +42,7 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, - "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); switch (pled->ledpin) { case LED_PIN_GPIO0: @@ -71,7 +71,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) u8 ledcfg; RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, - "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); @@ -100,7 +100,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch 
case not processed\n"); + "switch case not process\n"); break; } pled->ledon = false; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c index 1575ef9ece9f86292b57c9348174a20bdcc77fc9..20dcc25c506c3b6dec6c8d20a5b3ce530ed2a4e9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c @@ -26,224 +26,28 @@ #include "../wifi.h" #include "../pci.h" #include "../ps.h" -#include "../core.h" #include "reg.h" #include "def.h" #include "phy.h" #include "../rtl8723com/phy_common.h" #include "rf.h" #include "dm.h" +#include "../rtl8723com/dm_common.h" #include "table.h" #include "trx.h" static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw); +static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, u8 configtype); -static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, - u8 channel, u8 *stage, - u8 *step, u32 *delay); -static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, - const u32 condition) -{ - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 _board = rtlefuse->board_type; /*need efuse define*/ - u32 _interface = rtlhal->interface; - u32 _platform = 0x08;/*SupportPlatform */ - u32 cond = condition; - - if (condition == 0xCDCDCDCD) - return true; - - cond = condition & 0xFF; - if ((_board & cond) == 0 && cond != 0x1F) - return false; - - cond = condition & 0xFF00; - cond = cond >> 8; - if ((_interface & cond) == 0 && cond != 0x07) - return false; - - cond = condition & 0xFF0000; - cond = cond >> 16; - if ((_platform & cond) == 0 && cond != 0x0F) - return false; - return true; -} - -static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - u32 arraylength; - u32 *ptrarray; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); - arraylength = RTL8723BEMAC_1T_ARRAYLEN; - ptrarray = RTL8723BEMAC_1T_ARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]); - return true; -} - -static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, - u8 configtype) -{ - #define READ_NEXT_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = array_table[i];\ - v2 = array_table[i+1]; \ - } while (0) +static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, + u8 *step, u32 *delay); - int i; - u32 *array_table; - u16 arraylen; - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; - - if (configtype == BASEBAND_CONFIG_PHY_REG) { - arraylen = RTL8723BEPHY_REG_1TARRAYLEN; - array_table = RTL8723BEPHY_REG_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - rtl_bb_delay(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - if (!_rtl8723be_check_condition(hw, array_table[i])) { - /*Discard the following (offset, data) pairs*/ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /* Configure 
matched pairs and - * skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_bb_delay(hw, - v1, v2); - READ_NEXT_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - } - } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { - arraylen = RTL8723BEAGCTAB_1TARRAYLEN; - array_table = RTL8723BEAGCTAB_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xCDCDCDCD) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - continue; - } else {/*This line is the start line of branch.*/ - if (!_rtl8723be_check_condition(hw, array_table[i])) { - /* Discard the following - * (offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - READ_NEXT_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is " - "%x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], array_table[i + 1]); - } - } - return true; -} - -static u8 _rtl8723be_get_rate_section_index(u32 regaddr) -{ - u8 index = 0; - - switch (regaddr) { - case RTXAGC_A_RATE18_06: - case RTXAGC_B_RATE18_06: - index = 0; - break; - case RTXAGC_A_RATE54_24: - case RTXAGC_B_RATE54_24: - index = 1; - break; - case RTXAGC_A_CCK1_MCS32: - case RTXAGC_B_CCK1_55_MCS32: - index = 2; - break; - case RTXAGC_B_CCK11_A_CCK2_11: - index = 3; - break; - case RTXAGC_A_MCS03_MCS00: - case RTXAGC_B_MCS03_MCS00: - index = 4; - break; - case RTXAGC_A_MCS07_MCS04: - case RTXAGC_B_MCS07_MCS04: - index = 5; - break; - case RTXAGC_A_MCS11_MCS08: - case RTXAGC_B_MCS11_MCS08: - index = 6; - break; - case RTXAGC_A_MCS15_MCS12: - case RTXAGC_B_MCS15_MCS12: - index = 7; - break; - default: - regaddr &= 0xFFF; - if (regaddr >= 0xC20 && regaddr <= 0xC4C) - index = (u8) ((regaddr - 0xC20) / 4); - else if (regaddr >= 0xE20 && regaddr <= 0xE4C) - index = (u8) ((regaddr - 0xE20) / 4); - break; - }; - return index; -} +static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl8723be_phy_set_io(struct ieee80211_hw *hw); u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask) @@ -265,9 +69,8 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), rfpath(%#x), " - "bitmask(%#x), original_value(%#x)\n", - regaddr, rfpath, bitmask, original_value); + "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value); return readback_value; } @@ -300,6 +103,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, path); + } bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw) @@ -316,7 +120,7 @@ bool 
rtl8723be_phy_bb_config(struct ieee80211_hw *hw) bool rtstatus = true; struct rtl_priv *rtlpriv = rtl_priv(hw); u16 regval; - u8 reg_hwparafile = 1; + u8 b_reg_hwparafile = 1; u32 tmp; u8 crystalcap = rtlpriv->efuse.crystalcap; rtl8723_phy_init_bb_rf_reg_def(hw); @@ -333,7 +137,7 @@ bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); - if (reg_hwparafile == 1) + if (b_reg_hwparafile == 1) rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw); crystalcap = crystalcap & 0x3F; @@ -348,18 +152,49 @@ bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw) return rtl8723be_phy_rf6052_config(hw); } +static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, + const u32 condition) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ + u32 _interface = rtlhal->interface; + u32 _platform = 0x08;/*SupportPlatform */ + u32 cond = condition; + + if (condition == 0xCDCDCDCD) + return true; + + cond = condition & 0xFF; + if ((_board & cond) == 0 && cond != 0x1F) + return false; + + cond = condition & 0xFF00; + cond = cond >> 8; + if ((_interface & cond) == 0 && cond != 0x07) + return false; + + cond = condition & 0xFF0000; + cond = cond >> 16; + if ((_platform & cond) == 0 && cond != 0x0F) + return false; + return true; +} + static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr, u32 data, enum radio_path rfpath, u32 regaddr) { if (addr == 0xfe || addr == 0xffe) { + /* In order not to disturb BT music + * when wifi init.(1ant NIC only) + */ mdelay(50); } else { rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data); udelay(1); } } - static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw, u32 addr, u32 data) { @@ -368,12 +203,13 @@ static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw, _rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A, addr | maskforphyset); + } static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 band, path, txnum, section; @@ -383,16 +219,38 @@ static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) for (section = 0; section < TX_PWR_BY_RATE_NUM_SECTION; ++section) - rtlphy->tx_power_by_rate_offset[band] - [path][txnum][section] = 0; + rtlphy->tx_power_by_rate_offset + [band][path][txnum][section] = 0; +} + +static void _rtl8723be_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if (addr == 0xfe) { + mdelay(50); + } else if (addr == 0xfd) { + mdelay(5); + } else if (addr == 0xfc) { + mdelay(1); + } else if (addr == 0xfb) { + udelay(50); + } else if (addr == 0xfa) { + udelay(5); + } else if (addr == 0xf9) { + udelay(1); + } else { + rtl_set_bbreg(hw, addr, MASKDWORD, data); + udelay(1); + } } -static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, - u8 path, u8 rate_section, - u8 txnum, u8 value) +static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, + u8 path, u8 rate_section, + u8 txnum, u8 value) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; if (path > RF90_PATH_D) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -417,23 +275,24 @@ static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, break; default: RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 
- "Invalid RateSection %d in Band 2.4G, Rf Path" - " %d, %dTx in PHY_SetTxPowerByRateBase()\n", - rate_section, path, txnum); + "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum); break; }; } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", - band); + band); } + } -static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, - u8 txnum, u8 rate_section) +static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, u8 txnum, + u8 rate_section) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 value = 0; if (path > RF90_PATH_D) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -458,15 +317,14 @@ static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, break; default: RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Invalid RateSection %d in Band 2.4G, Rf Path" - " %d, %dTx in PHY_GetTxPowerByRateBase()\n", - rate_section, path, txnum); + "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum); break; }; } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", - band); + band); } return value; @@ -475,45 +333,51 @@ static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u16 raw_value = 0; + struct rtl_phy *rtlphy = &rtlpriv->phy; + u16 rawvalue = 0; u8 base = 0, path = 0; for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { if (path == RF90_PATH_A) { - raw_value = (u16) (rtlphy->tx_power_by_rate_offset + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF; - base = (raw_value >> 4) * 10 + (raw_value & 0xF); - phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK, - RF_1TX, base); + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); + _rtl8723be_phy_set_txpower_by_rate_base(hw, + BAND_ON_2_4G, path, CCK, RF_1TX, base); } else if (path == RF90_PATH_B) { - raw_value = (u16) (rtlphy->tx_power_by_rate_offset + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF; - base = (raw_value >> 4) * 10 + (raw_value & 0xF); - phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, - CCK, RF_1TX, base); + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); + _rtl8723be_phy_set_txpower_by_rate_base(hw, + BAND_ON_2_4G, + path, CCK, + RF_1TX, base); } - raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [path][RF_1TX][1] >> 24) & 0xFF; - base = (raw_value >> 4) * 10 + (raw_value & 0xF); - phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, - base); - - raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [path][RF_1TX][5] >> 24) & 0xFF; - base = (raw_value >> 4) * 10 + (raw_value & 0xF); - phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, - RF_1TX, base); - - raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [path][RF_2TX][7] >> 24) & 0xFF; - base = (raw_value >> 4) * 10 + (raw_value & 0xF); - phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, - HT_MCS8_MCS15, RF_2TX, base); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][1] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 
0xF); + _rtl8723be_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, + path, OFDM, RF_1TX, + base); + + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][5] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); + _rtl8723be_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, + path, HT_MCS0_MCS7, + RF_1TX, base); + + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_2TX][7] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); + _rtl8723be_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, + path, HT_MCS8_MCS15, + RF_2TX, base); } } -static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val) +static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, + u8 end, u8 base_val) { char i = 0; u8 temp_value = 0; @@ -522,15 +386,15 @@ static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val) for (i = 3; i >= 0; --i) { if (i >= start && i <= end) { /* Get the exact value */ - temp_value = (u8) (*data >> (i * 8)) & 0xF; - temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10; + temp_value = (u8)(*data >> (i * 8)) & 0xF; + temp_value += ((u8)((*data >> (i*8 + 4)) & 0xF)) * 10; /* Change the value to a relative value */ temp_value = (temp_value > base_val) ? temp_value - base_val : base_val - temp_value; } else { - temp_value = (u8) (*data >> (i * 8)) & 0xFF; + temp_value = (u8)(*data >> (i * 8)) & 0xFF; } temp_data <<= 8; temp_data |= temp_value; @@ -538,56 +402,65 @@ static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val) *data = temp_data; } -static void conv_dbm_to_rel(struct ieee80211_hw *hw) +static void _rtl8723be_phy_convert_txpower_dbm_to_relative_value( + struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 base = 0, rfpath = RF90_PATH_A; - base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, - RF_1TX, CCK); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][2]), 1, 1, base); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][3]), 1, 3, base); - - base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, - RF_1TX, OFDM); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][0]), 0, 3, base); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][1]), 0, 3, base); - - base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, - RF_1TX, HT_MCS0_MCS7); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][4]), 0, 3, base); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_1TX][5]), 0, 3, base); - - base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, - RF_2TX, HT_MCS8_MCS15); - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_2TX][6]), 0, 3, base); - - phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] - [rfpath][RF_2TX][7]), 0, 3, base); + base = _rtl8723be_phy_get_txpower_by_rate_base(hw, + BAND_ON_2_4G, rfpath, RF_1TX, CCK); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][2], + 1, 1, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][3], + 1, 3, base); + + base = _rtl8723be_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, + RF_1TX, OFDM); + 
_phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][0], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][1], + 0, 3, base); + + base = _rtl8723be_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, + rfpath, RF_1TX, HT_MCS0_MCS7); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][4], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][5], + 0, 3, base); + + base = _rtl8723be_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, + rfpath, RF_2TX, + HT_MCS8_MCS15); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][6], + 0, 3, base); + + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][7], + 0, 3, base); RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - "<=== conv_dbm_to_rel()\n"); + "<===_rtl8723be_phy_convert_txpower_dbm_to_relative_value()\n"); } -static void _rtl8723be_phy_txpower_by_rate_configuration( - struct ieee80211_hw *hw) +static void phy_txpower_by_rate_config(struct ieee80211_hw *hw) { _rtl8723be_phy_store_txpower_by_rate_base(hw); - conv_dbm_to_rel(hw); + _rtl8723be_phy_convert_txpower_dbm_to_relative_value(hw); } static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; @@ -603,7 +476,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw, BASEBAND_CONFIG_PHY_REG); } - _rtl8723be_phy_txpower_by_rate_configuration(hw); + phy_txpower_by_rate_config(hw); if (!rtstatus) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); return false; @@ -614,39 +487,237 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); return false; } - rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, - 0x200)); + rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); + return true; +} + +static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); + arraylength = RTL8723BEMAC_1T_ARRAYLEN; + ptrarray = RTL8723BEMAC_1T_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); + for (i = 0; i < arraylength; i = i + 2) + rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); + return true; +} + +static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + #define READ_NEXT_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = array_table[i];\ + v2 = array_table[i+1]; \ + } while (0) + + int i; + u32 *array_table; + u16 arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + arraylen = RTL8723BEPHY_REG_1TARRAYLEN; + array_table = RTL8723BEPHY_REG_1TARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xcdcdcdcd) { + 
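		/* Editor's sketch, not part of the patch: the overall shape of
		 * this conditional table walk. Entries with v1 below
		 * 0xCDCDCDCD are plain (offset, value) writes; anything larger
		 * opens a conditional block whose pairs are applied or
		 * discarded depending on _rtl8723be_check_condition(), with
		 * 0xDEAD/0xCDEF/0xCDCD in the value slot marking the end of a
		 * branch. Simplified -- the real code also skips ahead to the
		 * 0xDEAD terminator after a matched branch -- and write_reg()
		 * is a stand-in for _rtl8723be_config_bb_reg():
		 *
		 *	for (i = 0; i + 1 < len; i += 2) {
		 *		v1 = tbl[i]; v2 = tbl[i + 1];
		 *		if (v1 < 0xcdcdcdcd) {
		 *			write_reg(v1, v2);
		 *			continue;
		 *		}
		 *		for (i += 2; i + 1 < len; i += 2) {
		 *			v2 = tbl[i + 1];
		 *			if (v2 == 0xDEAD || v2 == 0xCDEF ||
		 *			    v2 == 0xCDCD)
		 *				break;
		 *			if (cond_ok)
		 *				write_reg(tbl[i], v2);
		 *		}
		 *	}
		 */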
_rtl8723be_config_bb_reg(hw, v1, v2); + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= arraylen - 2) + break; + + if (!_rtl8723be_check_condition(hw, + array_table[i])) { + /*Discard the following + *(offset, data) pairs + */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + /*Configure matched pairs and + *skip to end of if-else. + */ + } else { + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + _rtl8723be_config_bb_reg(hw, + v1, v2); + READ_NEXT_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + arraylen = RTL8723BEAGCTAB_1TARRAYLEN; + array_table = RTL8723BEAGCTAB_1TARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, array_table[i], + MASKDWORD, + array_table[i + 1]); + udelay(1); + continue; + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= arraylen - 2) + break; + + if (!_rtl8723be_check_condition(hw, + array_table[i])) { + /*Discard the following + *(offset, data) pairs + */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + /*Configure matched pairs and + *skip to end of if-else. + */ + } else { + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + rtl_set_bbreg(hw, array_table[i], + MASKDWORD, + array_table[i + 1]); + udelay(1); + READ_NEXT_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", + array_table[i], array_table[i + 1]); + } + } return true; } +static u8 _rtl8723be_get_rate_section_index(u32 regaddr) +{ + u8 index = 0; + + switch (regaddr) { + case RTXAGC_A_RATE18_06: + index = 0; + break; + case RTXAGC_A_RATE54_24: + index = 1; + break; + case RTXAGC_A_CCK1_MCS32: + index = 2; + break; + case RTXAGC_B_CCK11_A_CCK2_11: + index = 3; + break; + case RTXAGC_A_MCS03_MCS00: + index = 4; + break; + case RTXAGC_A_MCS07_MCS04: + index = 5; + break; + case RTXAGC_A_MCS11_MCS08: + index = 6; + break; + case RTXAGC_A_MCS15_MCS12: + index = 7; + break; + case RTXAGC_B_RATE18_06: + index = 0; + break; + case RTXAGC_B_RATE54_24: + index = 1; + break; + case RTXAGC_B_CCK1_55_MCS32: + index = 2; + break; + case RTXAGC_B_MCS03_MCS00: + index = 4; + break; + case RTXAGC_B_MCS07_MCS04: + index = 5; + break; + case RTXAGC_B_MCS11_MCS08: + index = 6; + break; + case RTXAGC_B_MCS15_MCS12: + index = 7; + break; + default: + regaddr &= 0xFFF; + if (regaddr >= 0xC20 && regaddr <= 0xC4C) + index = (u8)((regaddr - 0xC20) / 4); + else if (regaddr >= 0xE20 && regaddr <= 0xE4C) + index = (u8)((regaddr - 0xE20) / 4); + break; + }; + return index; +} + static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw, u32 band, u32 rfpath, u32 txnum, u32 regaddr, u32 bitmask, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 rate_section = 
_rtl8723be_get_rate_section_index(regaddr); if (band != BAND_ON_2_4G && band != BAND_ON_5G) { - RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, - "Invalid Band %d\n", band); + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band); return; } - - if (rfpath > TX_PWR_BY_RATE_NUM_RF) { - RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, + if (rfpath > MAX_RF_PATH - 1) { + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid RfPath %d\n", rfpath); return; } - if (txnum > TX_PWR_BY_RATE_NUM_RF) { - RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, - "Invalid TxNum %d\n", txnum); + if (txnum > MAX_RF_PATH - 1) { + RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum); return; } + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; + } static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, @@ -678,21 +749,6 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, _rtl8723be_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6); continue; - } else { - /*don't need the hw_body*/ - if (!_rtl8723be_check_condition(hw, - phy_regarray_table_pg[i])) { - i += 2; /* skip the pair of expression*/ - v1 = phy_regarray_table_pg[i]; - v2 = phy_regarray_table_pg[i+1]; - v3 = phy_regarray_table_pg[i+2]; - while (v2 != 0xDEAD) { - i += 3; - v1 = phy_regarray_table_pg[i]; - v2 = phy_regarray_table_pg[i+1]; - v3 = phy_regarray_table_pg[i+2]; - } - } } } } else { @@ -733,22 +789,27 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, v2 = radioa_array_table[i+1]; if (v1 < 0xcdcdcdcd) { _rtl8723be_config_rf_radio_a(hw, v1, v2); - } else { /*This line is the start line of branch.*/ + } else {/*This line is the start line of branch.*/ + /* to protect READ_NEXT_PAIR not overrun */ + if (i >= radioa_arraylen - 2) + break; + if (!_rtl8723be_check_condition(hw, radioa_array_table[i])) { - /* Discard the following - * (offset, data) pairs + /*Discard the following + *(offset, data) pairs */ READ_NEXT_RF_PAIR(v1, v2, i); while (v2 != 0xDEAD && v2 != 0xCDEF && v2 != 0xCDCD && - i < radioa_arraylen - 2) + i < radioa_arraylen - 2) { READ_NEXT_RF_PAIR(v1, v2, i); + } i -= 2; /* prevent from for-loop += 2*/ } else { - /* Configure matched pairs - * and skip to end of if-else. + /*Configure matched pairs + *and skip to end of if-else. 
*/ READ_NEXT_RF_PAIR(v1, v2, i); while (v2 != 0xDEAD && @@ -770,18 +831,12 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, if (rtlhal->oem_id == RT_CID_819X_HP) _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD); - break; case RF90_PATH_B: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); - break; case RF90_PATH_C: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); break; case RF90_PATH_D: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } @@ -791,26 +846,25 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; rtlphy->default_initialgain[0] = - (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[1] = - (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[2] = - (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); rtlphy->default_initialgain[3] = - (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + (u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Default initial gain (c50 = 0x%x, " - "c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3]); - - rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, + "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + + rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0); rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, MASKDWORD); @@ -823,7 +877,7 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 txpwr_level; long txpwr_dbm; @@ -854,6 +908,7 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, case DESC92C_RATE1M: rate_section = 2; break; + case DESC92C_RATE2M: case DESC92C_RATE5_5M: if (path == RF90_PATH_A) @@ -861,49 +916,58 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, else if (path == RF90_PATH_B) rate_section = 2; break; + case DESC92C_RATE11M: rate_section = 3; break; + case DESC92C_RATE6M: case DESC92C_RATE9M: case DESC92C_RATE12M: case DESC92C_RATE18M: rate_section = 0; break; + case DESC92C_RATE24M: case DESC92C_RATE36M: case DESC92C_RATE48M: case DESC92C_RATE54M: rate_section = 1; break; + case DESC92C_RATEMCS0: case DESC92C_RATEMCS1: case DESC92C_RATEMCS2: case DESC92C_RATEMCS3: rate_section = 4; break; + case DESC92C_RATEMCS4: case DESC92C_RATEMCS5: case DESC92C_RATEMCS6: case DESC92C_RATEMCS7: rate_section = 5; break; + case DESC92C_RATEMCS8: case DESC92C_RATEMCS9: case DESC92C_RATEMCS10: case DESC92C_RATEMCS11: rate_section = 6; break; + case DESC92C_RATEMCS12: case DESC92C_RATEMCS13: case DESC92C_RATEMCS14: case 
DESC92C_RATEMCS15: rate_section = 7; break; + default: RT_ASSERT(true, "Rate_Section is Illegal\n"); break; } + return rate_section; } @@ -912,7 +976,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw, enum radio_path rfpath, u8 rate) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u8 shift = 0, rate_section, tx_num; char tx_pwr_diff = 0; @@ -988,7 +1052,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Illegal channel!\n"); } - if (RTL8723E_RX_HAL_IS_CCK_RATE(rate)) + if (RX_HAL_IS_CCK_RATE(rate)) txpower = rtlefuse->txpwrlevel_cck[path][index]; else if (DESC92C_RATE6M <= rate) txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; @@ -997,7 +1061,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, "invalid rate\n"); if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M && - !RTL8723E_RX_HAL_IS_CCK_RATE(rate)) + !RX_HAL_IS_CCK_RATE(rate)) txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S]; if (bandwidth == HT_CHANNEL_WIDTH_20) { @@ -1011,6 +1075,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15) txpower += rtlefuse->txpwr_ht40diff[0][TX_2S]; } + if (rtlefuse->eeprom_regulatory != 2) power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw, BAND_ON_2_4G, @@ -1046,6 +1111,7 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE3, power_index); break; + case DESC92C_RATE6M: rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, MASKBYTE0, power_index); @@ -1062,6 +1128,7 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, MASKBYTE3, power_index); break; + case DESC92C_RATE24M: rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, MASKBYTE0, power_index); @@ -1078,6 +1145,7 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, MASKBYTE3, power_index); break; + case DESC92C_RATEMCS0: rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, power_index); @@ -1094,6 +1162,7 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, power_index); break; + case DESC92C_RATEMCS4: rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, power_index); @@ -1110,6 +1179,7 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, power_index); break; + case DESC92C_RATEMCS8: rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, power_index); @@ -1126,9 +1196,9 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, power_index); break; + default: - RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, - "Invalid Rate!!\n"); + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Rate!!\n"); break; } } else { @@ -1192,10 +1262,11 @@ void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) if (!is_hal_stop(rtlhal)) { switch (operation) { - case SCAN_OPT_BACKUP: - iotype = IO_CMD_PAUSE_DM_BY_SCAN; + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, (u8 *)&iotype); + break; case SCAN_OPT_RESTORE: iotype = 
IO_CMD_RESUME_DM_BY_SCAN; @@ -1214,15 +1285,15 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u8 reg_bw_opmode; u8 reg_prsr_rsc; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n", - rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? - "20MHz" : "40MHz"); + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz"); if (is_hal_stop(rtlhal)) { rtlphy->set_bwmode_inprogress = false; @@ -1254,13 +1325,17 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) case HT_CHANNEL_WIDTH_20: rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0); rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + /* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);*/ break; case HT_CHANNEL_WIDTH_20_40: rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1); rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND, (mac->cur_40_prime_sc >> 1)); rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc); + /*rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);*/ + rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)), (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); @@ -1279,7 +1354,7 @@ void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw, enum nl80211_channel_type ch_type) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tmp_bw = rtlphy->current_chan_bw; @@ -1300,7 +1375,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 delay; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, @@ -1310,11 +1385,11 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) do { if (!rtlphy->sw_chnl_inprogress) break; - if (!rtl8723be_phy_sw_chn_step_by_step(hw, - rtlphy->current_channel, - &rtlphy->sw_chnl_stage, - &rtlphy->sw_chnl_step, - &delay)) { + if (!_rtl8723be_phy_sw_chnl_step_by_step(hw, + rtlphy->current_channel, + &rtlphy->sw_chnl_stage, + &rtlphy->sw_chnl_step, + &delay)) { if (delay > 0) mdelay(delay); else @@ -1330,7 +1405,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (rtlphy->sw_chnl_inprogress) @@ -1345,25 +1420,23 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl8723be_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule " - "workitem current channel %d\n", - rtlphy->current_channel); + "sw_chnl_inprogress false schdule workitem current channel %d\n", + rtlphy->current_channel); rtlphy->sw_chnl_inprogress = false; } else { RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false driver sleep or" - " unload\n"); + "sw_chnl_inprogress false driver sleep or unload\n"); rtlphy->sw_chnl_inprogress = false; } return 1; } -static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, - u8 channel, u8 *stage, - u8 *step, u32 *delay) 
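For context on the renamed helper that follows: the channel switch runs as a small state machine. Three command arrays (pre-common, RF-dependent, post-common) are built once, and the caller keeps re-invoking this function while *stage/*step advance one command per call and *delay reports how long to mdelay() before the next step, so a switch that needs RF settling time is never executed as one long blocking sequence. A stripped-down sketch of that walker pattern, assuming a three-stage table terminated by CMDID_END; struct sw_cmd, cmd_run() and sw_chnl_step() are illustrative names, not the driver's:

struct sw_cmd {
	int cmdid;			/* 0 is taken to mean CMDID_END */
	unsigned int msdelay;
};

static void cmd_run(const struct sw_cmd *cmd)
{
	(void)cmd;			/* stand-in for the register writes */
}

/* Returns nonzero once all three stages have been consumed. */
static int sw_chnl_step(const struct sw_cmd *stages[3],
			unsigned char *stage, unsigned char *step,
			unsigned int *delay)
{
	const struct sw_cmd *cmd = &stages[*stage][*step];

	if (cmd->cmdid == 0) {		/* end of the current stage */
		if (++(*stage) == 3)
			return 1;	/* whole channel switch finished */
		*step = 0;
		*delay = 0;
		return 0;
	}

	cmd_run(cmd);			/* execute exactly one command */
	*delay = cmd->msdelay;		/* caller sleeps, then calls again */
	++(*step);
	return 0;
}

The payoff is the same as in the driver: rtl8723be_phy_sw_chnl_callback() can interleave mdelay(delay) between individual RF/BB writes instead of blocking for the full switch.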
+static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, + u8 *step, u32 *delay) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; u32 precommoncmdcnt; struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; @@ -1381,10 +1454,13 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, 0, 0, 0); rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + postcommoncmdcnt = 0; + rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, - 0, 0, 0); + 0, 0, 0); + rfdependcmdcnt = 0; RT_ASSERT((channel >= 1 && channel <= 14), @@ -1397,7 +1473,7 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, - CMDID_END, 0, 0, 0); + CMDID_END, 0, 0, 0); do { switch (*stage) { @@ -1410,6 +1486,10 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Invalid 'stage' = %d, Check it!\n", *stage); + return true; } if (currentcmd->cmdid == CMDID_END) { @@ -1432,11 +1512,11 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, break; case CMDID_WRITEPORT_USHORT: rtl_write_word(rtlpriv, currentcmd->para1, - (u16) currentcmd->para2); + (u16)currentcmd->para2); break; case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, - (u8) currentcmd->para2); + (u8)currentcmd->para2); break; case CMDID_RF_WRITEREG: for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { @@ -1451,7 +1531,7 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, } break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } @@ -1464,54 +1544,515 @@ static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, return false; } -static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) +static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw) { - u32 reg_eac, reg_e94, reg_e9c, reg_ea4; + u32 reg_eac, reg_e94, reg_e9c, tmp; u8 result = 0x00; - rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c); - rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c); - rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a); - rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000); - - rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911); - rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000); - rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000); + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + /* switch to path A */ + rtl_set_bbreg(hw, 0x948, MASKDWORD, 0x00000000); + /* enable path A PA in TXIQK mode */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x20000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0003f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xc7f87); + + /* 1. 
TX IQK */ + /* path-A IQK setting */ + /* IQK setting */ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + /* path-A IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x821403ea); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28160000); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + /* enter IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path A LOK & IQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); mdelay(IQK_DELAY_TIME); + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + /* Check failed */ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); - reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); if (!(reg_eac & BIT(28)) && (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + + /* Allen 20131125 */ + tmp = (reg_e9c & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) < 0x110) && + (((reg_e94 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + return result; } -static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8], - u8 c1, u8 c2) +/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */ +static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw) { - u32 i, j, diff, simularity_bitmap, bound; - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 reg_eac, reg_e94, reg_e9c, reg_ea4, u32tmp, tmp; + u8 result = 0x00; + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + /* switch to path A */ + rtl_set_bbreg(hw, 0x948, MASKDWORD, 0x00000000); + + /* 1 Get TXIMR setting */ + /* modify RXIQK mode table */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, 0x80000, 0x1); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0001f); + /* LNA2 off, PA on for Dcut */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7fb7); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* IQK setting */ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /* path-A IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160ff0); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28110000); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); + + /* enter IQK mode */ + rtl_set_bbreg(hw, 
RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path A LOK & IQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + + /* Allen 20131125 */ + tmp = (reg_e9c & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) < 0x110) && + (((reg_e94 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + + u32tmp = 0x80007C00 | (reg_e94 & 0x3FF0000) | + ((reg_e9c & 0x3FF0000) >> 16); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32tmp); + + /* 1 RX IQK */ + /* modify RXIQK mode table */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, 0x80000, 0x1); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0001f); + /* LAN2 on, PA off for Dcut */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7d77); + + /* PA, PAD setting */ + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0xf80); + rtl_set_rfreg(hw, RF90_PATH_A, 0x55, RFREG_OFFSET_MASK, 0x4021f); + + /* IQK setting */ + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /* path-A IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x2816001f); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a8d1); + + /* enter IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path A LOK & IQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, 0xdf, RFREG_OFFSET_MASK, 0x780); + + /* Allen 20131125 */ + tmp = (reg_eac & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; + /* if Tx is OK, check whether Rx is OK */ + if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + else if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) < 0x110) && + (((reg_ea4 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x02; + + return result; +} + +static u8 
_rtl8723be_phy_path_b_iqk(struct ieee80211_hw *hw) +{ + u32 reg_eac, reg_e94, reg_e9c, tmp; + u8 result = 0x00; + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + /* switch to path B */ + rtl_set_bbreg(hw, 0x948, MASKDWORD, 0x00000280); + + /* enable path B PA in TXIQK mode */ + rtl_set_rfreg(hw, RF90_PATH_A, 0xed, RFREG_OFFSET_MASK, 0x00020); + rtl_set_rfreg(hw, RF90_PATH_A, 0x43, RFREG_OFFSET_MASK, 0x40fc1); + + /* 1 Tx IQK */ + /* IQK setting */ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + /* path-A IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x821403ea); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28110000); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + + /* enter IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path B LOK & IQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + /* Allen 20131125 */ + tmp = (reg_e9c & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; - u8 final_candidate[2] = { 0xFF, 0xFF }; - bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version); + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) < 0x110) && + (((reg_e94 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x01; + else + return result; + + return result; +} + +/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */ +static u8 _rtl8723be_phy_path_b_rx_iqk(struct ieee80211_hw *hw) +{ + u32 reg_e94, reg_e9c, reg_ea4, reg_eac, u32tmp, tmp; + u8 result = 0x00; + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + /* switch to path B */ + rtl_set_bbreg(hw, 0x948, MASKDWORD, 0x00000280); + + /* 1 Get TXIMR setting */ + /* modify RXIQK mode table */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0001f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ff7); + + /* open PA S1 & SMIXER */ + rtl_set_rfreg(hw, RF90_PATH_A, 0xed, RFREG_OFFSET_MASK, 0x00020); + rtl_set_rfreg(hw, RF90_PATH_A, 0x43, RFREG_OFFSET_MASK, 0x60fed); + + /* IQK setting */ + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /* path-B IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, 
RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82160ff0); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x28110000); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a911); + /* enter IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path B TXIQK @ RXIQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + + /* Allen 20131125 */ + tmp = (reg_e9c & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) < 0x110) && + (((reg_e94 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x01; + else + return result; + + u32tmp = 0x80007C00 | (reg_e94 & 0x3FF0000) | + ((reg_e9c & 0x3FF0000) >> 16); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, u32tmp); + + /* 1 RX IQK */ + + /* <20121009, Kordan> RF Mode = 3 */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, 0x80000, 0x1); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0001f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7d77); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, 0x80000, 0x0); + + /* open PA S1 & close SMIXER */ + rtl_set_rfreg(hw, RF90_PATH_A, 0xed, RFREG_OFFSET_MASK, 0x00020); + rtl_set_rfreg(hw, RF90_PATH_A, 0x43, RFREG_OFFSET_MASK, 0x60fbd); + + /* IQK setting */ + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + /* path-B IQK setting */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1c); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x38008c1c); + + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x2816001f); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28110000); + + /* LO calibration setting */ + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x0046a8d1); + /* enter IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /* One shot, path B LOK & IQK */ + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); - if (is2t) - bound = 8; + mdelay(IQK_DELAY_TIME); + + /* leave IQK mode */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + /* Check failed */ + reg_eac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD); + + /* Allen 20131125 */ + tmp = (reg_eac & 0x03FF0000) >> 16; + if ((tmp & 0x200) > 0) + tmp = 0x400 - tmp; + + /* if Tx is OK, check whether Rx is OK */ + if (!(reg_eac & BIT(27)) && + 
(((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + else if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) < 0x110) && + (((reg_ea4 & 0x03FF0000) >> 16) > 0xf0) && + (tmp < 0xf)) + result |= 0x02; else - bound = 4; + return result; + + return result; +} + +static void _rtl8723be_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, + bool b_iqk_ok, + long result[][8], + u8 final_candidate, + bool btxonly) +{ + u32 oldval_1, x, tx1_a, reg; + long y, tx1_c; + + if (final_candidate == 0xFF) { + return; + } else if (b_iqk_ok) { + oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][4]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx1_a = (x * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27), + ((x * oldval_1 >> 7) & 0x1)); + y = result[final_candidate][5]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx1_c = (y * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, + ((tx1_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, + (tx1_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25), + ((y * oldval_1 >> 7) & 0x1)); + if (btxonly) + return; + reg = result[final_candidate][6]; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][7] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][7] >> 6) & 0xF; + /* rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); */ + } +} + +static bool _rtl8723be_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) +{ + u32 i, j, diff, simularity_bitmap, bound = 0; + + u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */ + bool bresult = true; /* is2t = true*/ + s32 tmp1 = 0, tmp2 = 0; + + bound = 8; simularity_bitmap = 0; for (i = 0; i < bound; i++) { - diff = (result[c1][i] > result[c2][i]) ? - (result[c1][i] - result[c2][i]) : - (result[c2][i] - result[c1][i]); + if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) { + if ((result[c1][i] & 0x00000200) != 0) + tmp1 = result[c1][i] | 0xFFFFFC00; + else + tmp1 = result[c1][i]; + + if ((result[c2][i] & 0x00000200) != 0) + tmp2 = result[c2][i] | 0xFFFFFC00; + else + tmp2 = result[c2][i]; + } else { + tmp1 = result[c1][i]; + tmp2 = result[c2][i]; + } + + diff = (tmp1 > tmp2) ? 
(tmp1 - tmp2) : (tmp2 - tmp1); if (diff > MAX_TOLERANCE) { if ((i == 2 || i == 6) && !simularity_bitmap) { @@ -1521,9 +2062,8 @@ static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8], final_candidate[(i / 4)] = c1; else simularity_bitmap |= (1 << i); - } else { + } else simularity_bitmap |= (1 << i); - } } } @@ -1537,15 +2077,23 @@ static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8], } } return bresult; - } else if (!(simularity_bitmap & 0x0F)) { - for (i = 0; i < 4; i++) - result[3][i] = result[c1][i]; - return false; - } else if (!(simularity_bitmap & 0xF0) && is2t) { - for (i = 4; i < 8; i++) - result[3][i] = result[c1][i]; - return false; } else { + if (!(simularity_bitmap & 0x03)) { /* path A TX OK */ + for (i = 0; i < 2; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0x0c)) { /* path A RX OK */ + for (i = 2; i < 4; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0x30)) { /* path B TX OK */ + for (i = 4; i < 6; i++) + result[3][i] = result[c1][i]; + } + if (!(simularity_bitmap & 0xc0)) { /* path B RX OK */ + for (i = 6; i < 8; i++) + result[3][i] = result[c1][i]; + } return false; } } @@ -1554,9 +2102,9 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], u8 t, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; u32 i; - u8 patha_ok; + u8 patha_ok, pathb_ok; u32 adda_reg[IQK_ADDA_REG_NUM] = { 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, @@ -1571,10 +2119,12 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c, 0x870, 0x860, - 0x864, 0x800 + 0x864, 0xa04 }; const u32 retrycount = 2; - u32 path_sel_bb, path_sel_rf; + + u32 path_sel_bb;/* path_sel_rf */ + u8 tmp_reg_c50, tmp_reg_c58; tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0); @@ -1591,62 +2141,97 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, } rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) { - rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, + rtlphy->rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); } - if (!rtlphy->rfpi_enable) - rtl8723_phy_pi_mode_switch(hw, true); path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD); - path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff); + rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); /*BB Setting*/ - rtl_set_bbreg(hw, 0x800, BIT(24), 0x00); + rtl_set_bbreg(hw, 0xa04, 0x0f000000, 0xf); rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600); rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4); rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000); - rtl_set_bbreg(hw, 0x870, BIT(10), 0x01); - rtl_set_bbreg(hw, 0x870, BIT(26), 0x01); - rtl_set_bbreg(hw, 0x860, BIT(10), 0x00); - rtl_set_bbreg(hw, 0x864, BIT(10), 0x00); - - if (is2t) - rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000); - rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); - rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000); - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); - rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00); - rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800); + /* path A TX IQK */ for (i = 0; i < retrycount; i++) { - patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t); + patha_ok = _rtl8723be_phy_path_a_iqk(hw); if (patha_ok == 0x01) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Path A Tx IQK Success!!\n"); + 
"Path A Tx IQK Success!!\n"); result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) & 0x3FF0000) >> 16; result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16; break; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path A Tx IQK Fail!!\n"); } } - - if (0 == patha_ok) + /* path A RX IQK */ + for (i = 0; i < retrycount; i++) { + patha_ok = _rtl8723be_phy_path_a_rx_iqk(hw); + if (patha_ok == 0x03) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path A Rx IQK Success!!\n"); + result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) & + 0x3FF0000) >> 16; + break; + } RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Path A IQK Success!!\n"); + "Path A Rx IQK Fail!!\n"); + } + + if (0x00 == patha_ok) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Fail!!\n"); + if (is2t) { - rtl8723_phy_path_a_standby(hw); - rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t); + /* path B TX IQK */ + for (i = 0; i < retrycount; i++) { + pathb_ok = _rtl8723be_phy_path_b_iqk(hw); + if (pathb_ok == 0x01) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path B Tx IQK Success!!\n"); + result[t][4] = (rtl_get_bbreg(hw, 0xe94, + MASKDWORD) & + 0x3FF0000) >> 16; + result[t][5] = (rtl_get_bbreg(hw, 0xe9c, + MASKDWORD) & + 0x3FF0000) >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path B Tx IQK Fail!!\n"); + } + /* path B RX IQK */ + for (i = 0; i < retrycount; i++) { + pathb_ok = _rtl8723be_phy_path_b_rx_iqk(hw); + if (pathb_ok == 0x03) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path B Rx IQK Success!!\n"); + result[t][6] = (rtl_get_bbreg(hw, 0xea4, + MASKDWORD) & + 0x3FF0000) >> 16; + result[t][7] = (rtl_get_bbreg(hw, 0xeac, + MASKDWORD) & + 0x3FF0000) >> 16; + break; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path B Rx IQK Fail!!\n"); + } } - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0); + /* Back to BB mode, load original value */ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0); if (t != 0) { - if (!rtlphy->rfpi_enable) - rtl8723_phy_pi_mode_switch(hw, false); rtl8723_phy_reload_adda_registers(hw, adda_reg, rtlphy->adda_backup, 16); rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg, @@ -1656,7 +2241,7 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, IQK_BB_REG_NUM); rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb); - rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf); + /*rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf);*/ rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50); rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50); @@ -1670,11 +2255,33 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n"); } +static u8 _get_right_chnl_place_for_iqk(u8 chnl) +{ + u8 channel_all[TARGET_CHNL_NUM_2G_5G] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 36, 38, 40, 42, 44, 46, + 48, 50, 52, 54, 56, 58, 60, 62, 64, + 100, 102, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, + 138, 140, 149, 151, 153, 155, 157, + 159, 161, 163, 165}; + u8 place = chnl; + + if (chnl > 14) { + for (place = 14; place < sizeof(channel_all); place++) { + if (channel_all[place] == chnl) + return place - 13; + } + } + return 0; +} + static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { - struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmpreg; u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + struct rtl_priv *rtlpriv = rtl_priv(hw); tmpreg = rtl_read_byte(rtlpriv, 0xd03); @@ -1702,7 +2309,10 @@ 
static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0); rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a); - mdelay(100); + /* In order not to disturb BT music when wifi init.(1ant NIC only) */ + /*mdelay(100);*/ + /* In order not to disturb BT music when wifi init.(1ant NIC only) */ + mdelay(50); rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0); @@ -1716,68 +2326,34 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) } else { rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); } - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); +RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); + } static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); - if (is_hal_stop(rtlhal)) { - u8 u1btmp; - u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0); - rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7)); - rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); - } - if (is2t) { - if (bmain) - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(6), 0x1); - else - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(6), 0x2); - } else { - rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0); - rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201); - - /* We use the RF definition of MAIN and AUX, - * left antenna and right antenna repectively. - * Default output at AUX. - */ - if (bmain) { - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, - BIT(14) | BIT(13) | BIT(12), 0); - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(4) | BIT(3), 0); - if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) - rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0); - } else { - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, - BIT(14) | BIT(13) | BIT(12), 1); - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(4) | BIT(3), 1); - if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) - rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1); - } - } + if (bmain) /* left antenna */ + rtl_set_bbreg(hw, 0x92C, MASKDWORD, 0x1); + else + rtl_set_bbreg(hw, 0x92C, MASKDWORD, 0x2); } #undef IQK_ADDA_REG_NUM #undef IQK_DELAY_TIME - -void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +/* IQK is merge from Merge Temp */ +void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_phy *rtlphy = &rtlpriv->phy; long result[4][8]; - u8 i, final_candidate; - bool patha_ok, pathb_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, - reg_ecc, reg_tmp = 0; + u8 i, final_candidate, idx; + bool b_patha_ok, b_pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4; + long reg_ecc, reg_tmp = 0; bool is12simular, is13simular, is23simular; u32 iqk_bb_reg[9] = { ROFDM0_XARXIQIMBALANCE, @@ -1790,12 +2366,23 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) ROFDM0_XDTXAFE, ROFDM0_RXIQEXTANTA }; + u32 path_sel_bb = 0; /* path_sel_rf = 0 */ - if (recovery) { + if (rtlphy->lck_inprogress) + return; + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + if (b_recovery) { rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9); 
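A note on how the calibration results get filed further down: _get_right_chnl_place_for_iqk() collapses every 2.4 GHz channel onto iqk_matrix slot 0 and gives each supported 5 GHz channel its own slot (its position in the channel list minus 13), falling back to slot 0 for anything it does not recognize. A self-contained version of the same lookup, with the channel table shortened for illustration and iqk_slot() used as a stand-in name:

#include <stdio.h>

static unsigned char iqk_slot(unsigned char chnl)
{
	static const unsigned char channel_all[] = {
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
		36, 38, 40, 42, 44, 46, 48, /* ...the driver continues to 165 */
	};
	unsigned char place;

	if (chnl > 14) {
		for (place = 14; place < sizeof(channel_all); place++)
			if (channel_all[place] == chnl)
				return place - 13;
	}
	return 0;			/* all of 2.4 GHz shares slot 0 */
}

int main(void)
{
	/* prints "0 1 3": channel 6 -> slot 0, 36 -> 1, 40 -> 3 */
	printf("%d %d %d\n", iqk_slot(6), iqk_slot(36), iqk_slot(40));
	return 0;
}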
return; } + /* Save RF Path */ + path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD); + /* path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff); */ for (i = 0; i < 8; i++) { result[0][i] = 0; @@ -1804,30 +2391,33 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) result[3][i] = 0; } final_candidate = 0xff; - patha_ok = false; - pathb_ok = false; + b_patha_ok = false; + b_pathb_ok = false; is12simular = false; is23simular = false; is13simular = false; for (i = 0; i < 3; i++) { - if (get_rf_type(rtlphy) == RF_2T2R) - _rtl8723be_phy_iq_calibrate(hw, result, i, true); - else - _rtl8723be_phy_iq_calibrate(hw, result, i, false); + _rtl8723be_phy_iq_calibrate(hw, result, i, true); if (i == 1) { - is12simular = phy_similarity_cmp(hw, result, 0, 1); + is12simular = _rtl8723be_phy_simularity_compare(hw, + result, + 0, 1); if (is12simular) { final_candidate = 0; break; } } if (i == 2) { - is13simular = phy_similarity_cmp(hw, result, 0, 2); + is13simular = _rtl8723be_phy_simularity_compare(hw, + result, + 0, 2); if (is13simular) { final_candidate = 0; break; } - is23simular = phy_similarity_cmp(hw, result, 1, 2); + is23simular = _rtl8723be_phy_simularity_compare(hw, + result, + 1, 2); if (is23simular) { final_candidate = 1; } else { @@ -1864,32 +2454,48 @@ void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) rtlphy->reg_ebc = reg_ebc; reg_ec4 = result[final_candidate][6]; reg_ecc = result[final_candidate][7]; - patha_ok = true; - pathb_ok = true; + b_patha_ok = true; + b_pathb_ok = true; } else { rtlphy->reg_e94 = 0x100; rtlphy->reg_eb4 = 0x100; rtlphy->reg_e9c = 0x0; rtlphy->reg_ebc = 0x0; } - if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ - rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + if (reg_e94 != 0) + rtl8723_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, final_candidate, (reg_ea4 == 0)); - if (final_candidate != 0xFF) { + if (reg_eb4 != 0) + _rtl8723be_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok, result, + final_candidate, + (reg_ec4 == 0)); + + idx = _get_right_chnl_place_for_iqk(rtlphy->current_channel); + + if (final_candidate < 4) { for (i = 0; i < IQK_MATRIX_REG_NUM; i++) - rtlphy->iqk_matrix[0].value[0][i] = + rtlphy->iqk_matrix[idx].value[0][i] = result[final_candidate][i]; - rtlphy->iqk_matrix[0].iqk_done = true; + rtlphy->iqk_matrix[idx].iqk_done = true; + } - rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9); + rtl8723_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); + + rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb); + /* rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff, path_sel_rf); */ + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); } void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; u32 timeout = 2000, timecount = 0; while (rtlpriv->mac80211.act_scanning && timecount < timeout) { @@ -1898,68 +2504,25 @@ void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw) } rtlphy->lck_inprogress = true; - RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Start!!! 
currentband %x delay %d ms\n", - rtlhal->current_bandtype, timecount); + rtlhal->current_bandtype, timecount); _rtl8723be_phy_lc_calibrate(hw, false); rtlphy->lck_inprogress = false; } -void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - if (rtlphy->apk_done) - return; - - return; -} - void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) { - _rtl8723be_phy_set_rfpath_switch(hw, bmain, false); -} - -static void rtl8723be_phy_set_io(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress); - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - rtlpriv->dm_digtable.cur_igvalue = - rtlphy->initgain_backup.xaagccore1; - /*rtl92c_dm_write_dig(hw);*/ - rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel); - rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = - rtlpriv->dm_digtable.cur_igvalue; - rtlpriv->dm_digtable.cur_igvalue = 0x17; - rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not process\n"); - break; - } - rtlphy->set_io_inprogress = false; - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - "(%#x)\n", rtlphy->current_io_type); + _rtl8723be_phy_set_rfpath_switch(hw, bmain, true); } bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - bool postprocessing = false; + struct rtl_phy *rtlphy = &rtlpriv->phy; + bool b_postprocessing = false; RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "-->IO Cmd(%#x), set_io_inprogress(%d)\n", @@ -1969,20 +2532,20 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) case IO_CMD_RESUME_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "[IO CMD] Resume DM after scan.\n"); - postprocessing = true; + b_postprocessing = true; break; - case IO_CMD_PAUSE_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "[IO CMD] Pause DM before scan.\n"); - postprocessing = true; + b_postprocessing = true; break; default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); break; } } while (false); - if (postprocessing && !rtlphy->set_io_inprogress) { + if (b_postprocessing && !rtlphy->set_io_inprogress) { rtlphy->set_io_inprogress = true; rtlphy->current_io_type = iotype; } else { @@ -1993,6 +2556,37 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) return true; } +static void rtl8723be_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_phy *rtlphy = &rtlpriv->phy; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; + /*rtl92c_dm_write_dig(hw);*/ + rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83); + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + 
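		/* Editor's sketch, not part of the patch: the pause/resume
		 * pairing handled here. PAUSE (below) backs up the current
		 * DIG initial gain, pins it to a quiet 0x17 and lowers the
		 * CCK CCA threshold to 0x40 for the scan; RESUME (above)
		 * restores the saved gain, re-programs tx power for the
		 * current channel and puts the CCA threshold back to 0x83.
		 * In stand-alone terms, with plain variables instead of the
		 * dm_digtable/rtlphy fields and set_bbreg()/set_txpower() as
		 * shorthand for rtl_set_bbreg()/rtl8723be_phy_set_txpower_level():
		 *
		 *	pause:	ig_backup   = cur_igvalue;
		 *		cur_igvalue = 0x17;
		 *		set_bbreg(RCCK0_CCA, 0xff0000, 0x40);
		 *	resume:	cur_igvalue = ig_backup;
		 *		set_txpower(current_channel);
		 *		set_bbreg(RCCK0_CCA, 0xff0000, 0x83);
		 */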
rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; + dm_digtable->cur_igvalue = 0x17; + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); +} + static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -2028,15 +2622,15 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, switch (rfpwr_state) { case ERFON: if ((ppsc->rfpwr_state == ERFOFF) && - RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 initialize_count = 0; + u32 initializecount = 0; do { - initialize_count++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while (!rtstatus && (initialize_count < 10)); + } while (!rtstatus && (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { @@ -2051,28 +2645,33 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK); else rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); + break; + case ERFOFF: for (queue_id = 0, i = 0; queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { ring = &pcipriv->dev.tx_ring[queue_id]; - if (skb_queue_len(&ring->queue) == 0) { + /* Don't check BEACON Q. + * BEACON Q is always not empty, + * because '_rtl8723be_cmd_send_packet' + */ + if (queue_id == BEACON_QUEUE || + skb_queue_len(&ring->queue) == 0) { queue_id++; continue; } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "eRf Off/Sleep: %d times " - "TcbBusyQueue[%d] =%d before " - "doze!\n", (i + 1), queue_id, - skb_queue_len(&ring->queue)); + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "\n ERFSLEEP: %d times " - "TcbBusyQueue[%d] = %d !\n", + "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", MAX_DOZE_WAITING_TIMES_9x, queue_id, skb_queue_len(&ring->queue)); @@ -2095,6 +2694,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, } } break; + case ERFSLEEP: if (ppsc->rfpwr_state == ERFOFF) break; @@ -2106,21 +2706,19 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, continue; } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "eRf Off/Sleep: %d times " - "TcbBusyQueue[%d] =%d before " - "doze!\n", (i + 1), queue_id, - skb_queue_len(&ring->queue)); + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "\n ERFSLEEP: %d times " - "TcbBusyQueue[%d] = %d !\n", - MAX_DOZE_WAITING_TIMES_9x, - queue_id, - skb_queue_len(&ring->queue)); + "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); break; } } @@ -2131,8 +2729,9 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, ppsc->last_sleep_jiffies = jiffies; _rtl8723be_phy_set_rf_sleep(hw); break; + default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case not process\n"); bresult = false; break; diff --git 
a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h index 444ef95bb6afa5b603028ada7e612ee606da0a13..6339738a0e3313e854fe5a8e4c7adeb41bf546e5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h @@ -26,22 +26,28 @@ #ifndef __RTL8723BE_PHY_H__ #define __RTL8723BE_PHY_H__ -/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/ +/* MAX_TX_COUNT must always set to 4, otherwise read efuse table sequence + * will be wrong. + */ #define MAX_TX_COUNT 4 #define TX_1S 0 #define TX_2S 1 +#define TX_3S 2 +#define TX_4S 3 #define MAX_POWER_INDEX 0x3F #define MAX_PRECMD_CNT 16 #define MAX_RFDEPENDCMD_CNT 16 -#define MAX_POSTCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 #define MAX_DOZE_WAITING_TIMES_9x 64 #define RT_CANNOT_IO(hw) false #define HIGHPOWER_RADIOA_ARRAYLEN 22 +#define TARGET_CHNL_NUM_2G_5G 59 + #define IQK_ADDA_REG_NUM 16 #define IQK_BB_REG_NUM 9 #define MAX_TOLERANCE 5 @@ -83,104 +89,19 @@ #define RTL92C_MAX_PATH_NUM 2 -enum hw90_block_e { - HW90_BLOCK_MAC = 0, - HW90_BLOCK_PHY0 = 1, - HW90_BLOCK_PHY1 = 2, - HW90_BLOCK_RF = 3, - HW90_BLOCK_MAXIMUM = 4, -}; - enum baseband_config_type { BASEBAND_CONFIG_PHY_REG = 0, BASEBAND_CONFIG_AGC_TAB = 1, }; -enum ra_offset_area { - RA_OFFSET_LEGACY_OFDM1, - RA_OFFSET_LEGACY_OFDM2, - RA_OFFSET_HT_OFDM1, - RA_OFFSET_HT_OFDM2, - RA_OFFSET_HT_OFDM3, - RA_OFFSET_HT_OFDM4, - RA_OFFSET_HT_CCK, -}; - -enum antenna_path { - ANTENNA_NONE, - ANTENNA_D, - ANTENNA_C, - ANTENNA_CD, - ANTENNA_B, - ANTENNA_BD, - ANTENNA_BC, - ANTENNA_BCD, - ANTENNA_A, - ANTENNA_AD, - ANTENNA_AC, - ANTENNA_ACD, - ANTENNA_AB, - ANTENNA_ABD, - ANTENNA_ABC, - ANTENNA_ABCD -}; - -struct r_antenna_select_ofdm { - u32 r_tx_antenna:4; - u32 r_ant_l:4; - u32 r_ant_non_ht:4; - u32 r_ant_ht1:4; - u32 r_ant_ht2:4; - u32 r_ant_ht_s1:4; - u32 r_ant_non_ht_s1:4; - u32 ofdm_txsc:2; - u32 reserved:2; -}; - -struct r_antenna_select_cck { - u8 r_cckrx_enable_2:2; - u8 r_cckrx_enable:2; - u8 r_ccktx_enable:4; -}; - - -struct efuse_contents { - u8 mac_addr[ETH_ALEN]; - u8 cck_tx_power_idx[6]; - u8 ht40_1s_tx_power_idx[6]; - u8 ht40_2s_tx_power_idx_diff[3]; - u8 ht20_tx_power_idx_diff[3]; - u8 ofdm_tx_power_idx_diff[3]; - u8 ht40_max_power_offset[3]; - u8 ht20_max_power_offset[3]; - u8 channel_plan; - u8 thermal_meter; - u8 rf_option[5]; - u8 version; - u8 oem_id; - u8 regulatory; -}; - -struct tx_power_struct { - u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 legacy_ht_txpowerdiff; - u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; - u8 pwrgroup_cnt; - u32 mcs_original_offset[4][16]; -}; - -enum _ANT_DIV_TYPE { - NO_ANTDIV = 0xFF, - CG_TRX_HW_ANTDIV = 0x01, - CGCS_RX_HW_ANTDIV = 0x02, +enum ant_div_type { + NO_ANTDIV = 0xFF, + CG_TRX_HW_ANTDIV = 0x01, + CGCS_RX_HW_ANTDIV = 0x02, FIXED_HW_ANTDIV = 0x03, - CG_TRX_SMART_ANTDIV = 0x04, - CGCS_RX_SW_ANTDIV = 0x05, + CG_TRX_SMART_ANTDIV = 0x04, + CGCS_RX_SW_ANTDIV = 0x05, + }; u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, @@ -206,7 +127,6 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw); u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw); void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); -void 
rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c index b5167e73fecfdebfdab140959b320e4ece6b04b6..a1bb1f6116fb867e97922689d8f856d605a04967 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c @@ -23,7 +23,7 @@ * *****************************************************************************/ -#include "pwrseqcmd.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h index a62f43ed8d325b14ad0cf4bbfaa47e2e9384977d..0fee5e0e55c29543b6b7ab253f6ad558e717c22e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h @@ -26,7 +26,9 @@ #ifndef __RTL8723BE_PWRSEQ_H__ #define __RTL8723BE_PWRSEQ_H__ -/* Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd +#include "../pwrseqcmd.h" +/** + * Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd * There are 6 HW Power States: * 0: POFF--Power Off * 1: PDN--Power Down @@ -35,7 +37,7 @@ * 4: LPS--Low Power State * 5: SUS--Suspend * - * The transition from different states are defined below + * The transision from different states are defined below * TRANS_CARDEMU_TO_ACT * TRANS_ACT_TO_CARDEMU * TRANS_CARDEMU_TO_SUS @@ -57,203 +59,320 @@ #define RTL8723B_TRANS_END_STEPS 1 #define RTL8723B_TRANS_CARDEMU_TO_ACT \ + /* format */ \ + /* comments here */ \ + /* {offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value}, */\ + /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + /*Delay 1ms*/ \ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS}, \ + /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/ \ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, \ + /* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[11]=0*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0}, \ + /* Disable USB suspend */ \ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \ + /* wait till 0x04[17] = 1 power ready*/ \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /* Enable USB suspend */ \ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \ + /* release WLON reset 0x04[16]=1*/ \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /* disable HWPDN 0x04[15]=0*/ \ {0x0005, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ + /* disable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \ + /* polling until return 0*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \ + /* Enable WL control XTAL setting*/ \ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, \ + /*Enable falling edge triggering interrupt*/ \ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*Enable GPIO9 interrupt mode*/ \ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*Enable GPIO9 input mode*/ \ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + /*Enable HSISR GPIO[C:0] interrupt*/ \ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*Enable HSISR GPIO9 interrupt*/ \ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*For GPIO9 internal pull high setting by test chip*/ \ {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)}, \ + /*For GPIO9 internal pull high setting*/ \ {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, #define RTL8723B_TRANS_ACT_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*0x1F[7:0] = 0 turn off RF*/ \ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ + /*0x4C[24] = 0x4F[0] = 0, */ \ + /*switch DPDT_SEL_P output from register 0x65[2] */ \ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + /*Enable rising edge triggering interrupt*/ \ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + /*0x04[9] = 1 turn off MAC by HW state machine*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \ + /* Enable BT control XTAL setting*/ \ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0}, \ + /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/ \ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ PWR_CMD_WRITE, BIT(5), BIT(5)}, \ + /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ PWR_CMD_WRITE, BIT(0), 0}, #define RTL8723B_TRANS_CARDEMU_TO_SUS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value },*/\ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, 
PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \ + /*0x04[12:11] = 2b'01 enable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\ + /*Set SDIO suspend local register*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, #define RTL8723B_TRANS_SUS_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*clear suspend enable and power down enable*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \ + /*Set SDIO suspend local register*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ + /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + /*0x04[12:11] = 2b'01enable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, #define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*0x07=0x20 , SOP option to disable BG/MB*/ \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \ + /*0x04[12:11] = 2b'01 enable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + /*0x04[10] = 1, enable SW LPS*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \ + /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, \ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + /*Set SDIO suspend local register*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, #define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*clear suspend enable and 
power down enable*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \ + /*Set SDIO suspend local register*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ + /*wait power state to suspend*/ \ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + /*0x04[12:11] = 2b'01enable WL suspend*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \ + /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + /*PCIe DMA start*/ \ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, #define RTL8723B_TRANS_CARDEMU_TO_PDN \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, \ PWR_CMD_WRITE, 0xFF, 0x20}, \ + /* 0x04[16] = 0*/ \ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + /* 0x04[15] = 1*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, #define RTL8723B_TRANS_PDN_TO_CARDEMU \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /* 0x04[15] = 0*/ \ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, #define RTL8723B_TRANS_ACT_TO_LPS \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*PCIe DMA stop*/ \ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /*Tx Pause*/ \ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /*Should be zero if no packet is transmitting*/ \ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + /*Should be zero if no packet is transmitting*/ \ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + /*CCK and OFDM are disabled,and clock are gated*/ \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + /*Delay 1us*/ \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \ + /*Whole BB is reset*/ \ {0x0002, PWR_CUT_ALL_MSK, 
PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + /*Reset MAC TRX*/ \ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03}, \ + /*check if removed later*/ \ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + /*When driver enter Sus/ Disable, enable LOP for BT*/ \ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00}, \ + /*Respond TxOK to scheduler*/ \ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, #define RTL8723B_TRANS_LPS_TO_ACT \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ + /*SDIO RPWM*/ \ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*USB RPWM*/ \ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*PCIe RPWM*/ \ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*Delay*/ \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \ + /*. 0x08[4] = 0 switch TSF to 40M*/ \ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + /*Polling 0x109[7]=0 TSF in 40M*/ \ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ + /*. 0x29[7:6] = 2b'00 enable BB clock*/ \ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \ + /*. 0x101[1] = 1*/ \ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + /*. 0x100[7:0] = 0xFF enable WMAC TRX*/ \ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + /*. 0x02[1:0] = 2b'11 enable BB macro*/ \ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \ + /*. 0x522 = 0*/ \ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, #define RTL8723B_TRANS_END \ + /* format */ \ + /* comments here */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \ PWR_CMD_END, 0, 0}, diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c deleted file mode 100644 index 4573310c707fb3b82a3fda0c1207d35f3224737c..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c +++ /dev/null @@ -1,139 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ - -#include "pwrseq.h" - -/* Description: - * This routine deal with the Power Configuration CMDs - * parsing for RTL8723/RTL8188E Series IC. - * Assumption: - * We should follow specific format which was released from HW SD. - * - * 2011.07.07, added by Roger. - */ -bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 fab_version, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]) - -{ - struct wlan_pwr_cfg pwr_cfg_cmd = {0}; - bool b_polling_bit = false; - u32 ary_idx = 0; - u8 value = 0; - u32 offset = 0; - u32 polling_count = 0; - u32 max_polling_cnt = 5000; - - do { - pwr_cfg_cmd = pwrcfgcmd[ary_idx]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "offset(%#x),cut_msk(%#x), fab_msk(%#x)," - "interface_msk(%#x), base(%#x), " - "cmd(%#x), msk(%#x), value(%#x)\n", - GET_PWR_CFG_OFFSET(pwr_cfg_cmd), - GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd), - GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), - GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd), - GET_PWR_CFG_BASE(pwr_cfg_cmd), - GET_PWR_CFG_CMD(pwr_cfg_cmd), - GET_PWR_CFG_MASK(pwr_cfg_cmd), - GET_PWR_CFG_VALUE(pwr_cfg_cmd)); - - if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) && - (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) && - (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) { - switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) { - case PWR_CMD_READ: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "PWR_CMD_READ\n"); - break; - case PWR_CMD_WRITE: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "PWR_CMD_WRITE\n"); - offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); - - /*Read the value from system register*/ - value = rtl_read_byte(rtlpriv, offset); - value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd))); - value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd) - & GET_PWR_CFG_MASK(pwr_cfg_cmd)); - - /*Write the value back to sytem register*/ - rtl_write_byte(rtlpriv, offset, value); - break; - case PWR_CMD_POLLING: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "PWR_CMD_POLLING\n"); - b_polling_bit = false; - offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); - - do { - value = rtl_read_byte(rtlpriv, offset); - - value &= GET_PWR_CFG_MASK(pwr_cfg_cmd); - if (value == - (GET_PWR_CFG_VALUE(pwr_cfg_cmd) & - GET_PWR_CFG_MASK(pwr_cfg_cmd))) - b_polling_bit = true; - else - udelay(10); - - if (polling_count++ > max_polling_cnt) - return false; - - } while (!b_polling_bit); - break; - case PWR_CMD_DELAY: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "PWR_CMD_DELAY\n"); - if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == - PWRSEQ_DELAY_US) - udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); - else - mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); - break; - case PWR_CMD_END: - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "rtlbe_hal_pwrseqcmdparsing(): " - "PWR_CMD_END\n"); - return true; - default: - RT_ASSERT(false, - "rtlbe_hal_pwrseqcmdparsing(): " - "Unknown 
CMD!!\n"); - break; - } - } - - ary_idx++; - } while (1); - - return true; -} diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h deleted file mode 100644 index ce14a3b5cb71dbcd60b6e1400491e644f33f491c..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h +++ /dev/null @@ -1,95 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ - -#ifndef __RTL8723BE_PWRSEQCMD_H__ -#define __RTL8723BE_PWRSEQCMD_H__ - -#include "../wifi.h" -/*---------------------------------------------*/ -/*The value of cmd: 4 bits */ -/*---------------------------------------------*/ -#define PWR_CMD_READ 0x00 -#define PWR_CMD_WRITE 0x01 -#define PWR_CMD_POLLING 0x02 -#define PWR_CMD_DELAY 0x03 -#define PWR_CMD_END 0x04 - -/* define the base address of each block */ -#define PWR_BASEADDR_MAC 0x00 -#define PWR_BASEADDR_USB 0x01 -#define PWR_BASEADDR_PCIE 0x02 -#define PWR_BASEADDR_SDIO 0x03 - -#define PWR_INTF_SDIO_MSK BIT(0) -#define PWR_INTF_USB_MSK BIT(1) -#define PWR_INTF_PCI_MSK BIT(2) -#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) - -#define PWR_FAB_TSMC_MSK BIT(0) -#define PWR_FAB_UMC_MSK BIT(1) -#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) - -#define PWR_CUT_TESTCHIP_MSK BIT(0) -#define PWR_CUT_A_MSK BIT(1) -#define PWR_CUT_B_MSK BIT(2) -#define PWR_CUT_C_MSK BIT(3) -#define PWR_CUT_D_MSK BIT(4) -#define PWR_CUT_E_MSK BIT(5) -#define PWR_CUT_F_MSK BIT(6) -#define PWR_CUT_G_MSK BIT(7) -#define PWR_CUT_ALL_MSK 0xFF - - -enum pwrseq_delay_unit { - PWRSEQ_DELAY_US, - PWRSEQ_DELAY_MS, -}; - -struct wlan_pwr_cfg { - u16 offset; - u8 cut_msk; - u8 fab_msk:4; - u8 interface_msk:4; - u8 base:4; - u8 cmd:4; - u8 msk; - u8 value; - -}; - -#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset -#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk -#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk -#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk -#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base -#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd -#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk -#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value - -bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, - u8 fab_version, u8 interface_type, - struct wlan_pwr_cfg pwrcfgcmd[]); - -#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h index 3006849ed439bde2b40e8858c847761e67433ed0..03581d2a5da04f716233b102fcc3f627bccbb018 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h +++ 
b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h @@ -78,11 +78,11 @@ #define REG_WOL_EVENT 0x0081 #define REG_MCUTSTCFG 0x0084 - #define REG_HIMR 0x00B0 #define REG_HISR 0x00B4 #define REG_HIMRE 0x00B8 #define REG_HISRE 0x00BC +#define REG_PMC_DBG_CTRL2 0x00CC #define REG_EFUSE_ACCESS 0x00CF @@ -95,7 +95,8 @@ #define REG_HPON_FSM 0x00EC #define REG_SYS_CFG 0x00F0 #define REG_GPIO_OUTSTS 0x00F4 -#define REG_SYS_CFG1 0x00F0 +#define REG_MAC_PHY_CTRL_NORMAL 0x00F8 +#define REG_SYS_CFG1 0x00FC #define REG_ROM_VERSION 0x00FD #define REG_CR 0x0100 @@ -170,8 +171,14 @@ #define REG_BKQ_DESA 0x0338 #define REG_RX_DESA 0x0340 -#define REG_DBI 0x0348 -#define REG_MDIO 0x0354 +#define REG_DBI_WDATA 0x0348 +#define REG_DBI_RDATA 0x034C +#define REG_DBI_CTRL 0x0350 +#define REG_DBI_ADDR 0x0350 +#define REG_DBI_FLAG 0x0352 +#define REG_MDIO_WDATA 0x0354 +#define REG_MDIO_RDATA 0x0356 +#define REG_MDIO_CTL 0x0358 #define REG_DBG_SEL 0x0360 #define REG_PCIE_HRPWM 0x0361 #define REG_PCIE_HCPWM 0x0363 @@ -180,7 +187,6 @@ #define REG_UART_TX_DESA 0x0370 #define REG_UART_RX_DESA 0x0378 - #define REG_HDAQ_DESA_NODEF 0x0000 #define REG_CMDQ_DESA_NODEF 0x0000 @@ -193,7 +199,6 @@ #define REG_BCNQ_INFORMATION 0x0418 #define REG_TXPKT_EMPTY 0x041A - #define REG_CPU_MGQ_INFORMATION 0x041C #define REG_FWHW_TXQ_CTRL 0x0420 #define REG_HWSEQ_CTRL 0x0423 @@ -207,9 +212,7 @@ #define REG_RARFRC 0x0438 #define REG_RRSR 0x0440 #define REG_ARFR0 0x0444 -#define REG_ARFR1 0x0448 -#define REG_ARFR2 0x044C -#define REG_ARFR3 0x0450 +#define REG_ARFR1 0x044C #define REG_AMPDU_MAX_TIME 0x0456 #define REG_AGGLEN_LMT 0x0458 #define REG_AMPDU_MIN_SPACE 0x045C @@ -223,7 +226,10 @@ #define REG_POWER_STAGE2 0x04B8 #define REG_PKT_LIFE_TIME 0x04C0 #define REG_STBC_SETTING 0x04C4 +#define REG_HT_SINGLE_AMPDU 0x04C7 + #define REG_PROT_MODE_CTRL 0x04C8 +#define REG_MAX_AGGR_NUM 0x04CA #define REG_BAR_MODE_CTRL 0x04CC #define REG_RA_TRY_RATE_AGG_LMT 0x04CF #define REG_EARLY_MODE_CONTROL 0x04D0 @@ -303,6 +309,7 @@ #define REG_EIFS 0x0642 #define REG_NAV_CTRL 0x0650 +#define REG_NAV_UPPER 0x0652 #define REG_BACAMCMD 0x0654 #define REG_BACAMCONTENT 0x0658 #define REG_LBDLY 0x0660 @@ -355,43 +362,43 @@ #define REG_NORMAL_SIE_MAC_ADDR 0xFE70 #define REG_NORMAL_SIE_STRING 0xFE80 -#define CR9346 REG_9346CR -#define MSR (REG_CR + 2) -#define ISR REG_HISR -#define TSFR REG_TSFTR +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR -#define MACIDR0 REG_MACID -#define MACIDR4 (REG_MACID + 4) +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) -#define PBP REG_PBP +#define PBP REG_PBP -#define IDR0 MACIDR0 -#define IDR4 MACIDR4 +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 -#define UNUSED_REGISTER 0x1BF -#define DCAM UNUSED_REGISTER -#define PSR UNUSED_REGISTER -#define BBADDR UNUSED_REGISTER -#define PHYDATAR UNUSED_REGISTER +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER -#define INVALID_BBRF_VALUE 0x12345678 +#define INVALID_BBRF_VALUE 0x12345678 -#define MAX_MSS_DENSITY_2T 0x13 -#define MAX_MSS_DENSITY_1T 0x0A +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A -#define CMDEEPROM_EN BIT(5) -#define CMDEEPROM_SEL BIT(4) -#define CMD9346CR_9356SEL BIT(4) -#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL) -#define AUTOLOAD_EFUSE CMDEEPROM_EN +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN | 
CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN -#define GPIOSEL_GPIO 0 -#define GPIOSEL_ENBT BIT(5) +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) -#define GPIO_IN REG_GPIO_PIN_CTRL -#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1) -#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2) -#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3) +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3) /* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */ #define HSIMR_GPIO12_0_INT_EN BIT(0) @@ -400,8 +407,7 @@ #define HSIMR_PDN_INT_EN BIT(7) #define HSIMR_GPIO9_INT_EN BIT(25) -/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */ - +/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */ #define HSISR_GPIO12_0_INT BIT(0) #define HSISR_SPS_OCP_INT BIT(5) #define HSISR_RON_INT_EN BIT(6) @@ -412,7 +418,6 @@ #define MSR_ADHOC 0x01 #define MSR_INFRA 0x02 #define MSR_AP 0x03 -#define MSR_MASK 0x03 #define RRSR_RSC_OFFSET 21 #define RRSR_SHORT_OFFSET 23 @@ -542,7 +547,8 @@ /********************************************* * 8723BE IMR/ISR bits -**********************************************/ +********************************************* +*/ #define IMR_DISABLED 0x0 /* IMR DW0(0x0060-0063) Bit 0-31 */ #define IMR_TXCCK BIT(30) /* TXRPT interrupt when @@ -644,7 +650,7 @@ #define RF_OPTION1 0x79 #define RF_OPTION2 0x7A #define RF_OPTION3 0x7B -#define RF_OPTION4 0xC3 +#define EEPROM_RF_BT_SETTING_8723B 0xC3 #define EEPROM_DEFAULT_PID 0x1234 #define EEPROM_DEFAULT_VID 0x5678 @@ -678,14 +684,11 @@ #define EEPROM_CLK 0x06 #define EEPROM_TESTR 0x08 - #define EEPROM_TXPOWERCCK 0x10 #define EEPROM_TXPOWERHT40_1S 0x16 #define EEPROM_TXPOWERHT20DIFF 0x1B #define EEPROM_TXPOWER_OFDMDIFF 0x1B - - #define EEPROM_TX_PWR_INX 0x10 #define EEPROM_CHANNELPLAN 0xB8 @@ -1198,7 +1201,7 @@ #define APP_MIC BIT(30) #define APP_FCS BIT(31) -#define _MIN_SPACE(x) ((x) & 0x7) +#define _MIN_SPACE(x) ((x) & 0x7) #define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) #define RXERR_TYPE_OFDM_PPDU 0 @@ -1216,105 +1219,105 @@ #define RXERR_TYPE_HT_MPDU_FAIL 12 #define RXERR_TYPE_RX_FULL_DROP 15 -#define RXERR_COUNTER_MASK 0xFFFFF -#define RXERR_RPT_RST BIT(27) -#define _RXERR_RPT_SEL(type) ((type) << 28) - -#define SCR_TXUSEDK BIT(0) -#define SCR_RXUSEDK BIT(1) -#define SCR_TXENCENABLE BIT(2) -#define SCR_RXDECENABLE BIT(3) -#define SCR_SKBYA2 BIT(4) -#define SCR_NOSKMC BIT(5) -#define SCR_TXBCUSEDK BIT(6) -#define SCR_RXBCUSEDK BIT(7) - -#define XCLK_VLD BIT(0) -#define ACLK_VLD BIT(1) -#define UCLK_VLD BIT(2) -#define PCLK_VLD BIT(3) -#define PCIRSTB BIT(4) -#define V15_VLD BIT(5) -#define TRP_B15V_EN BIT(7) -#define SIC_IDLE BIT(8) -#define BD_MAC2 BIT(9) -#define BD_MAC1 BIT(10) -#define IC_MACPHY_MODE BIT(11) -#define BT_FUNC BIT(16) -#define VENDOR_ID BIT(19) -#define PAD_HWPD_IDN BIT(22) -#define TRP_VAUX_EN BIT(23) -#define TRP_BT_EN BIT(24) -#define BD_PKG_SEL BIT(25) -#define BD_HCI_SEL BIT(26) -#define TYPE_ID BIT(27) - -#define USB_IS_HIGH_SPEED 0 -#define USB_IS_FULL_SPEED 1 -#define USB_SPEED_MASK BIT(5) - -#define USB_NORMAL_SIE_EP_MASK 0xF -#define USB_NORMAL_SIE_EP_SHIFT 4 - -#define USB_TEST_EP_MASK 0x30 -#define USB_TEST_EP_SHIFT 4 - -#define USB_AGG_EN BIT(3) - -#define MAC_ADDR_LEN 6 -#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ - -#define POLLING_LLT_THRESHOLD 20 -#define POLLING_READY_TIMEOUT_COUNT 3000 +#define RXERR_COUNTER_MASK 0xFFFFF +#define RXERR_RPT_RST 
BIT(27) +#define _RXERR_RPT_SEL(type) ((type) << 28) -#define MAX_MSS_DENSITY_2T 0x13 -#define MAX_MSS_DENSITY_1T 0x0A +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) +#define SCR_TXBCUSEDK BIT(6) +#define SCR_RXBCUSEDK BIT(7) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define USB_IS_HIGH_SPEED 0 +#define USB_IS_FULL_SPEED 1 +#define USB_SPEED_MASK BIT(5) + +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 + +#define USB_TEST_EP_MASK 0x30 +#define USB_TEST_EP_SHIFT 4 + +#define USB_AGG_EN BIT(3) + +#define MAC_ADDR_LEN 6 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ + +#define POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 3000 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A #define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) -#define EPROM_CMD_CONFIG 0x3 -#define EPROM_CMD_LOAD 1 - -#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE - -#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) - -#define RPMAC_RESET 0x100 -#define RPMAC_TXSTART 0x104 -#define RPMAC_TXLEGACYSIG 0x108 -#define RPMAC_TXHTSIG1 0x10c -#define RPMAC_TXHTSIG2 0x110 -#define RPMAC_PHYDEBUG 0x114 -#define RPMAC_TXPACKETNUM 0x118 -#define RPMAC_TXIDLE 0x11c -#define RPMAC_TXMACHEADER0 0x120 -#define RPMAC_TXMACHEADER1 0x124 -#define RPMAC_TXMACHEADER2 0x128 -#define RPMAC_TXMACHEADER3 0x12c -#define RPMAC_TXMACHEADER4 0x130 -#define RPMAC_TXMACHEADER5 0x134 -#define RPMAC_TXDADATYPE 0x138 -#define RPMAC_TXRANDOMSEED 0x13c -#define RPMAC_CCKPLCPPREAMBLE 0x140 -#define RPMAC_CCKPLCPHEADER 0x144 -#define RPMAC_CCKCRC16 0x148 -#define RPMAC_OFDMRXCRC32OK 0x170 -#define RPMAC_OFDMRXCRC32ER 0x174 -#define RPMAC_OFDMRXPARITYER 0x178 -#define RPMAC_OFDMRXCRC8ER 0x17c -#define RPMAC_CCKCRXRC16ER 0x180 -#define RPMAC_CCKCRXRC32ER 0x184 -#define RPMAC_CCKCRXRC32OK 0x188 -#define RPMAC_TXSTATUS 0x18c - -#define RFPGA0_RFMOD 0x800 - -#define RFPGA0_TXINFO 0x804 -#define RFPGA0_PSDFUNCTION 0x808 - -#define RFPGA0_TXGAINSTAGE 0x80c - -#define RFPGA0_RFTIMING1 0x810 -#define RFPGA0_RFTIMING2 0x814 +#define EPROM_CMD_CONFIG 0x3 +#define EPROM_CMD_LOAD 1 + +#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE + +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) + +#define RPMAC_RESET 0x100 +#define RPMAC_TXSTART 0x104 +#define RPMAC_TXLEGACYSIG 0x108 +#define RPMAC_TXHTSIG1 0x10c +#define RPMAC_TXHTSIG2 0x110 +#define RPMAC_PHYDEBUG 0x114 +#define RPMAC_TXPACKETNUM 0x118 +#define RPMAC_TXIDLE 0x11c +#define RPMAC_TXMACHEADER0 0x120 +#define RPMAC_TXMACHEADER1 0x124 +#define RPMAC_TXMACHEADER2 0x128 +#define RPMAC_TXMACHEADER3 0x12c +#define RPMAC_TXMACHEADER4 0x130 +#define RPMAC_TXMACHEADER5 0x134 +#define RPMAC_TXDADATYPE 0x138 +#define RPMAC_TXRANDOMSEED 0x13c +#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPHEADER 0x144 +#define RPMAC_CCKCRC16 0x148 +#define RPMAC_OFDMRXCRC32OK 0x170 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC8ER 0x17c +#define RPMAC_CCKCRXRC16ER 0x180 
+#define RPMAC_CCKCRXRC32ER 0x184 +#define RPMAC_CCKCRXRC32OK 0x188 +#define RPMAC_TXSTATUS 0x18c + +#define RFPGA0_RFMOD 0x800 + +#define RFPGA0_TXINFO 0x804 +#define RFPGA0_PSDFUNCTION 0x808 + +#define RFPGA0_TXGAINSTAGE 0x80c + +#define RFPGA0_RFTIMING1 0x810 +#define RFPGA0_RFTIMING2 0x814 #define RFPGA0_XA_HSSIPARAMETER1 0x820 #define RFPGA0_XA_HSSIPARAMETER2 0x824 @@ -1385,7 +1388,6 @@ #define RCCK0_FACOUNTERUPPER 0xa58 #define RCCK0_CCA_CNT 0xa60 - /* PageB(0xB00) */ #define RPDP_ANTA 0xb00 #define RPDP_ANTA_4 0xb04 @@ -1399,7 +1401,7 @@ #define RPDP_ANTA_24 0xb24 #define RCONFIG_PMPD_ANTA 0xb28 -#define CONFIG_RAM64X16 0xb2c +#define RCONFIG_ram64x16 0xb2c #define RBNDA 0xb30 #define RHSSIPAR 0xb34 @@ -1494,7 +1496,6 @@ #define ROFDM0_FRAMESYNC 0xcf0 #define ROFDM0_DFSREPORT 0xcf4 - #define ROFDM1_LSTF 0xd00 #define ROFDM1_TRXPATHENABLE 0xd04 @@ -1593,144 +1594,144 @@ #define RSLEEP 0xee0 #define RPMPD_ANAEN 0xeec -#define RZEBRA1_HSSIENABLE 0x0 -#define RZEBRA1_TRXENABLE1 0x1 -#define RZEBRA1_TRXENABLE2 0x2 -#define RZEBRA1_AGC 0x4 -#define RZEBRA1_CHARGEPUMP 0x5 -#define RZEBRA1_CHANNEL 0x7 - -#define RZEBRA1_TXGAIN 0x8 -#define RZEBRA1_TXLPF 0x9 -#define RZEBRA1_RXLPF 0xb -#define RZEBRA1_RXHPFCORNER 0xc - -#define RGLOBALCTRL 0 -#define RRTL8256_TXLPF 19 -#define RRTL8256_RXLPF 11 -#define RRTL8258_TXLPF 0x11 -#define RRTL8258_RXLPF 0x13 -#define RRTL8258_RSSILPF 0xa - -#define RF_AC 0x00 - -#define RF_IQADJ_G1 0x01 -#define RF_IQADJ_G2 0x02 -#define RF_POW_TRSW 0x05 - -#define RF_GAIN_RX 0x06 -#define RF_GAIN_TX 0x07 - -#define RF_TXM_IDAC 0x08 -#define RF_BS_IQGEN 0x0F - -#define RF_MODE1 0x10 -#define RF_MODE2 0x11 - -#define RF_RX_AGC_HP 0x12 -#define RF_TX_AGC 0x13 -#define RF_BIAS 0x14 -#define RF_IPA 0x15 -#define RF_POW_ABILITY 0x17 -#define RF_MODE_AG 0x18 -#define RRFCHANNEL 0x18 -#define RF_CHNLBW 0x18 -#define RF_TOP 0x19 - -#define RF_RX_G1 0x1A -#define RF_RX_G2 0x1B - -#define RF_RX_BB2 0x1C -#define RF_RX_BB1 0x1D - -#define RF_RCK1 0x1E -#define RF_RCK2 0x1F - -#define RF_TX_G1 0x20 -#define RF_TX_G2 0x21 -#define RF_TX_G3 0x22 - -#define RF_TX_BB1 0x23 -#define RF_T_METER 0x42 - -#define RF_SYN_G1 0x25 -#define RF_SYN_G2 0x26 -#define RF_SYN_G3 0x27 -#define RF_SYN_G4 0x28 -#define RF_SYN_G5 0x29 -#define RF_SYN_G6 0x2A -#define RF_SYN_G7 0x2B -#define RF_SYN_G8 0x2C - -#define RF_RCK_OS 0x30 -#define RF_TXPA_G1 0x31 -#define RF_TXPA_G2 0x32 -#define RF_TXPA_G3 0x33 - -#define RF_TX_BIAS_A 0x35 -#define RF_TX_BIAS_D 0x36 -#define RF_LOBF_9 0x38 -#define RF_RXRF_A3 0x3C -#define RF_TRSW 0x3F - -#define RF_TXRF_A2 0x41 -#define RF_TXPA_G4 0x46 -#define RF_TXPA_A4 0x4B - -#define RF_WE_LUT 0xEF - -#define BBBRESETB 0x100 -#define BGLOBALRESETB 0x200 -#define BOFDMTXSTART 0x4 -#define BCCKTXSTART 0x8 -#define BCRC32DEBUG 0x100 -#define BPMACLOOPBACK 0x10 -#define BTXLSIG 0xffffff -#define BOFDMTXRATE 0xf -#define BOFDMTXRESERVED 0x10 -#define BOFDMTXLENGTH 0x1ffe0 -#define BOFDMTXPARITY 0x20000 -#define BTXHTSIG1 0xffffff -#define BTXHTMCSRATE 0x7f -#define BTXHTBW 0x80 -#define BTXHTLENGTH 0xffff00 -#define BTXHTSIG2 0xffffff -#define BTXHTSMOOTHING 0x1 -#define BTXHTSOUNDING 0x2 -#define BTXHTRESERVED 0x4 -#define BTXHTAGGREATION 0x8 -#define BTXHTSTBC 0x30 -#define BTXHTADVANCECODING 0x40 -#define BTXHTSHORTGI 0x80 -#define BTXHTNUMBERHT_LTF 0x300 -#define BTXHTCRC8 0x3fc00 -#define BCOUNTERRESET 0x10000 -#define BNUMOFOFDMTX 0xffff -#define BNUMOFCCKTX 0xffff0000 -#define BTXIDLEINTERVAL 0xffff -#define BOFDMSERVICE 0xffff0000 -#define BTXMACHEADER 0xffffffff 
-#define BTXDATAINIT 0xff -#define BTXHTMODE 0x100 -#define BTXDATATYPE 0x30000 -#define BTXRANDOMSEED 0xffffffff -#define BCCKTXPREAMBLE 0x1 -#define BCCKTXSFD 0xffff0000 -#define BCCKTXSIG 0xff -#define BCCKTXSERVICE 0xff00 -#define BCCKLENGTHEXT 0x8000 -#define BCCKTXLENGHT 0xffff0000 -#define BCCKTXCRC16 0xffff -#define BCCKTXSTATUS 0x1 -#define BOFDMTXSTATUS 0x2 +#define RZEBRA1_HSSIENABLE 0x0 +#define RZEBRA1_TRXENABLE1 0x1 +#define RZEBRA1_TRXENABLE2 0x2 +#define RZEBRA1_AGC 0x4 +#define RZEBRA1_CHARGEPUMP 0x5 +#define RZEBRA1_CHANNEL 0x7 + +#define RZEBRA1_TXGAIN 0x8 +#define RZEBRA1_TXLPF 0x9 +#define RZEBRA1_RXLPF 0xb +#define RZEBRA1_RXHPFCORNER 0xc + +#define RGLOBALCTRL 0 +#define RRTL8256_TXLPF 19 +#define RRTL8256_RXLPF 11 +#define RRTL8258_TXLPF 0x11 +#define RRTL8258_RXLPF 0x13 +#define RRTL8258_RSSILPF 0xa + +#define RF_AC 0x00 + +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 + +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 + +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F + +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 + +#define RF_RX_AGC_HP 0x12 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 +#define RF_POW_ABILITY 0x17 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 +#define BGLOBALRESETB 0x200 +#define BOFDMTXSTART 0x4 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 +#define BPMACLOOPBACK 0x10 +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf +#define BOFDMTXRESERVED 0x10 +#define BOFDMTXLENGTH 0x1ffe0 +#define BOFDMTXPARITY 0x20000 +#define BTXHTSIG1 0xffffff +#define BTXHTMCSRATE 0x7f +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff +#define BTXHTSMOOTHING 0x1 +#define BTXHTSOUNDING 0x2 +#define BTXHTRESERVED 0x4 +#define BTXHTAGGREATION 0x8 +#define BTXHTSTBC 0x30 +#define BTXHTADVANCECODING 0x40 +#define BTXHTSHORTGI 0x80 +#define BTXHTNUMBERHT_LTF 0x300 +#define BTXHTCRC8 0x3fc00 +#define BCOUNTERRESET 0x10000 +#define BNUMOFOFDMTX 0xffff +#define BNUMOFCCKTX 0xffff0000 +#define BTXIDLEINTERVAL 0xffff +#define BOFDMSERVICE 0xffff0000 +#define BTXMACHEADER 0xffffffff +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 +#define BTXRANDOMSEED 0xffffffff +#define BCCKTXPREAMBLE 0x1 +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff +#define BCCKTXSERVICE 0xff00 +#define BCCKLENGTHEXT 0x8000 +#define BCCKTXLENGHT 0xffff0000 +#define BCCKTXCRC16 0xffff +#define BCCKTXSTATUS 0x1 +#define BOFDMTXSTATUS 0x2 #define IS_BB_REG_OFFSET_92S(_offset) \ ((_offset >= 0x800) && (_offset <= 0xfff)) -#define BRFMOD 0x1 -#define BJAPANMODE 0x2 -#define BCCKTXSC 0x30 -#define BCCKEN 0x1000000 -#define BOFDMEN 
0x2000000 +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +#define BCCKEN 0x1000000 +#define BOFDMEN 0x2000000 #define BOFDMRXADCPHASE 0x10000 #define BOFDMTXDACPHASE 0x40000 @@ -1824,13 +1825,13 @@ #define BDA6SWING 0x380000 #define BADCLKPHASE 0x4000000 -#define B80MCLKDELAY 0x18000000 -#define BAFEWATCHDOGENABLE 0x20000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 -#define BXTALCAP01 0xc0000000 -#define BXTALCAP23 0x3 +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 #define BXTALCAP92X 0x0f000000 -#define BXTALCAP 0x0f000000 +#define BXTALCAP 0x0f000000 #define BINTDIFCLKENABLE 0x400 #define BEXTSIGCLKENABLE 0x800 @@ -1857,7 +1858,7 @@ #define BCCKRX_AGC_FORMAT 0x200 #define BPSDFFT_SAMPLE_POINT 0xc000 #define BPSD_AVERAGE_NUM 0x3000 -#define BIQPATH_CONTROL 0xc00 +#define BIQPATH_CONTROL 0xc00 #define BPSD_FREQ 0x3ff #define BPSD_ANTENNA_PATH 0x30 #define BPSD_IQ_SWITCH 0x40 @@ -1957,300 +1958,316 @@ #define BCCK_DEFAULT_RXPATH 0xc000000 #define BCCK_OPTION_RXPATH 0x3000000 -#define BNUM_OFSTF 0x3 -#define BSHIFT_L 0xc0 -#define BGI_TH 0xc -#define BRXPATH_A 0x1 -#define BRXPATH_B 0x2 -#define BRXPATH_C 0x4 -#define BRXPATH_D 0x8 -#define BTXPATH_A 0x1 -#define BTXPATH_B 0x2 -#define BTXPATH_C 0x4 -#define BTXPATH_D 0x8 -#define BTRSSI_FREQ 0x200 -#define BADC_BACKOFF 0x3000 -#define BDFIR_BACKOFF 0xc000 -#define BTRSSI_LATCH_PHASE 0x10000 -#define BRX_LDC_OFFSET 0xff -#define BRX_QDC_OFFSET 0xff00 -#define BRX_DFIR_MODE 0x1800000 -#define BRX_DCNF_TYPE 0xe000000 -#define BRXIQIMB_A 0x3ff -#define BRXIQIMB_B 0xfc00 -#define BRXIQIMB_C 0x3f0000 -#define BRXIQIMB_D 0xffc00000 -#define BDC_DC_NOTCH 0x60000 -#define BRXNB_NOTCH 0x1f000000 -#define BPD_TH 0xf -#define BPD_TH_OPT2 0xc000 -#define BPWED_TH 0x700 -#define BIFMF_WIN_L 0x800 -#define BPD_OPTION 0x1000 -#define BMF_WIN_L 0xe000 -#define BBW_SEARCH_L 0x30000 -#define BWIN_ENH_L 0xc0000 -#define BBW_TH 0x700000 -#define BED_TH2 0x3800000 -#define BBW_OPTION 0x4000000 -#define BRADIO_TH 0x18000000 -#define BWINDOW_L 0xe0000000 -#define BSBD_OPTION 0x1 -#define BFRAME_TH 0x1c -#define BFS_OPTION 0x60 -#define BDC_SLOPE_CHECK 0x80 -#define BFGUARD_COUNTER_DC_L 0xe00 -#define BFRAME_WEIGHT_SHORT 0x7000 -#define BSUB_TUNE 0xe00000 -#define BFRAME_DC_LENGTH 0xe000000 -#define BSBD_START_OFFSET 0x30000000 -#define BFRAME_TH_2 0x7 -#define BFRAME_GI2_TH 0x38 -#define BGI2_SYNC_EN 0x40 -#define BSARCH_SHORT_EARLY 0x300 -#define BSARCH_SHORT_LATE 0xc00 -#define BSARCH_GI2_LATE 0x70000 -#define BCFOANTSUM 0x1 -#define BCFOACC 0x2 -#define BCFOSTARTOFFSET 0xc -#define BCFOLOOPBACK 0x70 -#define BCFOSUMWEIGHT 0x80 -#define BDAGCENABLE 0x10000 -#define BTXIQIMB_A 0x3ff -#define BTXIQIMB_b 0xfc00 -#define BTXIQIMB_C 0x3f0000 -#define BTXIQIMB_D 0xffc00000 -#define BTXIDCOFFSET 0xff -#define BTXIQDCOFFSET 0xff00 -#define BTXDFIRMODE 0x10000 -#define BTXPESUDO_NOISEON 0x4000000 -#define BTXPESUDO_NOISE_A 0xff -#define BTXPESUDO_NOISE_B 0xff00 -#define BTXPESUDO_NOISE_C 0xff0000 -#define BTXPESUDO_NOISE_D 0xff000000 -#define BCCA_DROPOPTION 0x20000 -#define BCCA_DROPTHRES 0xfff00000 -#define BEDCCA_H 0xf -#define BEDCCA_L 0xf0 -#define BLAMBDA_ED 0x300 -#define BRX_INITIALGAIN 0x7f -#define BRX_ANTDIV_EN 0x80 +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define 
BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 #define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 -#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_HIGHPOWER_FLOW 0x8000 #define BRX_AGC_FREEZE_THRES 0xc0000 -#define BRX_FREEZESTEP_AGC1 0x300000 -#define BRX_FREEZESTEP_AGC2 0xc00000 -#define BRX_FREEZESTEP_AGC3 0x3000000 -#define BRX_FREEZESTEP_AGC0 0xc000000 -#define BRXRSSI_CMP_EN 0x10000000 -#define BRXQUICK_AGCEN 0x20000000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 #define BRXAGC_FREEZE_THRES_MODE 0x40000000 -#define BRX_OVERFLOW_CHECKTYPE 0x80000000 -#define BRX_AGCSHIFT 0x7f -#define BTRSW_TRI_ONLY 0x80 -#define BPOWER_THRES 0x300 -#define BRXAGC_EN 0x1 -#define BRXAGC_TOGETHER_EN 0x2 -#define BRXAGC_MIN 0x4 -#define BRXHP_INI 0x7 -#define BRXHP_TRLNA 0x70 -#define BRXHP_RSSI 0x700 -#define BRXHP_BBP1 0x7000 -#define BRXHP_BBP2 0x70000 -#define BRXHP_BBP3 0x700000 -#define BRSSI_H 0x7f0000 -#define BRSSI_GEN 0x7f000000 -#define BRXSETTLE_TRSW 0x7 -#define BRXSETTLE_LNA 0x38 -#define BRXSETTLE_RSSI 0x1c0 -#define BRXSETTLE_BBP 0xe00 -#define BRXSETTLE_RXHP 0x7000 -#define BRXSETTLE_ANTSW_RSSI 0x38000 -#define BRXSETTLE_ANTSW 0xc0000 -#define BRXPROCESS_TIME_DAGC 0x300000 -#define BRXSETTLE_HSSI 0x400000 -#define BRXPROCESS_TIME_BBPPW 0x800000 -#define BRXANTENNA_POWER_SHIFT 0x3000000 -#define BRSSI_TABLE_SELECT 0xc000000 -#define BRXHP_FINAL 0x7000000 -#define BRXHPSETTLE_BBP 0x7 -#define BRXHTSETTLE_HSSI 0x8 -#define BRXHTSETTLE_RXHP 0x70 -#define BRXHTSETTLE_BBPPW 0x80 
-#define BRXHTSETTLE_IDLE 0x300 -#define BRXHTSETTLE_RESERVED 0x1c00 -#define BRXHT_RXHP_EN 0x8000 -#define BRXAGC_FREEZE_THRES 0x30000 -#define BRXAGC_TOGETHEREN 0x40000 -#define BRXHTAGC_MIN 0x80000 -#define BRXHTAGC_EN 0x100000 -#define BRXHTDAGC_EN 0x200000 -#define BRXHT_RXHP_BBP 0x1c00000 -#define BRXHT_RXHP_FINAL 0xe0000000 -#define BRXPW_RADIO_TH 0x3 -#define BRXPW_RADIO_EN 0x4 -#define BRXMF_HOLD 0x3800 -#define BRXPD_DELAY_TH1 0x38 -#define BRXPD_DELAY_TH2 0x1c0 -#define BRXPD_DC_COUNT_MAX 0x600 -#define BRXPD_DELAY_TH 0x8000 -#define BRXPROCESS_DELAY 0xf0000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 #define BRXSEARCHRANGE_GI2_EARLY 0x700000 #define BRXFRAME_FUARD_COUNTER_L 0x3800000 -#define BRXSGI_GUARD_L 0xc000000 -#define BRXSGI_SEARCH_L 0x30000000 -#define BRXSGI_TH 0xc0000000 -#define BDFSCNT0 0xff -#define BDFSCNT1 0xff00 -#define BDFSFLAG 0xf0000 -#define BMF_WEIGHT_SUM 0x300000 -#define BMINIDX_TH 0x7f000000 -#define BDAFORMAT 0x40000 -#define BTXCH_EMU_ENABLE 0x01000000 -#define BTRSW_ISOLATION_A 0x7f -#define BTRSW_ISOLATION_B 0x7f00 -#define BTRSW_ISOLATION_C 0x7f0000 -#define BTRSW_ISOLATION_D 0x7f000000 -#define BEXT_LNA_GAIN 0x7c00 - -#define BSTBC_EN 0x4 -#define BANTENNA_MAPPING 0x10 -#define BNSS 0x20 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 #define BCFO_ANTSUM_ID 0x200 -#define BPHY_COUNTER_RESET 0x8000000 -#define BCFO_REPORT_GET 0x4000000 -#define BOFDM_CONTINUE_TX 0x10000000 -#define BOFDM_SINGLE_CARRIER 0x20000000 -#define BOFDM_SINGLE_TONE 0x40000000 -#define BHT_DETECT 
0x100 -#define BCFOEN 0x10000 -#define BCFOVALUE 0xfff00000 -#define BSIGTONE_RE 0x3f -#define BSIGTONE_IM 0x7f00 -#define BCOUNTER_CCA 0xffff -#define BCOUNTER_PARITYFAIL 0xffff0000 -#define BCOUNTER_RATEILLEGAL 0xffff -#define BCOUNTER_CRC8FAIL 0xffff0000 -#define BCOUNTER_MCSNOSUPPORT 0xffff -#define BCOUNTER_FASTSYNC 0xffff -#define BSHORTCFO 0xfff -#define BSHORTCFOT_LENGTH 12 -#define BSHORTCFOF_LENGTH 11 -#define BLONGCFO 0x7ff -#define BLONGCFOT_LENGTH 11 -#define BLONGCFOF_LENGTH 11 -#define BTAILCFO 0x1fff -#define BTAILCFOT_LENGTH 13 -#define BTAILCFOF_LENGTH 12 -#define BNOISE_EN_PWDB 0xffff -#define BCC_POWER_DB 0xffff0000 -#define BMOISE_PWDB 0xffff -#define BPOWERMEAST_LENGTH 10 -#define BPOWERMEASF_LENGTH 3 -#define BRX_HT_BW 0x1 -#define BRXSC 0x6 -#define BRX_HT 0x8 -#define BNB_INTF_DET_ON 0x1 -#define BINTF_WIN_LEN_CFG 0x30 -#define BNB_INTF_TH_CFG 0x1c0 -#define BRFGAIN 0x3f -#define BTABLESEL 0x40 -#define BTRSW 0x80 -#define BRXSNR_A 0xff -#define BRXSNR_B 0xff00 -#define BRXSNR_C 0xff0000 -#define BRXSNR_D 0xff000000 -#define BSNR_EVMT_LENGTH 8 -#define BSNR_EVMF_LENGTH 1 -#define BCSI1ST 0xff -#define BCSI2ND 0xff00 -#define BRXEVM1ST 0xff0000 -#define BRXEVM2ND 0xff000000 -#define BSIGEVM 0xff -#define BPWDB 0xff00 -#define BSGIEN 0x10000 - -#define BSFACTOR_QMA1 0xf -#define BSFACTOR_QMA2 0xf0 -#define BSFACTOR_QMA3 0xf00 -#define BSFACTOR_QMA4 0xf000 -#define BSFACTOR_QMA5 0xf0000 -#define BSFACTOR_QMA6 0xf0000 -#define BSFACTOR_QMA7 0xf00000 -#define BSFACTOR_QMA8 0xf000000 -#define BSFACTOR_QMA9 0xf0000000 -#define BCSI_SCHEME 0x100000 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 #define BNOISE_LVL_TOP_SET 0x3 -#define BCHSMOOTH 0x4 -#define BCHSMOOTH_CFG1 0x38 -#define BCHSMOOTH_CFG2 0x1c0 -#define BCHSMOOTH_CFG3 0xe00 -#define 
BCHSMOOTH_CFG4 0x7000 -#define BMRCMODE 0x800000 -#define BTHEVMCFG 0x7000000 - -#define BLOOP_FIT_TYPE 0x1 -#define BUPD_CFO 0x40 -#define BUPD_CFO_OFFDATA 0x80 -#define BADV_UPD_CFO 0x100 -#define BADV_TIME_CTRL 0x800 -#define BUPD_CLKO 0x1000 -#define BFC 0x6000 -#define BTRACKING_MODE 0x8000 -#define BPHCMP_ENABLE 0x10000 -#define BUPD_CLKO_LTF 0x20000 -#define BCOM_CH_CFO 0x40000 -#define BCSI_ESTI_MODE 0x80000 -#define BADV_UPD_EQZ 0x100000 -#define BUCHCFG 0x7000000 -#define BUPDEQZ 0x8000000 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 #define BRX_PESUDO_NOISE_ON 0x20000000 -#define BRX_PESUDO_NOISE_A 0xff -#define BRX_PESUDO_NOISE_B 0xff00 -#define BRX_PESUDO_NOISE_C 0xff0000 -#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 #define BRX_PESUDO_NOISESTATE_A 0xffff #define BRX_PESUDO_NOISESTATE_B 0xffff0000 #define BRX_PESUDO_NOISESTATE_C 0xffff #define BRX_PESUDO_NOISESTATE_D 0xffff0000 -#define BZEBRA1_HSSIENABLE 0x8 -#define BZEBRA1_TRXCONTROL 0xc00 -#define BZEBRA1_TRXGAINSETTING 0x07f -#define BZEBRA1_RXCOUNTER 0xc00 -#define BZEBRA1_TXCHANGEPUMP 0x38 -#define BZEBRA1_RXCHANGEPUMP 0x7 -#define BZEBRA1_CHANNEL_NUM 0xf80 -#define BZEBRA1_TXLPFBW 0x400 -#define BZEBRA1_RXLPFBW 0x600 +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 #define BRTL8256REG_MODE_CTRL1 0x100 #define BRTL8256REG_MODE_CTRL0 0x40 #define BRTL8256REG_TXLPFBW 0x18 #define BRTL8256REG_RXLPFBW 0x600 -#define BRTL8258_TXLPFBW 0xc -#define BRTL8258_RXLPFBW 0xc00 -#define BRTL8258_RSSILPFBW 0xc0 - -#define BBYTE0 0x1 -#define BBYTE1 0x2 -#define BBYTE2 0x4 -#define BBYTE3 0x8 -#define BWORD0 0x3 -#define BWORD1 0xc -#define BWORD 0xf - -#define BENABLE 0x1 -#define BDISABLE 0x0 - -#define LEFT_ANTENNA 0x0 -#define RIGHT_ANTENNA 0x1 - -#define TCHECK_TXSTATUS 500 -#define TUPDATE_RXCOUNTER 100 +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define 
TUPDATE_RXCOUNTER 100 #define REG_UN_used_register 0x01bf diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c index 486294930a7b210cbd198a6fe72d0b9de8c3e16e..5ed4492d3c806abca911f95d8d589280853aef96 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c @@ -51,7 +51,7 @@ void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) rtlphy->rfreg_chnlval[0]); break; default: - RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "unknown bandwidth: %#X\n", bandwidth); break; } @@ -93,18 +93,20 @@ void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } + if (rtlefuse->eeprom_regulatory == 0) { tmpval = - (rtlphy->mcs_offset[0][6]) + - (rtlphy->mcs_offset[0][7] << 8); + (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8); tx_agc[RF90_PATH_A] += tmpval; - tmpval = (rtlphy->mcs_offset[0][14]) + - (rtlphy->mcs_offset[0][15] << + tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24); tx_agc[RF90_PATH_B] += tmpval; } } + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { ptr = (u8 *)(&(tx_agc[idx1])); for (idx2 = 0; idx2 < 4; idx2++) { @@ -124,30 +126,32 @@ void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, tmpval = tx_agc[RF90_PATH_A] & 0xff; rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; + /*tmpval = tmpval & 0xff00ffff;*/ + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK1_55_MCS32); } @@ -169,8 +173,8 @@ static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw, powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | (powerbase0 << 8) | powerbase0; *(ofdmbase + i) = powerbase0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - " [OFDM power base index rf(%c) = 0x%x]\n", + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + " [OFDM power base index rf(%c) = 0x%x]\n", ((i == 0) ? 
'A' : 'B'), *(ofdmbase + i)); } @@ -179,27 +183,30 @@ static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw, powerlevel[i] = ppowerlevel_bw20[i]; else powerlevel[i] = ppowerlevel_bw40[i]; + powerbase1 = powerlevel[i]; powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; *(mcsbase + i) = powerbase1; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", - ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); + ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); } } -static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, - u32 *powerbase0, u32 *powerbase1, - u32 *p_outwriteval) +static void _rtl8723be_get_txpower_writeval_by_regulatory( + struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerbase0, + u32 *powerbase1, + u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 i, chnlgroup = 0, pwr_diff_limit[4]; - u8 pwr_diff = 0, customer_pwr_diff; + u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff; u32 writeval, customer_limit, rf; for (rf = 0; rf < 2; rf++) { @@ -208,13 +215,13 @@ static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, chnlgroup = 0; writeval = - rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)] + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index + + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "RTK better performance, " - "writeval(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; case 1: if (rtlphy->pwrgroup_cnt == 1) { @@ -233,43 +240,41 @@ static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, else if (channel == 14) chnlgroup = 5; } - writeval = rtlphy->mcs_offset[chnlgroup] + + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Realtek regulatory, 20MHz, " - "writeval(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; case 2: writeval = ((index < 2) ? powerbase0[rf] : powerbase1[rf]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Better regulatory, " - "writeval(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Better regulatory, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; case 3: chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "customer's limit, 40MHz " - "rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht40[rf] - [channel-1]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 40MHz rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40 + [rf][channel - 1]); } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "customer's limit, 20MHz " - "rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), - rtlefuse->pwrgroup_ht20[rf] - [channel-1]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 20MHz rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), + rtlefuse->pwrgroup_ht20 + [rf][channel - 1]); } if (index < 2) @@ -294,9 +299,9 @@ static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, for (i = 0; i < 4; i++) { pwr_diff_limit[i] = - (u8)((rtlphy->mcs_offset - [chnlgroup][index + (rf ? 8 : 0)] & - (0x7f << (i * 8))) >> (i * 8)); + (u8)((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] & + (0x7f << (i * 8))) >> (i * 8)); if (pwr_diff_limit[i] > pwr_diff) pwr_diff_limit[i] = pwr_diff; @@ -307,29 +312,28 @@ static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), customer_limit); + ((rf == 0) ? 'A' : 'B'), customer_limit); writeval = customer_limit + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Customer, writeval rf(%c)= 0x%x\n", + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Customer, writeval rf(%c)= 0x%x\n", ((rf == 0) ? 'A' : 'B'), writeval); break; default: chnlgroup = 0; writeval = - rtlphy->mcs_offset[chnlgroup] + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "RTK better performance, writeval " - "rf(%c) = 0x%x\n", - ((rf == 0) ? 'A' : 'B'), writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance, writeval rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); break; } @@ -343,7 +347,7 @@ static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, } static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw, - u8 index, u32 *value) + u8 index, u32 *pvalue) { struct rtl_priv *rtlpriv = rtl_priv(hw); u16 regoffset_a[6] = { @@ -361,9 +365,9 @@ static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw, u16 regoffset; for (rf = 0; rf < 2; rf++) { - writeval = value[rf]; + writeval = pvalue[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8) ((writeval & (0x7f << + pwr_val[i] = (u8)((writeval & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) @@ -378,8 +382,8 @@ static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw, regoffset = regoffset_b[index]; rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Set 0x%x = %08x\n", regoffset, writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Set 0x%x = %08x\n", regoffset, writeval); } } @@ -400,8 +404,11 @@ void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); for (index = 0; index < 6; index++) { - txpwr_by_regulatory(hw, channel, index, &powerbase0[0], - &powerbase1[0], &writeval[0]); + _rtl8723be_get_txpower_writeval_by_regulatory(hw, + channel, index, + &powerbase0[0], + &powerbase1[0], + &writeval[0]); if (direction == 1) { writeval[0] += pwrtrac_value; writeval[1] += pwrtrac_value; @@ -424,16 +431,17 @@ bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw) rtlphy->num_total_rfpath = 2; return _rtl8723be_phy_rf6052_config_parafile(hw); + } static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg; u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; + struct bb_reg_def *pphyreg; for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; 
rfpath++) { pphyreg = &rtlphy->phyreg_def[rfpath]; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 532913c6622a3ed10ed6a97334a03272a13379cb..223eb42992bd52511cd967ae341f80a2e968bcbc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -31,6 +31,7 @@ #include "phy.h" #include "../rtl8723com/phy_common.h" #include "dm.h" +#include "../rtl8723com/dm_common.h" #include "hw.h" #include "fw.h" #include "../rtl8723com/fw_common.h" @@ -101,6 +102,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); + rtlpriv->phy.lck_inprogress = false; + mac->ht_enable = true; /* compatible 5G band 88ce just 2.4G band & smsp */ @@ -137,12 +140,19 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); + rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | + HSIMR_RON_INT_EN | + 0); + /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + if (rtlpriv->cfg->mod_params->disable_watchdog) + pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through @@ -157,6 +167,11 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; + /*low power: Disable 32k */ + rtlpriv->psc.low_power_enable = false; + + rtlpriv->rtlhal.earlymode_enable = false; + /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { @@ -182,8 +197,6 @@ void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - if (rtlpriv->cfg->ops->get_btc_status()) - rtlpriv->btcoexist.btc_ops->btc_halt_notify(); if (rtlpriv->rtlhal.pfirmware) { vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; @@ -196,7 +209,7 @@ bool rtl8723be_get_btc_status(void) return true; } -static bool is_fw_header(struct rtl92c_firmware_header *hdr) +static bool is_fw_header(struct rtl8723e_firmware_header *hdr) { return (hdr->signature & 0xfff0) == 0x5300; } @@ -245,6 +258,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .set_rfreg = rtl8723be_phy_set_rf_reg, .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, .get_btc_status = rtl8723be_get_btc_status, + .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, }; @@ -253,8 +267,6 @@ static struct rtl_mod_params rtl8723be_mod_params = { .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, - .msi_support = false, - .debug = DBG_EMERG, }; static struct rtl_hal_cfg rtl8723be_hal_cfg = { @@ -272,6 +284,9 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, + .maps[MAC_HSISR] = REG_HSISR, .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, @@ -305,6 +320,7 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, +/* .maps[RTL_IMR_BCNDOK8] = 
IMR_BCNDOK8, */ /*need check*/ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, @@ -312,6 +328,8 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, +/* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/ +/* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, @@ -329,6 +347,7 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_IMR_VIDOK] = IMR_VIDOK, .maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IMR_HSISR_IND] = IMR_HSISR_IND_ON_INT, .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, @@ -348,12 +367,12 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static const struct pci_device_id rtl8723be_pci_id[] = { - {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)}, +static struct pci_device_id rtl8723be_pci_ids[] = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB723, rtl8723be_hal_cfg)}, {}, }; -MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id); +MODULE_DEVICE_TABLE(pci, rtl8723be_pci_ids); MODULE_AUTHOR("PageHe "); MODULE_AUTHOR("Realtek WlanFAE "); @@ -366,21 +385,22 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); -module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); +module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, + bool, 0444); MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n"); MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); static struct pci_driver rtl8723be_driver = { .name = KBUILD_MODNAME, - .id_table = rtl8723be_pci_id, + .id_table = rtl8723be_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, - .driver.pm = &rtlwifi_pm_ops, }; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/rtlwifi/rtl8723be/table.c index 4b283cde042e7376a79c3c83350bef513ed82949..a180761e88107e89cf6d4f5c69f915c1d30c6b61 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.c @@ -27,200 +27,201 @@ #include "table.h" u32 RTL8723BEPHY_REG_1TARRAY[] = { - 0x800, 0x80040000, - 0x804, 0x00000003, - 0x808, 0x0000FC00, - 0x80C, 0x0000000A, - 0x810, 0x10001331, - 0x814, 0x020C3D10, - 0x818, 0x02200385, - 0x81C, 0x00000000, - 0x820, 0x01000100, - 0x824, 0x00390204, - 0x828, 0x00000000, - 0x82C, 0x00000000, - 0x830, 0x00000000, - 0x834, 0x00000000, - 0x838, 0x00000000, - 0x83C, 0x00000000, - 0x840, 0x00010000, - 0x844, 0x00000000, - 0x848, 0x00000000, - 0x84C, 0x00000000, - 0x850, 0x00000000, - 0x854, 0x00000000, - 0x858, 0x569A11A9, - 0x85C, 0x01000014, - 0x860, 0x66F60110, - 0x864, 0x061F0649, - 0x868, 0x00000000, - 0x86C, 
0x27272700, - 0x870, 0x07000760, - 0x874, 0x25004000, - 0x878, 0x00000808, - 0x87C, 0x00000000, - 0x880, 0xB0000C1C, - 0x884, 0x00000001, - 0x888, 0x00000000, - 0x88C, 0xCCC000C0, - 0x890, 0x00000800, - 0x894, 0xFFFFFFFE, - 0x898, 0x40302010, - 0x89C, 0x00706050, - 0x900, 0x00000000, - 0x904, 0x00000023, - 0x908, 0x00000000, - 0x90C, 0x81121111, - 0x910, 0x00000002, - 0x914, 0x00000201, - 0x948, 0x00000000, - 0xA00, 0x00D047C8, - 0xA04, 0x80FF000C, - 0xA08, 0x8C838300, - 0xA0C, 0x2E7F120F, - 0xA10, 0x9500BB78, - 0xA14, 0x1114D028, - 0xA18, 0x00881117, - 0xA1C, 0x89140F00, - 0xA20, 0x1A1B0000, - 0xA24, 0x090E1317, - 0xA28, 0x00000204, - 0xA2C, 0x00D30000, - 0xA70, 0x101FBF00, - 0xA74, 0x00000007, - 0xA78, 0x00000900, - 0xA7C, 0x225B0606, - 0xA80, 0x21806490, - 0xB2C, 0x00000000, - 0xC00, 0x48071D40, - 0xC04, 0x03A05611, - 0xC08, 0x000000E4, - 0xC0C, 0x6C6C6C6C, - 0xC10, 0x08800000, - 0xC14, 0x40000100, - 0xC18, 0x08800000, - 0xC1C, 0x40000100, - 0xC20, 0x00000000, - 0xC24, 0x00000000, - 0xC28, 0x00000000, - 0xC2C, 0x00000000, - 0xC30, 0x69E9AC44, - 0xC34, 0x469652AF, - 0xC38, 0x49795994, - 0xC3C, 0x0A97971C, - 0xC40, 0x1F7C403F, - 0xC44, 0x000100B7, - 0xC48, 0xEC020107, - 0xC4C, 0x007F037F, - 0xC50, 0x69553420, - 0xC54, 0x43BC0094, - 0xC58, 0x00023169, - 0xC5C, 0x00250492, - 0xC60, 0x00000000, - 0xC64, 0x7112848B, - 0xC68, 0x47C00BFF, - 0xC6C, 0x00000036, - 0xC70, 0x2C7F000D, - 0xC74, 0x020610DB, - 0xC78, 0x0000001F, - 0xC7C, 0x00B91612, - 0xC80, 0x390000E4, - 0xC84, 0x20F60000, - 0xC88, 0x40000100, - 0xC8C, 0x20200000, - 0xC90, 0x00020E1A, - 0xC94, 0x00000000, - 0xC98, 0x00020E1A, - 0xC9C, 0x00007F7F, - 0xCA0, 0x00000000, - 0xCA4, 0x000300A0, - 0xCA8, 0x00000000, - 0xCAC, 0x00000000, - 0xCB0, 0x00000000, - 0xCB4, 0x00000000, - 0xCB8, 0x00000000, - 0xCBC, 0x28000000, - 0xCC0, 0x00000000, - 0xCC4, 0x00000000, - 0xCC8, 0x00000000, - 0xCCC, 0x00000000, - 0xCD0, 0x00000000, - 0xCD4, 0x00000000, - 0xCD8, 0x64B22427, - 0xCDC, 0x00766932, - 0xCE0, 0x00222222, - 0xCE4, 0x00000000, - 0xCE8, 0x37644302, - 0xCEC, 0x2F97D40C, - 0xD00, 0x00000740, - 0xD04, 0x40020401, - 0xD08, 0x0000907F, - 0xD0C, 0x20010201, - 0xD10, 0xA0633333, - 0xD14, 0x3333BC53, - 0xD18, 0x7A8F5B6F, - 0xD2C, 0xCC979975, - 0xD30, 0x00000000, - 0xD34, 0x80608000, - 0xD38, 0x00000000, - 0xD3C, 0x00127353, - 0xD40, 0x00000000, - 0xD44, 0x00000000, - 0xD48, 0x00000000, - 0xD4C, 0x00000000, - 0xD50, 0x6437140A, - 0xD54, 0x00000000, - 0xD58, 0x00000282, - 0xD5C, 0x30032064, - 0xD60, 0x4653DE68, - 0xD64, 0x04518A3C, - 0xD68, 0x00002101, - 0xD6C, 0x2A201C16, - 0xD70, 0x1812362E, - 0xD74, 0x322C2220, - 0xD78, 0x000E3C24, - 0xE00, 0x2D2D2D2D, - 0xE04, 0x2D2D2D2D, - 0xE08, 0x0390272D, - 0xE10, 0x2D2D2D2D, - 0xE14, 0x2D2D2D2D, - 0xE18, 0x2D2D2D2D, - 0xE1C, 0x2D2D2D2D, - 0xE28, 0x00000000, - 0xE30, 0x1000DC1F, - 0xE34, 0x10008C1F, - 0xE38, 0x02140102, - 0xE3C, 0x681604C2, - 0xE40, 0x01007C00, - 0xE44, 0x01004800, - 0xE48, 0xFB000000, - 0xE4C, 0x000028D1, - 0xE50, 0x1000DC1F, - 0xE54, 0x10008C1F, - 0xE58, 0x02140102, - 0xE5C, 0x28160D05, - 0xE60, 0x00000008, - 0xE68, 0x001B2556, - 0xE6C, 0x00C00096, - 0xE70, 0x00C00096, - 0xE74, 0x01000056, - 0xE78, 0x01000014, - 0xE7C, 0x01000056, - 0xE80, 0x01000014, - 0xE84, 0x00C00096, - 0xE88, 0x01000056, - 0xE8C, 0x00C00096, - 0xED0, 0x00C00096, - 0xED4, 0x00C00096, - 0xED8, 0x00C00096, - 0xEDC, 0x000000D6, - 0xEE0, 0x000000D6, - 0xEEC, 0x01C00016, - 0xF14, 0x00000003, - 0xF4C, 0x00000000, - 0xF00, 0x00000300, - 0x820, 0x01000100, - 0x800, 0x83040000, + 0x800, 0x80040000, + 0x804, 0x00000003, + 0x808, 
0x0000FC00, + 0x80C, 0x0000000A, + 0x810, 0x10001331, + 0x814, 0x020C3D10, + 0x818, 0x02200385, + 0x81C, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390204, + 0x828, 0x00000000, + 0x82C, 0x00000000, + 0x830, 0x00000000, + 0x834, 0x00000000, + 0x838, 0x00000000, + 0x83C, 0x00000000, + 0x840, 0x00010000, + 0x844, 0x00000000, + 0x848, 0x00000000, + 0x84C, 0x00000000, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569A11A9, + 0x85C, 0x01000014, + 0x860, 0x66F60110, + 0x864, 0x061F0649, + 0x868, 0x00000000, + 0x86C, 0x27272700, + 0x870, 0x07000760, + 0x874, 0x25004000, + 0x878, 0x00000808, + 0x87C, 0x00000000, + 0x880, 0xB0000C1C, + 0x884, 0x00000001, + 0x888, 0x00000000, + 0x88C, 0xCCC000C0, + 0x890, 0x00000800, + 0x894, 0xFFFFFFFE, + 0x898, 0x40302010, + 0x89C, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90C, 0x81121111, + 0x910, 0x00000002, + 0x914, 0x00000201, + 0x948, 0x00000280, + 0xA00, 0x00D047C8, + 0xA04, 0x80FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F120F, + 0xA10, 0x9500BB78, + 0xA14, 0x1114D028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00D30000, + 0xA70, 0x101FBF00, + 0xA74, 0x00000007, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x21806490, + 0xB2C, 0x00000000, + 0xC00, 0x48071D40, + 0xC04, 0x03A05611, + 0xC08, 0x000000E4, + 0xC0C, 0x6C6C6C6C, + 0xC10, 0x08800000, + 0xC14, 0x40000100, + 0xC18, 0x08800000, + 0xC1C, 0x40000100, + 0xC20, 0x00000000, + 0xC24, 0x00000000, + 0xC28, 0x00000000, + 0xC2C, 0x00000000, + 0xC30, 0x69E9AC44, + 0xC34, 0x469652AF, + 0xC38, 0x49795994, + 0xC3C, 0x0A97971C, + 0xC40, 0x1F7C403F, + 0xC44, 0x000100B7, + 0xC48, 0xEC020107, + 0xC4C, 0x007F037F, + 0xC50, 0x69553420, + 0xC54, 0x43BC0094, + 0xC58, 0x00023169, + 0xC5C, 0x00250492, + 0xC60, 0x00000000, + 0xC64, 0x7112848B, + 0xC68, 0x47C00BFF, + 0xC6C, 0x00000036, + 0xC70, 0x2C7F000D, + 0xC74, 0x020610DB, + 0xC78, 0x0000001F, + 0xC7C, 0x00B91612, + 0xC80, 0x390000E4, + 0xC84, 0x20F60000, + 0xC88, 0x40000100, + 0xC8C, 0x20200000, + 0xC90, 0x00020E1A, + 0xC94, 0x00000000, + 0xC98, 0x00020E1A, + 0xC9C, 0x00007F7F, + 0xCA0, 0x00000000, + 0xCA4, 0x000300A0, + 0xCA8, 0x00000000, + 0xCAC, 0x00000000, + 0xCB0, 0x00000000, + 0xCB4, 0x00000000, + 0xCB8, 0x00000000, + 0xCBC, 0x28000000, + 0xCC0, 0x00000000, + 0xCC4, 0x00000000, + 0xCC8, 0x00000000, + 0xCCC, 0x00000000, + 0xCD0, 0x00000000, + 0xCD4, 0x00000000, + 0xCD8, 0x64B22427, + 0xCDC, 0x00766932, + 0xCE0, 0x00222222, + 0xCE4, 0x00000000, + 0xCE8, 0x37644302, + 0xCEC, 0x2F97D40C, + 0xD00, 0x00000740, + 0xD04, 0x40020401, + 0xD08, 0x0000907F, + 0xD0C, 0x20010201, + 0xD10, 0xA0633333, + 0xD14, 0x3333BC53, + 0xD18, 0x7A8F5B6F, + 0xD2C, 0xCC979975, + 0xD30, 0x00000000, + 0xD34, 0x80608000, + 0xD38, 0x00000000, + 0xD3C, 0x00127353, + 0xD40, 0x00000000, + 0xD44, 0x00000000, + 0xD48, 0x00000000, + 0xD4C, 0x00000000, + 0xD50, 0x6437140A, + 0xD54, 0x00000000, + 0xD58, 0x00000282, + 0xD5C, 0x30032064, + 0xD60, 0x4653DE68, + 0xD64, 0x04518A3C, + 0xD68, 0x00002101, + 0xD6C, 0x2A201C16, + 0xD70, 0x1812362E, + 0xD74, 0x322C2220, + 0xD78, 0x000E3C24, + 0xE00, 0x2D2D2D2D, + 0xE04, 0x2D2D2D2D, + 0xE08, 0x0390272D, + 0xE10, 0x2D2D2D2D, + 0xE14, 0x2D2D2D2D, + 0xE18, 0x2D2D2D2D, + 0xE1C, 0x2D2D2D2D, + 0xE28, 0x00000000, + 0xE30, 0x1000DC1F, + 0xE34, 0x10008C1F, + 0xE38, 0x02140102, + 0xE3C, 0x681604C2, + 0xE40, 0x01007C00, + 0xE44, 0x01004800, + 0xE48, 0xFB000000, + 0xE4C, 0x000028D1, + 0xE50, 0x1000DC1F, + 0xE54, 0x10008C1F, + 0xE58, 0x02140102, + 0xE5C, 
0x28160D05, + 0xE60, 0x00000008, + 0xE68, 0x001B2556, + 0xE6C, 0x00C00096, + 0xE70, 0x00C00096, + 0xE74, 0x01000056, + 0xE78, 0x01000014, + 0xE7C, 0x01000056, + 0xE80, 0x01000014, + 0xE84, 0x00C00096, + 0xE88, 0x01000056, + 0xE8C, 0x00C00096, + 0xED0, 0x00C00096, + 0xED4, 0x00C00096, + 0xED8, 0x00C00096, + 0xEDC, 0x000000D6, + 0xEE0, 0x000000D6, + 0xEEC, 0x01C00016, + 0xF14, 0x00000003, + 0xF4C, 0x00000000, + 0xF00, 0x00000300, + 0x820, 0x01000100, + 0x800, 0x83040000, + }; u32 RTL8723BEPHY_REG_ARRAY_PG[] = { @@ -233,340 +234,344 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = { }; u32 RTL8723BE_RADIOA_1TARRAY[] = { - 0x000, 0x00010000, - 0x0B0, 0x000DFFE0, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, - 0x0B1, 0x00000018, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, - 0x0FE, 0x00000000, - 0x0B2, 0x00084C00, - 0x0B5, 0x0000D2CC, - 0x0B6, 0x000925AA, - 0x0B7, 0x00000010, - 0x0B8, 0x0000907F, - 0x05C, 0x00000002, - 0x07C, 0x00000002, - 0x07E, 0x00000005, - 0x08B, 0x0006FC00, - 0x0B0, 0x000FF9F0, - 0x01C, 0x000739D2, - 0x01E, 0x00000000, - 0x0DF, 0x00000780, - 0x050, 0x00067435, - 0x051, 0x0006B04E, - 0x052, 0x000007D2, - 0x053, 0x00000000, - 0x054, 0x00050400, - 0x055, 0x0004026E, - 0x0DD, 0x0000004C, - 0x070, 0x00067435, - 0x071, 0x0006B04E, - 0x072, 0x000007D2, - 0x073, 0x00000000, - 0x074, 0x00050400, - 0x075, 0x0004026E, - 0x0EF, 0x00000100, - 0x034, 0x0000ADD7, - 0x035, 0x00005C00, - 0x034, 0x00009DD4, - 0x035, 0x00005000, - 0x034, 0x00008DD1, - 0x035, 0x00004400, - 0x034, 0x00007DCE, - 0x035, 0x00003800, - 0x034, 0x00006CD1, - 0x035, 0x00004400, - 0x034, 0x00005CCE, - 0x035, 0x00003800, - 0x034, 0x000048CE, - 0x035, 0x00004400, - 0x034, 0x000034CE, - 0x035, 0x00003800, - 0x034, 0x00002451, - 0x035, 0x00004400, - 0x034, 0x0000144E, - 0x035, 0x00003800, - 0x034, 0x00000051, - 0x035, 0x00004400, - 0x0EF, 0x00000000, - 0x0EF, 0x00000100, - 0x0ED, 0x00000010, - 0x044, 0x0000ADD7, - 0x044, 0x00009DD4, - 0x044, 0x00008DD1, - 0x044, 0x00007DCE, - 0x044, 0x00006CC1, - 0x044, 0x00005CCE, - 0x044, 0x000044D1, - 0x044, 0x000034CE, - 0x044, 0x00002451, - 0x044, 0x0000144E, - 0x044, 0x00000051, - 0x0EF, 0x00000000, - 0x0ED, 0x00000000, - 0x0EF, 0x00002000, - 0x03B, 0x000380EF, - 0x03B, 0x000302FE, - 0x03B, 0x00028CE6, - 0x03B, 0x000200BC, - 0x03B, 0x000188A5, - 0x03B, 0x00010FBC, - 0x03B, 0x00008F71, - 0x03B, 0x00000900, - 0x0EF, 0x00000000, - 0x0ED, 0x00000001, - 0x040, 0x000380EF, - 0x040, 0x000302FE, - 0x040, 0x00028CE6, - 0x040, 0x000200BC, - 0x040, 0x000188A5, - 0x040, 0x00010FBC, - 0x040, 0x00008F71, - 0x040, 0x00000900, - 0x0ED, 0x00000000, - 0x082, 0x00080000, - 0x083, 0x00008000, - 0x084, 0x00048D80, - 0x085, 0x00068000, - 0x0A2, 0x00080000, - 0x0A3, 0x00008000, - 0x0A4, 0x00048D80, - 0x0A5, 0x00068000, - 0x000, 0x00033D80, + 0x000, 0x00010000, + 0x0B0, 0x000DFFE0, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B1, 0x00000018, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B2, 0x00084C00, + 0x0B5, 0x0000D2CC, + 0x0B6, 0x000925AA, + 0x0B7, 0x00000010, + 0x0B8, 0x0000907F, + 0x05C, 0x00000002, + 0x07C, 0x00000002, + 0x07E, 0x00000005, + 0x08B, 0x0006FC00, + 0x0B0, 0x000FF9F0, + 0x01C, 0x000739D2, + 0x01E, 0x00000000, + 0x0DF, 0x00000780, + 0x050, 0x00067435, + 0x051, 0x0006B04E, + 0x052, 0x000007D2, + 0x053, 0x00000000, + 0x054, 0x00050400, + 0x055, 0x0004026E, + 0x0DD, 0x0000004C, + 0x070, 0x00067435, + 0x071, 0x0006B04E, + 0x072, 0x000007D2, + 0x073, 0x00000000, + 0x074, 0x00050400, + 0x075, 0x0004026E, + 0x0EF, 0x00000100, + 0x034, 0x0000ADD7, + 
0x035, 0x00005C00, + 0x034, 0x00009DD4, + 0x035, 0x00005000, + 0x034, 0x00008DD1, + 0x035, 0x00004400, + 0x034, 0x00007DCE, + 0x035, 0x00003800, + 0x034, 0x00006CD1, + 0x035, 0x00004400, + 0x034, 0x00005CCE, + 0x035, 0x00003800, + 0x034, 0x000048CE, + 0x035, 0x00004400, + 0x034, 0x000034CE, + 0x035, 0x00003800, + 0x034, 0x00002451, + 0x035, 0x00004400, + 0x034, 0x0000144E, + 0x035, 0x00003800, + 0x034, 0x00000051, + 0x035, 0x00004400, + 0x0EF, 0x00000000, + 0x0EF, 0x00000100, + 0x0ED, 0x00000010, + 0x044, 0x0000ADD7, + 0x044, 0x00009DD4, + 0x044, 0x00008DD1, + 0x044, 0x00007DCE, + 0x044, 0x00006CC1, + 0x044, 0x00005CCE, + 0x044, 0x000044D1, + 0x044, 0x000034CE, + 0x044, 0x00002451, + 0x044, 0x0000144E, + 0x044, 0x00000051, + 0x0EF, 0x00000000, + 0x0ED, 0x00000000, + 0x0EF, 0x00002000, + 0x03B, 0x000380EF, + 0x03B, 0x000302FE, + 0x03B, 0x00028CE6, + 0x03B, 0x000200BC, + 0x03B, 0x000188A5, + 0x03B, 0x00010FBC, + 0x03B, 0x00008F71, + 0x03B, 0x00000900, + 0x0EF, 0x00000000, + 0x0ED, 0x00000001, + 0x040, 0x000380EF, + 0x040, 0x000302FE, + 0x040, 0x00028CE6, + 0x040, 0x000200BC, + 0x040, 0x000188A5, + 0x040, 0x00010FBC, + 0x040, 0x00008F71, + 0x040, 0x00000900, + 0x0ED, 0x00000000, + 0x082, 0x00080000, + 0x083, 0x00008000, + 0x084, 0x00048D80, + 0x085, 0x00068000, + 0x0A2, 0x00080000, + 0x0A3, 0x00008000, + 0x0A4, 0x00048D80, + 0x0A5, 0x00068000, + 0x000, 0x00033D80, + }; u32 RTL8723BEMAC_1T_ARRAY[] = { - 0x02F, 0x00000030, - 0x035, 0x00000000, - 0x428, 0x0000000A, - 0x429, 0x00000010, - 0x430, 0x00000000, - 0x431, 0x00000000, - 0x432, 0x00000000, - 0x433, 0x00000001, - 0x434, 0x00000004, - 0x435, 0x00000005, - 0x436, 0x00000007, - 0x437, 0x00000008, - 0x43C, 0x00000004, - 0x43D, 0x00000005, - 0x43E, 0x00000007, - 0x43F, 0x00000008, - 0x440, 0x0000005D, - 0x441, 0x00000001, - 0x442, 0x00000000, - 0x444, 0x00000010, - 0x445, 0x00000000, - 0x446, 0x00000000, - 0x447, 0x00000000, - 0x448, 0x00000000, - 0x449, 0x000000F0, - 0x44A, 0x0000000F, - 0x44B, 0x0000003E, - 0x44C, 0x00000010, - 0x44D, 0x00000000, - 0x44E, 0x00000000, - 0x44F, 0x00000000, - 0x450, 0x00000000, - 0x451, 0x000000F0, - 0x452, 0x0000000F, - 0x453, 0x00000000, - 0x456, 0x0000005E, - 0x460, 0x00000066, - 0x461, 0x00000066, - 0x4C8, 0x000000FF, - 0x4C9, 0x00000008, - 0x4CC, 0x000000FF, - 0x4CD, 0x000000FF, - 0x4CE, 0x00000001, - 0x500, 0x00000026, - 0x501, 0x000000A2, - 0x502, 0x0000002F, - 0x503, 0x00000000, - 0x504, 0x00000028, - 0x505, 0x000000A3, - 0x506, 0x0000005E, - 0x507, 0x00000000, - 0x508, 0x0000002B, - 0x509, 0x000000A4, - 0x50A, 0x0000005E, - 0x50B, 0x00000000, - 0x50C, 0x0000004F, - 0x50D, 0x000000A4, - 0x50E, 0x00000000, - 0x50F, 0x00000000, - 0x512, 0x0000001C, - 0x514, 0x0000000A, - 0x516, 0x0000000A, - 0x525, 0x0000004F, - 0x550, 0x00000010, - 0x551, 0x00000010, - 0x559, 0x00000002, - 0x55C, 0x00000050, - 0x55D, 0x000000FF, - 0x605, 0x00000030, - 0x608, 0x0000000E, - 0x609, 0x0000002A, - 0x620, 0x000000FF, - 0x621, 0x000000FF, - 0x622, 0x000000FF, - 0x623, 0x000000FF, - 0x624, 0x000000FF, - 0x625, 0x000000FF, - 0x626, 0x000000FF, - 0x627, 0x000000FF, - 0x638, 0x00000050, - 0x63C, 0x0000000A, - 0x63D, 0x0000000A, - 0x63E, 0x0000000E, - 0x63F, 0x0000000E, - 0x640, 0x00000040, - 0x642, 0x00000040, - 0x643, 0x00000000, - 0x652, 0x000000C8, - 0x66E, 0x00000005, - 0x700, 0x00000021, - 0x701, 0x00000043, - 0x702, 0x00000065, - 0x703, 0x00000087, - 0x708, 0x00000021, - 0x709, 0x00000043, - 0x70A, 0x00000065, - 0x70B, 0x00000087, + 0x02F, 0x00000030, + 0x035, 0x00000000, + 0x067, 0x00000020, + 0x428, 0x0000000A, + 0x429, 
0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + }; u32 RTL8723BEAGCTAB_1TARRAY[] = { - 0xC78, 0xFD000001, - 0xC78, 0xFC010001, - 0xC78, 0xFB020001, - 0xC78, 0xFA030001, - 0xC78, 0xF9040001, - 0xC78, 0xF8050001, - 0xC78, 0xF7060001, - 0xC78, 0xF6070001, - 0xC78, 0xF5080001, - 0xC78, 0xF4090001, - 0xC78, 0xF30A0001, - 0xC78, 0xF20B0001, - 0xC78, 0xF10C0001, - 0xC78, 0xF00D0001, - 0xC78, 0xEF0E0001, - 0xC78, 0xEE0F0001, - 0xC78, 0xED100001, - 0xC78, 0xEC110001, - 0xC78, 0xEB120001, - 0xC78, 0xEA130001, - 0xC78, 0xE9140001, - 0xC78, 0xE8150001, - 0xC78, 0xE7160001, - 0xC78, 0xAA170001, - 0xC78, 0xA9180001, - 0xC78, 0xA8190001, - 0xC78, 0xA71A0001, - 0xC78, 0xA61B0001, - 0xC78, 0xA51C0001, - 0xC78, 0xA41D0001, - 0xC78, 0xA31E0001, - 0xC78, 0x671F0001, - 0xC78, 0x66200001, - 0xC78, 0x65210001, - 0xC78, 0x64220001, - 0xC78, 0x63230001, - 0xC78, 0x62240001, - 0xC78, 0x61250001, - 0xC78, 0x47260001, - 0xC78, 0x46270001, - 0xC78, 0x45280001, - 0xC78, 0x44290001, - 0xC78, 0x432A0001, - 0xC78, 0x422B0001, - 0xC78, 0x292C0001, - 0xC78, 0x282D0001, - 0xC78, 0x272E0001, - 0xC78, 0x262F0001, - 0xC78, 0x25300001, - 0xC78, 0x24310001, - 0xC78, 0x09320001, - 0xC78, 0x08330001, - 0xC78, 0x07340001, - 0xC78, 0x06350001, - 0xC78, 0x05360001, - 0xC78, 0x04370001, - 0xC78, 0x03380001, - 0xC78, 0x02390001, - 0xC78, 0x013A0001, - 0xC78, 0x003B0001, - 0xC78, 0x003C0001, - 0xC78, 0x003D0001, - 0xC78, 0x003E0001, - 0xC78, 0x003F0001, - 0xC78, 0xFC400001, - 0xC78, 0xFB410001, - 0xC78, 0xFA420001, - 0xC78, 0xF9430001, - 0xC78, 0xF8440001, - 0xC78, 0xF7450001, - 0xC78, 0xF6460001, - 0xC78, 0xF5470001, - 0xC78, 0xF4480001, - 0xC78, 
0xF3490001, - 0xC78, 0xF24A0001, - 0xC78, 0xF14B0001, - 0xC78, 0xF04C0001, - 0xC78, 0xEF4D0001, - 0xC78, 0xEE4E0001, - 0xC78, 0xED4F0001, - 0xC78, 0xEC500001, - 0xC78, 0xEB510001, - 0xC78, 0xEA520001, - 0xC78, 0xE9530001, - 0xC78, 0xE8540001, - 0xC78, 0xE7550001, - 0xC78, 0xE6560001, - 0xC78, 0xE5570001, - 0xC78, 0xAA580001, - 0xC78, 0xA9590001, - 0xC78, 0xA85A0001, - 0xC78, 0xA75B0001, - 0xC78, 0xA65C0001, - 0xC78, 0xA55D0001, - 0xC78, 0xA45E0001, - 0xC78, 0x675F0001, - 0xC78, 0x66600001, - 0xC78, 0x65610001, - 0xC78, 0x64620001, - 0xC78, 0x63630001, - 0xC78, 0x62640001, - 0xC78, 0x61650001, - 0xC78, 0x47660001, - 0xC78, 0x46670001, - 0xC78, 0x45680001, - 0xC78, 0x44690001, - 0xC78, 0x436A0001, - 0xC78, 0x426B0001, - 0xC78, 0x296C0001, - 0xC78, 0x286D0001, - 0xC78, 0x276E0001, - 0xC78, 0x266F0001, - 0xC78, 0x25700001, - 0xC78, 0x24710001, - 0xC78, 0x09720001, - 0xC78, 0x08730001, - 0xC78, 0x07740001, - 0xC78, 0x06750001, - 0xC78, 0x05760001, - 0xC78, 0x04770001, - 0xC78, 0x03780001, - 0xC78, 0x02790001, - 0xC78, 0x017A0001, - 0xC78, 0x007B0001, - 0xC78, 0x007C0001, - 0xC78, 0x007D0001, - 0xC78, 0x007E0001, - 0xC78, 0x007F0001, - 0xC50, 0x69553422, - 0xC50, 0x69553420, + 0xC78, 0xFD000001, + 0xC78, 0xFC010001, + 0xC78, 0xFB020001, + 0xC78, 0xFA030001, + 0xC78, 0xF9040001, + 0xC78, 0xF8050001, + 0xC78, 0xF7060001, + 0xC78, 0xF6070001, + 0xC78, 0xF5080001, + 0xC78, 0xF4090001, + 0xC78, 0xF30A0001, + 0xC78, 0xF20B0001, + 0xC78, 0xF10C0001, + 0xC78, 0xF00D0001, + 0xC78, 0xEF0E0001, + 0xC78, 0xEE0F0001, + 0xC78, 0xED100001, + 0xC78, 0xEC110001, + 0xC78, 0xEB120001, + 0xC78, 0xEA130001, + 0xC78, 0xE9140001, + 0xC78, 0xE8150001, + 0xC78, 0xE7160001, + 0xC78, 0xAA170001, + 0xC78, 0xA9180001, + 0xC78, 0xA8190001, + 0xC78, 0xA71A0001, + 0xC78, 0xA61B0001, + 0xC78, 0xA51C0001, + 0xC78, 0xA41D0001, + 0xC78, 0xA31E0001, + 0xC78, 0x671F0001, + 0xC78, 0x66200001, + 0xC78, 0x65210001, + 0xC78, 0x64220001, + 0xC78, 0x63230001, + 0xC78, 0x62240001, + 0xC78, 0x61250001, + 0xC78, 0x47260001, + 0xC78, 0x46270001, + 0xC78, 0x45280001, + 0xC78, 0x44290001, + 0xC78, 0x432A0001, + 0xC78, 0x422B0001, + 0xC78, 0x292C0001, + 0xC78, 0x282D0001, + 0xC78, 0x272E0001, + 0xC78, 0x262F0001, + 0xC78, 0x25300001, + 0xC78, 0x24310001, + 0xC78, 0x09320001, + 0xC78, 0x08330001, + 0xC78, 0x07340001, + 0xC78, 0x06350001, + 0xC78, 0x05360001, + 0xC78, 0x04370001, + 0xC78, 0x03380001, + 0xC78, 0x02390001, + 0xC78, 0x013A0001, + 0xC78, 0x003B0001, + 0xC78, 0x003C0001, + 0xC78, 0x003D0001, + 0xC78, 0x003E0001, + 0xC78, 0x003F0001, + 0xC78, 0xFC400001, + 0xC78, 0xFB410001, + 0xC78, 0xFA420001, + 0xC78, 0xF9430001, + 0xC78, 0xF8440001, + 0xC78, 0xF7450001, + 0xC78, 0xF6460001, + 0xC78, 0xF5470001, + 0xC78, 0xF4480001, + 0xC78, 0xF3490001, + 0xC78, 0xF24A0001, + 0xC78, 0xF14B0001, + 0xC78, 0xF04C0001, + 0xC78, 0xEF4D0001, + 0xC78, 0xEE4E0001, + 0xC78, 0xED4F0001, + 0xC78, 0xEC500001, + 0xC78, 0xEB510001, + 0xC78, 0xEA520001, + 0xC78, 0xE9530001, + 0xC78, 0xE8540001, + 0xC78, 0xE7550001, + 0xC78, 0xE6560001, + 0xC78, 0xE5570001, + 0xC78, 0xAA580001, + 0xC78, 0xA9590001, + 0xC78, 0xA85A0001, + 0xC78, 0xA75B0001, + 0xC78, 0xA65C0001, + 0xC78, 0xA55D0001, + 0xC78, 0xA45E0001, + 0xC78, 0x675F0001, + 0xC78, 0x66600001, + 0xC78, 0x65610001, + 0xC78, 0x64620001, + 0xC78, 0x63630001, + 0xC78, 0x62640001, + 0xC78, 0x61650001, + 0xC78, 0x47660001, + 0xC78, 0x46670001, + 0xC78, 0x45680001, + 0xC78, 0x44690001, + 0xC78, 0x436A0001, + 0xC78, 0x426B0001, + 0xC78, 0x296C0001, + 0xC78, 0x286D0001, + 0xC78, 0x276E0001, + 0xC78, 0x266F0001, + 0xC78, 
0x25700001, + 0xC78, 0x24710001, + 0xC78, 0x09720001, + 0xC78, 0x08730001, + 0xC78, 0x07740001, + 0xC78, 0x06750001, + 0xC78, 0x05760001, + 0xC78, 0x04770001, + 0xC78, 0x03780001, + 0xC78, 0x02790001, + 0xC78, 0x017A0001, + 0xC78, 0x007B0001, + 0xC78, 0x007C0001, + 0xC78, 0x007D0001, + 0xC78, 0x007E0001, + 0xC78, 0x007F0001, + 0xC50, 0x69553422, + 0xC50, 0x69553420, + }; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/rtlwifi/rtl8723be/table.h index 932760a84827a30a3cecc274afe1c146e1f72d7a..dc17001632f719776dd825862ada7e2a1660867b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.h @@ -35,7 +35,7 @@ extern u32 RTL8723BEPHY_REG_1TARRAY[]; extern u32 RTL8723BEPHY_REG_ARRAY_PG[]; #define RTL8723BE_RADIOA_1TARRAYLEN 206 extern u32 RTL8723BE_RADIOA_1TARRAY[]; -#define RTL8723BEMAC_1T_ARRAYLEN 194 +#define RTL8723BEMAC_1T_ARRAYLEN 196 extern u32 RTL8723BEMAC_1T_ARRAY[]; #define RTL8723BEAGCTAB_1TARRAYLEN 260 extern u32 RTL8723BEAGCTAB_1TARRAY[]; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c index 969eaea5eddd8d23862b272edfc6edbf6d9ac5df..d6a1c70cb657ba692fa93c43b87315224429c071 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c @@ -33,6 +33,7 @@ #include "trx.h" #include "led.h" #include "dm.h" +#include "fw.h" static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) { @@ -207,196 +208,150 @@ static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw, static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstatus, u8 *pdesc, struct rx_fwinfo_8723be *p_drvinfo, - bool packet_match_bssid, - bool packet_toself, + bool bpacket_match_bssid, + bool bpacket_toself, bool packet_beacon) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); - struct phy_sts_cck_8723e_t *cck_buf; struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; - struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); char rx_pwr_all = 0, rx_pwr[4]; - u8 rf_rx_num = 0, evm, pwdb_all; + u8 rf_rx_num = 0, evm, pwdb_all, pwdb_all_bt = 0; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; bool is_cck = pstatus->is_cck; u8 lan_idx, vga_idx; /* Record it for next packet processing */ - pstatus->packet_matchbssid = packet_match_bssid; - pstatus->packet_toself = packet_toself; + pstatus->packet_matchbssid = bpacket_match_bssid; + pstatus->packet_toself = bpacket_toself; pstatus->packet_beacon = packet_beacon; - pstatus->rx_mimo_sig_qual[0] = -1; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; if (is_cck) { u8 cck_highpwr; u8 cck_agc_rpt; - /* CCK Driver info Structure is not the same as OFDM packet. 
*/ - cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo; - cck_agc_rpt = cck_buf->cck_agc_rpt; - /* (1)Hardware does not provide RSSI for CCK - * (2)PWDB, Average PWDB cacluated by + cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a; + + /* (1)Hardware does not provide RSSI for CCK */ + /* (2)PWDB, Average PWDB cacluated by * hardware (for rate adaptive) */ - if (ppsc->rfpwr_state == ERFON) - cck_highpwr = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, - BIT(9)); - else - cck_highpwr = false; + cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, + BIT(9)); lan_idx = ((cck_agc_rpt & 0xE0) >> 5); vga_idx = (cck_agc_rpt & 0x1f); + switch (lan_idx) { - case 7: - if (vga_idx <= 27)/*VGA_idx = 27~2*/ - rx_pwr_all = -100 + 2 * (27 - vga_idx); - else - rx_pwr_all = -100; + /* 46 53 73 95 201301231630 */ + /* 46 53 77 99 201301241630 */ + case 6: + rx_pwr_all = -34 - (2 * vga_idx); break; - case 6:/*VGA_idx = 2~0*/ - rx_pwr_all = -48 + 2 * (2 - vga_idx); - break; - case 5:/*VGA_idx = 7~5*/ - rx_pwr_all = -42 + 2 * (7 - vga_idx); - break; - case 4:/*VGA_idx = 7~4*/ - rx_pwr_all = -36 + 2 * (7 - vga_idx); - break; - case 3:/*VGA_idx = 7~0*/ - rx_pwr_all = -24 + 2 * (7 - vga_idx); - break; - case 2: - if (cck_highpwr)/*VGA_idx = 5~0*/ - rx_pwr_all = -12 + 2 * (5 - vga_idx); - else - rx_pwr_all = -6 + 2 * (5 - vga_idx); + case 4: + rx_pwr_all = -14 - (2 * vga_idx); break; case 1: - rx_pwr_all = 8 - 2 * vga_idx; + rx_pwr_all = 6 - (2 * vga_idx); break; case 0: - rx_pwr_all = 14 - 2 * vga_idx; + rx_pwr_all = 16 - (2 * vga_idx); break; default: break; } - rx_pwr_all += 6; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); - /* CCK gain is smaller than OFDM/MCS gain, */ - /* so we add gain diff by experiences, - * the val is 6 - */ - pwdb_all += 6; if (pwdb_all > 100) pwdb_all = 100; - /* modify the offset to make the same gain index with OFDM. */ - if (pwdb_all > 34 && pwdb_all <= 42) - pwdb_all -= 2; - else if (pwdb_all > 26 && pwdb_all <= 34) - pwdb_all -= 6; - else if (pwdb_all > 14 && pwdb_all <= 26) - pwdb_all -= 8; - else if (pwdb_all > 4 && pwdb_all <= 14) - pwdb_all -= 4; - if (!cck_highpwr) { - if (pwdb_all >= 80) - pwdb_all = ((pwdb_all - 80) << 1) + - ((pwdb_all - 80) >> 1) + 80; - else if ((pwdb_all <= 78) && (pwdb_all >= 20)) - pwdb_all += 3; - if (pwdb_all > 100) - pwdb_all = 100; - } pstatus->rx_pwdb_all = pwdb_all; + pstatus->bt_rx_rssi_percentage = pwdb_all; pstatus->recvsignalpower = rx_pwr_all; /* (3) Get Signal Quality (EVM) */ - if (packet_match_bssid) { - u8 sq; - + if (bpacket_match_bssid) { + u8 sq, sq_rpt; if (pstatus->rx_pwdb_all > 40) { sq = 100; } else { - sq = cck_buf->sq_rpt; - if (sq > 64) + sq_rpt = p_phystrpt->cck_sig_qual_ofdm_pwdb_all; + if (sq_rpt > 64) sq = 0; - else if (sq < 20) + else if (sq_rpt < 20) sq = 100; else - sq = ((64 - sq) * 100) / 44; + sq = ((64 - sq_rpt) * 100) / 44; } - pstatus->signalquality = sq; - pstatus->rx_mimo_sig_qual[0] = sq; - pstatus->rx_mimo_sig_qual[1] = -1; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; } } else { - rtlpriv->dm.rfpath_rxenable[0] = true; - rtlpriv->dm.rfpath_rxenable[1] = true; - /* (1)Get RSSI for HT rate */ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { /* we will judge RF RX path now. */ if (rtlpriv->dm.rfpath_rxenable[i]) rf_rx_num++; - rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110; + rx_pwr[i] = ((p_phystrpt->path_agc[i].gain & 0x3f) * 2) + - 110; + pstatus->rx_pwr[i] = rx_pwr[i]; /* Translate DBM to percentage. 
*/ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); total_rssi += rssi; - /* Get Rx snr value in DB */ - rtlpriv->stats.rx_snr_db[i] = - (long)(p_drvinfo->rxsnr[i] / 2); - - /* Record Signal Strength for next packet */ - if (packet_match_bssid) - pstatus->rx_mimo_signalstrength[i] = (u8) rssi; + pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Avg cacluated by hardware (for rate adaptive) */ - rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; + /* (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) & + 0x7f) - 110; pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pwdb_all_bt = pwdb_all; pstatus->rx_pwdb_all = pwdb_all; + pstatus->bt_rx_rssi_percentage = pwdb_all_bt; pstatus->rxpower = rx_pwr_all; pstatus->recvsignalpower = rx_pwr_all; /* (3)EVM of HT rate */ - if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 && + if (pstatus->rate >= DESC92C_RATEMCS8 && pstatus->rate <= DESC92C_RATEMCS15) max_spatial_stream = 2; else max_spatial_stream = 1; for (i = 0; i < max_spatial_stream; i++) { - evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); + evm = rtl_evm_db_to_percentage( + p_phystrpt->stream_rxevm[i]); - if (packet_match_bssid) { + if (bpacket_match_bssid) { /* Fill value in RFD, Get the first * spatial stream only */ if (i == 0) pstatus->signalquality = - (u8) (evm & 0xff); - pstatus->rx_mimo_sig_qual[i] = - (u8) (evm & 0xff); + (u8)(evm & 0xff); + pstatus->rx_mimo_signalquality[i] = + (u8)(evm & 0xff); } } - if (packet_match_bssid) { + + if (bpacket_match_bssid) { for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) rtl_priv(hw)->dm.cfo_tail[i] = - (char)p_phystrpt->path_cfotail[i]; + (int)p_phystrpt->path_cfotail[i]; - rtl_priv(hw)->dm.packet_count++; if (rtl_priv(hw)->dm.packet_count == 0xffffffff) rtl_priv(hw)->dm.packet_count = 0; + else + rtl_priv(hw)->dm.packet_count++; } } @@ -409,10 +364,6 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, else if (rf_rx_num != 0) pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, total_rssi /= rf_rx_num)); - /*HW antenna diversity*/ - rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel; - rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b; - rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2; } static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw, @@ -440,14 +391,14 @@ static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw, memcpy(pstatus->psaddr, psaddr, ETH_ALEN); packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? - hdr->addr2 : hdr->addr3)) && - (!pstatus->hwerror) && - (!pstatus->crc) && (!pstatus->icv)); + (ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? 
+ hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && + (!pstatus->crc) && (!pstatus->icv)); packet_toself = packet_matchbssid && - (!ether_addr_equal(praddr, rtlefuse->dev_addr)); + (ether_addr_equal(praddr, rtlefuse->dev_addr)); /* YP: packet_beacon is not initialized, * this assignment is neccesary, @@ -531,30 +482,33 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr; u32 phystatus = GET_RX_DESC_PHYST(pdesc); - status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc); - if (status->packet_report_type == TX_REPORT2) - status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc); - else - status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); - status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + + status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT; - status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); status->icv = (u16) GET_RX_DESC_ICV(pdesc); status->crc = (u16) GET_RX_DESC_CRC32(pdesc); status->hwerror = (status->crc | status->icv); status->decrypted = !GET_RX_DESC_SWDEC(pdesc); - status->rate = (u8) GET_RX_DESC_RXMCS(pdesc); - status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); - status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - if (status->packet_report_type == NORMAL_RX) - status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); + status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); + status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); + status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); + status->timestamp_low = GET_RX_DESC_TSFL(pdesc); + status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc); + status->bandwidth = (u8)GET_RX_DESC_BW(pdesc); + status->macid = GET_RX_DESC_MACID(pdesc); status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); - status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate); + status->is_cck = RX_HAL_IS_CCK_RATE(status->rate); + + if (GET_RX_STATUS_DESC_RPT_SEL(pdesc)) + status->packet_report_type = C2H_PACKET; + else + status->packet_report_type = NORMAL_RX; + - status->macid = GET_RX_DESC_MACID(pdesc); if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) status->wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) @@ -565,12 +519,11 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, status->wake_match = 0; if (status->wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, - "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n", - status->wake_match); + "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", + status->wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + status->rx_bufshift); @@ -594,24 +547,16 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, * to decrypt it */ if (status->decrypted) { - if (!hdr) { - WARN_ON_ONCE(true); - pr_err("decrypted is true but hdr NULL in skb %p\n", - rtl_get_hdr(skb)); - return false; - } - - if ((_ieee80211_is_robust_mgmt_frame(hdr)) && + if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) - rx_status->flag &= ~RX_FLAG_DECRYPTED; - else rx_status->flag |= RX_FLAG_DECRYPTED; + else + rx_status->flag &= ~RX_FLAG_DECRYPTED; } /* rate_idx: index of data rate into band's * supported rates or MCS index if HT rates * are use (RX_FLAG_HT) - * Notice: this is diff with windows define */ rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht, status->rate); @@ -624,21 +569,19 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, _rtl8723be_translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo); } - - /*rx_status->qual = status->signal; */ rx_status->signal = status->recvsignalpower + 10; if (status->packet_report_type == TX_REPORT2) { status->macid_valid_entry[0] = - GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); status->macid_valid_entry[1] = - GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); } return true; } void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, + u8 *txbd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) { @@ -646,16 +589,16 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u8 *pdesc = pdesc_tx; + u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; unsigned int buf_len = 0; unsigned int skb_len = skb->len; u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue); bool firstseg = ((hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); bool lastseg = ((hdr->frame_control & - cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); dma_addr_t mapping; u8 bw_40 = 0; u8 short_gi = 0; @@ -732,11 +675,11 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : (ptcb_desc->rts_use_shortgi ? 1 : 0))); - if (ptcb_desc->btx_enable_sw_calc_duration) + if (ptcb_desc->tx_enable_sw_calc_duration) SET_TX_DESC_NAV_USE_HDR(pdesc, 1); if (bw_40) { - if (ptcb_desc->packet_bw) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { SET_TX_DESC_DATA_BW(pdesc, 1); SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); } else { @@ -776,9 +719,12 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? - 1 : 0); + 1 : 0); SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 
1 : 0); + /* Set TxRate and RTSRate in TxDesc */ + /* This prevent Tx initial rate of new-coming packets */ + /* from being overwritten by retried packet rate.*/ if (ieee80211_is_data_qos(fc)) { if (mac->rdg_en) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, @@ -793,9 +739,14 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len); SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); - SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); - SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); - + /* if (rtlpriv->dm.useramask) { */ + if (1) { + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } else { + SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } if (!ieee80211_is_data_qos(fc)) { SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_HWSEQ_SEL(pdesc, 0); @@ -805,11 +756,12 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { SET_TX_DESC_BMC(pdesc, 1); } + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, + bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -849,16 +801,19 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, SET_TX_DESC_OWN(pdesc, 1); - SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len)); + SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); SET_TX_DESC_FIRST_SEG(pdesc, 1); SET_TX_DESC_LAST_SEG(pdesc, 1); SET_TX_DESC_USE_RATE(pdesc, 1); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE); } -void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val) +void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val) { if (istx) { switch (desc_name) { @@ -870,7 +825,7 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, break; default: RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + desc_name); break; } } else { @@ -889,7 +844,7 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", - desc_name); + desc_name); break; } } @@ -909,7 +864,7 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) break; default: RT_ASSERT(false, "ERR txdesc :%d not process\n", - desc_name); + desc_name); break; } } else { @@ -920,6 +875,9 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(pdesc); + break; default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); @@ -935,16 +893,15 @@ bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; u8 *entry = (u8 *)(&ring->desc[ring->idx]); - u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN); + u8 own = (u8)rtl8723be_get_desc(entry, true, HW_DESC_OWN); /*beacon packet will only use the first - *descriptor by default, and the own may not + *descriptor defautly,and the own may not *be cleared by the hardware */ if (own) return false; - else - return true; + return true; } void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) @@ -957,3 +914,28 @@ void 
rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } + +u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb) +{ + u32 result = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (status.packet_report_type) { + case NORMAL_RX: + result = 0; + break; + case C2H_PACKET: + rtl8723be_c2h_packet_handler(hw, skb->data, + (u8)skb->len); + result = 1; + break; + default: + RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, + "No this packet type!!\n"); + break; + } + + return result; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h index 102f33dcc988ef6e028864e2c6b1a413402383aa..45949ac4854c6579e4eafc86c20140a519fd94db 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h @@ -415,21 +415,25 @@ struct phy_status_rpt { } __packed; struct rx_fwinfo_8723be { - u8 gain_trsw[4]; + u8 gain_trsw[2]; + u16 chl_num:10; + u16 sub_chnl:4; + u16 r_rfmod:2; u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; char rxevm[2]; - char rxsnr[4]; + char rxsnr[2]; + u8 pcts_msk_rpt[2]; u8 pdsnr[2]; u8 csi_current[2]; - u8 csi_target[2]; + u8 rx_gain_c; + u8 rx_gain_d; u8 sigevm; - u8 max_ex_pwr; - u8 ex_intf_flag:1; - u8 sgi_en:1; - u8 rxsc:2; - u8 reserve:4; + u8 resvd_0; + u8 antidx_anta:3; + u8 antidx_antb:3; + u8 resvd_1:2; } __packed; struct tx_desc_8723be { @@ -597,21 +601,25 @@ struct rx_desc_8723be { } __packed; void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, u8 *pdesc, - u8 *pbd_desc_tx, struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *status, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb); -void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val); +void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val); u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name); bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, + bool firstseg, bool lastseg, struct sk_buff *skb); +u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c index 4e254b72bf4592bea36f16320f9b0aefc5e640ca..064340641913cd37c447be04fc83ea300907af67 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c @@ -44,7 +44,6 @@ EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower); void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; @@ -54,12 +53,13 @@ EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo); void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; - rtlpriv->dm_pstable.pre_ccastate = CCA_MAX; - 
rtlpriv->dm_pstable.cur_ccasate = CCA_MAX; - rtlpriv->dm_pstable.pre_rfstate = RF_MAX; - rtlpriv->dm_pstable.cur_rfstate = RF_MAX; - rtlpriv->dm_pstable.rssi_val_min = 0; - rtlpriv->dm_pstable.initialize = 0; + dm_pstable->pre_ccastate = CCA_MAX; + dm_pstable->cur_ccasate = CCA_MAX; + dm_pstable->pre_rfstate = RF_MAX; + dm_pstable->cur_rfstate = RF_MAX; + dm_pstable->rssi_val_min = 0; + dm_pstable->initialize = 0; } EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving); diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c index 540278ff462b9403aee5512525508b14ac0319f9..dd698e7e9aceffb90c1e5281f6da123f6635fc56 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c @@ -36,7 +36,8 @@ void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable) if (enable) { tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, + tmp | 0x04); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); @@ -95,7 +96,7 @@ void rtl8723_fw_page_write(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(rtl8723_fw_page_write); -static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen) { u32 fwlen = *pfwlen; u8 remain = (u8) (fwlen % 4); @@ -109,60 +110,64 @@ static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen) } *pfwlen = fwlen; } +EXPORT_SYMBOL(rtl8723_fill_dummy); void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, - u8 *buffer, u32 size) + u8 *buffer, u32 size, u8 max_page) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *bufferptr = buffer; - u32 pagenums, remainsize; + u32 page_nums, remain_size; u32 page, offset; - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); rtl8723_fill_dummy(bufferptr, &size); - pagenums = size / FW_8192C_PAGE_SIZE; - remainsize = size % FW_8192C_PAGE_SIZE; + page_nums = size / FW_8192C_PAGE_SIZE; + remain_size = size % FW_8192C_PAGE_SIZE; - if (pagenums > 8) { + if (page_nums > max_page) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not greater then 8\n"); + "Page numbers should not greater than %d\n", max_page); } - for (page = 0; page < pagenums; page++) { + for (page = 0; page < page_nums; page++) { offset = page * FW_8192C_PAGE_SIZE; rtl8723_fw_page_write(hw, page, (bufferptr + offset), FW_8192C_PAGE_SIZE); } - if (remainsize) { - offset = pagenums * FW_8192C_PAGE_SIZE; - page = pagenums; + + if (remain_size) { + offset = page_nums * FW_8192C_PAGE_SIZE; + page = page_nums; rtl8723_fw_page_write(hw, page, (bufferptr + offset), - remainsize); + remain_size); } + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n"); } EXPORT_SYMBOL_GPL(rtl8723_write_fw); void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw) { - u8 u1tmp; + u8 u1b_tmp; u8 delay = 100; struct rtl_priv *rtlpriv = rtl_priv(hw); rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - while (u1tmp & BIT(2)) { + while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) break; udelay(50); - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } if (delay == 0) { - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 
1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, + u1b_tmp&(~BIT(2))); } } EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset); @@ -190,7 +195,8 @@ void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw) } EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset); -int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be) +int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, + int max_count) { struct rtl_priv *rtlpriv = rtl_priv(hw); int err = -EIO; @@ -199,10 +205,10 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) && + } while ((counter++ < max_count) && (!(value32 & FWDL_CHKSUM_RPT))); - if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { + if (counter >= max_count) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "chksum report fail ! REG_MCUFWDL:0x%08x .\n", value32); @@ -223,15 +229,15 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be) value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); if (value32 & WINTINI_RDY) { RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! " - "REG_MCUFWDL:0x%08x .\n", + "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", value32); err = 0; goto exit; } - udelay(FW_8192C_POLLING_DELAY); - } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); + mdelay(FW_8192C_POLLING_DELAY); + + } while (counter++ < max_count); RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", @@ -243,51 +249,55 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be) EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go); int rtl8723_download_fw(struct ieee80211_hw *hw, - bool is_8723be) + bool is_8723be, int max_count) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtl8723e_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; enum version_8723e version = rtlhal->version; + int max_page; if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; - RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "normal Firmware SIZE %d\n", fwsize); + if (!is_8723be) + max_page = 6; + else + max_page = 8; if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "Firmware Version(%d), Signature(%#x), Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtl8723e_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header); + fwsize = fwsize - sizeof(struct rtl8723e_firmware_header); } - if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { - rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + + if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) { if (is_8723be) rtl8723be_firmware_selfreset(hw); else rtl8723ae_firmware_selfreset(hw); + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); } rtl8723_enable_fw_download(hw, true); - rtl8723_write_fw(hw, version, pfwdata, fwsize); + rtl8723_write_fw(hw, version, pfwdata, fwsize, max_page); rtl8723_enable_fw_download(hw, false); - err = rtl8723_fw_free_to_go(hw, is_8723be); + 
err = rtl8723_fw_free_to_go(hw, is_8723be, max_count); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Firmware is not ready to run!\n"); } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "Firmware is ready to run!\n"); } return 0; diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h index cf1cc5804d06bb117abe4c487a8f32a1f7b98458..3ebafc80972fc1a3f5aae3f9071e96749ba966d2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h @@ -30,7 +30,8 @@ #define REG_MCUFWDL 0x0080 #define FW_8192C_START_ADDRESS 0x1000 #define FW_8192C_PAGE_SIZE 4096 -#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 +#define FW_8723A_POLLING_TIMEOUT_COUNT 1000 +#define FW_8723B_POLLING_TIMEOUT_COUNT 6000 #define FW_8192C_POLLING_DELAY 5 #define MCUFWDL_RDY BIT(1) @@ -49,16 +50,23 @@ enum version_8723e { VERSION_UNKNOWN = 0xFF, }; -enum rtl8723ae_h2c_cmd { - H2C_AP_OFFLOAD = 0, - H2C_SETPWRMODE = 1, - H2C_JOINBSSRPT = 2, - H2C_RSVDPAGE = 3, - H2C_RSSI_REPORT = 4, - H2C_P2P_PS_CTW_CMD = 5, - H2C_P2P_PS_OFFLOAD = 6, - H2C_RA_MASK = 7, - MAX_H2CCMD +struct rtl8723e_firmware_header { + u16 signature; + u8 category; + u8 function; + u16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + u16 ramcodesize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; }; enum rtl8723be_cmd { @@ -92,25 +100,6 @@ enum rtl8723be_cmd { MAX_8723BE_H2CCMD }; -struct rtl92c_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable); @@ -120,7 +109,11 @@ void rtl8723_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, u32 size); void rtl8723_write_fw(struct ieee80211_hw *hw, enum version_8723e version, - u8 *buffer, u32 size); -int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be); -int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be); + u8 *buffer, u32 size, u8 max_page); +int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count); +int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count); +bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb); +void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c index d73b659bd2b5a2f0a6244c9c724840b2025bbc17..75cbd1509b52043863de0088ad18b307c412b85f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c @@ -43,9 +43,8 @@ u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw, returnvalue = (originalvalue & bitmask) >> bitshift; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n", - bitmask, regaddr, originalvalue); - + "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, + regaddr, originalvalue); return returnvalue; } EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg); @@ -57,8 +56,8 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 originalvalue, bitshift; RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - 
"regaddr(%#x), bitmask(%#x), data(%#x)\n", - regaddr, bitmask, data); + "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask, + data); if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); @@ -70,7 +69,7 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x), data(%#x)\n", - regaddr, bitmask, data); + regaddr, bitmask, data); } EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg); @@ -109,12 +108,15 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw, else tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | - (newoffset << 23) | BLSSIREADEDGE; + (newoffset << 23) | BLSSIREADEDGE; rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, tmplong & (~BLSSIREADEDGE)); mdelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); - mdelay(2); + mdelay(1); + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong | BLSSIREADEDGE); + mdelay(1); if (rfpath == RF90_PATH_A) rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); @@ -128,8 +130,8 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw, retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, BLSSIREADBACKDATA); RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "RFR-%d Addr[0x%x]= 0x%x\n", - rfpath, pphyreg->rf_rb, retvalue); + "RFR-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf_rb, retvalue); return retvalue; } EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read); @@ -153,8 +155,9 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw, data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "RFW-%d Addr[0x%x]= 0x%x\n", rfpath, - pphyreg->rf3wire_offset, data_and_addr); + "RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, + data_and_addr); } EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write); @@ -171,6 +174,8 @@ long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, break; case WIRELESS_MODE_G: case WIRELESS_MODE_N_24G: + offset = -8; + break; default: offset = -8; break; @@ -202,14 +207,14 @@ void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw) rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = - RFPGA0_XA_LSSIPARAMETER; + RFPGA0_XA_LSSIPARAMETER; rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = - RFPGA0_XB_LSSIPARAMETER; + RFPGA0_XB_LSSIPARAMETER; - rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER; rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; @@ -264,6 +269,7 @@ void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw) rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; + } EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def); @@ -384,14 +390,21 @@ 
EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers); void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, bool is_patha_on, bool is2t) { + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 pathon; u32 i; - pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; - if (!is2t) { - pathon = 0x0bdb25a0; - rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) { + pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; + if (!is2t) { + pathon = 0x0bdb25a0; + rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); + } else { + rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); + } } else { + /* rtl8723be */ + pathon = 0x01c00014; rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); } diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..87ad604a1eb3267a4756e4d36bcdea19e8502a5f --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile @@ -0,0 +1,19 @@ +obj-m := rtl8821ae.o + + +rtl8821ae-objs := \ + dm.o \ + fw.o \ + hw.o \ + led.o \ + phy.o \ + pwrseq.o \ + rf.o \ + sw.o \ + table.o \ + trx.o \ + + +obj-$(CONFIG_RTL8821AE) += rtl8821ae.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h new file mode 100644 index 0000000000000000000000000000000000000000..a730985ae81dc8449d0755d5f4814bed687d58ae --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h @@ -0,0 +1,450 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_DEF_H__ +#define __RTL8821AE_DEF_H__ + +/*--------------------------Define -------------------------------------------*/ +#define USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN 1 + +/* BIT 7 HT Rate*/ +/*TxHT = 0*/ +#define MGN_1M 0x02 +#define MGN_2M 0x04 +#define MGN_5_5M 0x0b +#define MGN_11M 0x16 + +#define MGN_6M 0x0c +#define MGN_9M 0x12 +#define MGN_12M 0x18 +#define MGN_18M 0x24 +#define MGN_24M 0x30 +#define MGN_36M 0x48 +#define MGN_48M 0x60 +#define MGN_54M 0x6c + +/* TxHT = 1 */ +#define MGN_MCS0 0x80 +#define MGN_MCS1 0x81 +#define MGN_MCS2 0x82 +#define MGN_MCS3 0x83 +#define MGN_MCS4 0x84 +#define MGN_MCS5 0x85 +#define MGN_MCS6 0x86 +#define MGN_MCS7 0x87 +#define MGN_MCS8 0x88 +#define MGN_MCS9 0x89 +#define MGN_MCS10 0x8a +#define MGN_MCS11 0x8b +#define MGN_MCS12 0x8c +#define MGN_MCS13 0x8d +#define MGN_MCS14 0x8e +#define MGN_MCS15 0x8f +/* VHT rate */ +#define MGN_VHT1SS_MCS0 0x90 +#define MGN_VHT1SS_MCS1 0x91 +#define MGN_VHT1SS_MCS2 0x92 +#define MGN_VHT1SS_MCS3 0x93 +#define MGN_VHT1SS_MCS4 0x94 +#define MGN_VHT1SS_MCS5 0x95 +#define MGN_VHT1SS_MCS6 0x96 +#define MGN_VHT1SS_MCS7 0x97 +#define MGN_VHT1SS_MCS8 0x98 +#define MGN_VHT1SS_MCS9 0x99 +#define MGN_VHT2SS_MCS0 0x9a +#define MGN_VHT2SS_MCS1 0x9b +#define MGN_VHT2SS_MCS2 0x9c +#define MGN_VHT2SS_MCS3 0x9d +#define MGN_VHT2SS_MCS4 0x9e +#define MGN_VHT2SS_MCS5 0x9f +#define MGN_VHT2SS_MCS6 0xa0 +#define MGN_VHT2SS_MCS7 0xa1 +#define MGN_VHT2SS_MCS8 0xa2 +#define MGN_VHT2SS_MCS9 0xa3 + +#define MGN_VHT3SS_MCS0 0xa4 +#define MGN_VHT3SS_MCS1 0xa5 +#define MGN_VHT3SS_MCS2 0xa6 +#define MGN_VHT3SS_MCS3 0xa7 +#define MGN_VHT3SS_MCS4 0xa8 +#define MGN_VHT3SS_MCS5 0xa9 +#define MGN_VHT3SS_MCS6 0xaa +#define MGN_VHT3SS_MCS7 0xab +#define MGN_VHT3SS_MCS8 0xac +#define MGN_VHT3SS_MCS9 0xad + +#define MGN_MCS0_SG 0xc0 +#define MGN_MCS1_SG 0xc1 +#define MGN_MCS2_SG 0xc2 +#define MGN_MCS3_SG 0xc3 +#define MGN_MCS4_SG 0xc4 +#define MGN_MCS5_SG 0xc5 +#define MGN_MCS6_SG 0xc6 +#define MGN_MCS7_SG 0xc7 +#define MGN_MCS8_SG 0xc8 +#define MGN_MCS9_SG 0xc9 +#define MGN_MCS10_SG 0xca +#define MGN_MCS11_SG 0xcb +#define MGN_MCS12_SG 0xcc +#define MGN_MCS13_SG 0xcd +#define MGN_MCS14_SG 0xce +#define MGN_MCS15_SG 0xcf + +#define MGN_UNKNOWN 0xff + +/* 30 ms */ +#define WIFI_NAV_UPPER_US 30000 +#define HAL_92C_NAV_UPPER_UNIT 128 + +#define HAL_RETRY_LIMIT_INFRA 48 +#define HAL_RETRY_LIMIT_AP_ADHOC 7 + +#define RESET_DELAY_8185 20 + +#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) +#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) + +#define NUM_OF_FIRMWARE_QUEUE 10 +#define NUM_OF_PAGES_IN_FW 0x100 +#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 + +#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 + +#define MAX_RX_DMA_BUFFER_SIZE 0x3E80 + +#define MAX_LINES_HWCONFIG_TXT 1000 +#define MAX_BYTES_LINE_HWCONFIG_TXT 256 + +#define SW_THREE_WIRE 0 +#define 
HW_THREE_WIRE 2 + +#define BT_DEMO_BOARD 0 +#define BT_QA_BOARD 1 +#define BT_FPGA 2 + +#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 +#define HAL_PRIME_CHNL_OFFSET_LOWER 1 +#define HAL_PRIME_CHNL_OFFSET_UPPER 2 + +#define MAX_H2C_QUEUE_NUM 10 + +#define RX_MPDU_QUEUE 0 +#define RX_CMD_QUEUE 1 +#define RX_MAX_QUEUE 2 +#define AC2QUEUEID(_AC) (_AC) + +#define MAX_RX_DMA_BUFFER_SIZE_8812 0x3E80 + +#define C2H_RX_CMD_HDR_LEN 8 +#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 0, 16) +#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 16, 8) +#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 24, 7) +#define GET_C2H_CMD_CONTINUE(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 31, 1) +#define GET_C2H_CMD_CONTENT(__prxhdr) \ + ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) + +#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) +#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) +#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) + +#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) + +#define CHIP_8812 BIT(2) +#define CHIP_8821 (BIT(0)|BIT(2)) + +#define CHIP_8821A (BIT(0)|BIT(2)) +#define NORMAL_CHIP BIT(3) +#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6))) +#define RF_TYPE_1T2R BIT(4) +#define RF_TYPE_2T2R BIT(5) +#define CHIP_VENDOR_UMC BIT(7) +#define B_CUT_VERSION BIT(12) +#define C_CUT_VERSION BIT(13) +#define D_CUT_VERSION ((BIT(12)|BIT(13))) +#define E_CUT_VERSION BIT(14) +#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) + +enum version_8821ae { + VERSION_TEST_CHIP_1T1R_8812 = 0x0004, + VERSION_TEST_CHIP_2T2R_8812 = 0x0024, + VERSION_NORMAL_TSMC_CHIP_1T1R_8812 = 0x100c, + VERSION_NORMAL_TSMC_CHIP_2T2R_8812 = 0x102c, + VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT = 0x200c, + VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT = 0x202c, + VERSION_TEST_CHIP_8821 = 0x0005, + VERSION_NORMAL_TSMC_CHIP_8821 = 0x000d, + VERSION_NORMAL_TSMC_CHIP_8821_B_CUT = 0x100d, + VERSION_UNKNOWN = 0xFF, +}; + +enum vht_data_sc { + VHT_DATA_SC_DONOT_CARE = 0, + VHT_DATA_SC_20_UPPER_OF_80MHZ = 1, + VHT_DATA_SC_20_LOWER_OF_80MHZ = 2, + VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3, + VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4, + VHT_DATA_SC_20_RECV1 = 5, + VHT_DATA_SC_20_RECV2 = 6, + VHT_DATA_SC_20_RECV3 = 7, + VHT_DATA_SC_20_RECV4 = 8, + VHT_DATA_SC_40_UPPER_OF_80MHZ = 9, + VHT_DATA_SC_40_LOWER_OF_80MHZ = 10, +}; + +/* MASK */ +#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) +#define CHIP_TYPE_MASK BIT(3) +#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6)) +#define MANUFACTUER_MASK BIT(7) +#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8)) +#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12)) + +/* Get element */ +#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK) +#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK) +#define 
GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK) +#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK) +#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) +#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) + +#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true) +#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\ + ? true : false) +#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\ + ? true : false) + +#define IS_8812_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8812) ? \ + true : false) +#define IS_8821_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8821) ? \ + true : false) + +#define IS_VENDOR_8812A_TEST_CHIP(version) ((IS_8812_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + false : true) : false) +#define IS_VENDOR_8812A_MP_CHIP(version) ((IS_8812_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + true : false) : false) +#define IS_VENDOR_8812A_C_CUT(version) ((IS_8812_SERIES(version)) ? \ + ((GET_CVID_CUT_VERSION(version) == \ + C_CUT_VERSION) ? \ + true : false) : false) + +#define IS_VENDOR_8821A_TEST_CHIP(version) ((IS_8821_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + false : true) : false) +#define IS_VENDOR_8821A_MP_CHIP(version) ((IS_8821_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + true : false) : false) +#define IS_VENDOR_8821A_B_CUT(version) ((IS_8821_SERIES(version)) ? \ + ((GET_CVID_CUT_VERSION(version) == \ + B_CUT_VERSION) ? \ + true : false) : false) +enum board_type { + ODM_BOARD_DEFAULT = 0, /* The DEFAULT case. */ + ODM_BOARD_MINICARD = BIT(0), /* 0 = non-mini card, 1 = mini card. */ + ODM_BOARD_SLIM = BIT(1), /* 0 = non-slim card, 1 = slim card */ + ODM_BOARD_BT = BIT(2), /* 0 = without BT card, 1 = with BT */ + ODM_BOARD_EXT_PA = BIT(3), /* 1 = existing 2G ext-PA */ + ODM_BOARD_EXT_LNA = BIT(4), /* 1 = existing 2G ext-LNA */ + ODM_BOARD_EXT_TRSW = BIT(5), /* 1 = existing ext-TRSW */ + ODM_BOARD_EXT_PA_5G = BIT(6), /* 1 = existing 5G ext-PA */ + ODM_BOARD_EXT_LNA_5G = BIT(7), /* 1 = existing 5G ext-LNA */ +}; + +enum rf_optype { + RF_OP_BY_SW_3WIRE = 0, + RF_OP_BY_FW, + RF_OP_MAX +}; + +enum rf_power_state { + RF_ON, + RF_OFF, + RF_SLEEP, + RF_SHUT_DOWN, +}; + +enum power_save_mode { + POWER_SAVE_MODE_ACTIVE, + POWER_SAVE_MODE_SAVE, +}; + +enum power_polocy_config { + POWERCFG_MAX_POWER_SAVINGS, + POWERCFG_GLOBAL_POWER_SAVINGS, + POWERCFG_LOCAL_POWER_SAVINGS, + POWERCFG_LENOVO, +}; + +enum interface_select_pci { + INTF_SEL1_MINICARD = 0, + INTF_SEL0_PCIE = 1, + INTF_SEL2_RSV = 2, + INTF_SEL3_RSV = 3, +}; + +enum hal_fw_c2h_cmd_id { + HAL_FW_C2H_CMD_READ_MACREG = 0, + HAL_FW_C2H_CMD_READ_BBREG = 1, + HAL_FW_C2H_CMD_READ_RFREG = 2, + HAL_FW_C2H_CMD_READ_EEPROM = 3, + HAL_FW_C2H_CMD_READ_EFUSE = 4, + HAL_FW_C2H_CMD_READ_CAM = 5, + HAL_FW_C2H_CMD_GET_BASICRATE = 6, + HAL_FW_C2H_CMD_GET_DATARATE = 7, + HAL_FW_C2H_CMD_SURVEY = 8, + HAL_FW_C2H_CMD_SURVEYDONE = 9, + HAL_FW_C2H_CMD_JOINBSS = 10, + HAL_FW_C2H_CMD_ADDSTA = 11, + HAL_FW_C2H_CMD_DELSTA = 12, + HAL_FW_C2H_CMD_ATIMDONE = 13, + HAL_FW_C2H_CMD_TX_REPORT = 14, + HAL_FW_C2H_CMD_CCX_REPORT = 15, + HAL_FW_C2H_CMD_DTM_REPORT = 16, + HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, + HAL_FW_C2H_CMD_C2HLBK = 18, + HAL_FW_C2H_CMD_C2HDBG = 19, + HAL_FW_C2H_CMD_C2HFEEDBACK = 20, + HAL_FW_C2H_CMD_MAX +}; + +enum rtl_desc_qsel { + QSLT_BK = 0x2, + QSLT_BE = 0x0, + QSLT_VI = 0x5, + QSLT_VO = 0x7, + QSLT_BEACON = 0x10, + QSLT_HIGH = 0x11, + QSLT_MGNT = 0x12, + QSLT_CMD = 0x13, +}; + 
+enum rtl_desc8821ae_rate { + DESC_RATE1M = 0x00, + DESC_RATE2M = 0x01, + DESC_RATE5_5M = 0x02, + DESC_RATE11M = 0x03, + + DESC_RATE6M = 0x04, + DESC_RATE9M = 0x05, + DESC_RATE12M = 0x06, + DESC_RATE18M = 0x07, + DESC_RATE24M = 0x08, + DESC_RATE36M = 0x09, + DESC_RATE48M = 0x0a, + DESC_RATE54M = 0x0b, + + DESC_RATEMCS0 = 0x0c, + DESC_RATEMCS1 = 0x0d, + DESC_RATEMCS2 = 0x0e, + DESC_RATEMCS3 = 0x0f, + DESC_RATEMCS4 = 0x10, + DESC_RATEMCS5 = 0x11, + DESC_RATEMCS6 = 0x12, + DESC_RATEMCS7 = 0x13, + DESC_RATEMCS8 = 0x14, + DESC_RATEMCS9 = 0x15, + DESC_RATEMCS10 = 0x16, + DESC_RATEMCS11 = 0x17, + DESC_RATEMCS12 = 0x18, + DESC_RATEMCS13 = 0x19, + DESC_RATEMCS14 = 0x1a, + DESC_RATEMCS15 = 0x1b, + + DESC_RATEVHT1SS_MCS0 = 0x2c, + DESC_RATEVHT1SS_MCS1 = 0x2d, + DESC_RATEVHT1SS_MCS2 = 0x2e, + DESC_RATEVHT1SS_MCS3 = 0x2f, + DESC_RATEVHT1SS_MCS4 = 0x30, + DESC_RATEVHT1SS_MCS5 = 0x31, + DESC_RATEVHT1SS_MCS6 = 0x32, + DESC_RATEVHT1SS_MCS7 = 0x33, + DESC_RATEVHT1SS_MCS8 = 0x34, + DESC_RATEVHT1SS_MCS9 = 0x35, + DESC_RATEVHT2SS_MCS0 = 0x36, + DESC_RATEVHT2SS_MCS1 = 0x37, + DESC_RATEVHT2SS_MCS2 = 0x38, + DESC_RATEVHT2SS_MCS3 = 0x39, + DESC_RATEVHT2SS_MCS4 = 0x3a, + DESC_RATEVHT2SS_MCS5 = 0x3b, + DESC_RATEVHT2SS_MCS6 = 0x3c, + DESC_RATEVHT2SS_MCS7 = 0x3d, + DESC_RATEVHT2SS_MCS8 = 0x3e, + DESC_RATEVHT2SS_MCS9 = 0x3f, +}; + +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, +}; + +struct phy_sts_cck_8821ae_t { + u8 adc_pwdb_X[4]; + u8 sq_rpt; + u8 cck_agc_rpt; +}; + +struct h2c_cmd_8821ae { + u8 element_id; + u32 cmd_len; + u8 *p_cmdbuffer; +}; + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c new file mode 100644 index 0000000000000000000000000000000000000000..9be106109921b5ba94cbc44dc88b83e35f6e2759 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c @@ -0,0 +1,3019 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../base.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "trx.h" +#include "../btcoexist/rtl_btc.h" + +static const u32 txscaling_tbl[TXSCALE_TABLE_SIZE] = { + 0x081, /* 0, -12.0dB */ + 0x088, /* 1, -11.5dB */ + 0x090, /* 2, -11.0dB */ + 0x099, /* 3, -10.5dB */ + 0x0A2, /* 4, -10.0dB */ + 0x0AC, /* 5, -9.5dB */ + 0x0B6, /* 6, -9.0dB */ + 0x0C0, /* 7, -8.5dB */ + 0x0CC, /* 8, -8.0dB */ + 0x0D8, /* 9, -7.5dB */ + 0x0E5, /* 10, -7.0dB */ + 0x0F2, /* 11, -6.5dB */ + 0x101, /* 12, -6.0dB */ + 0x110, /* 13, -5.5dB */ + 0x120, /* 14, -5.0dB */ + 0x131, /* 15, -4.5dB */ + 0x143, /* 16, -4.0dB */ + 0x156, /* 17, -3.5dB */ + 0x16A, /* 18, -3.0dB */ + 0x180, /* 19, -2.5dB */ + 0x197, /* 20, -2.0dB */ + 0x1AF, /* 21, -1.5dB */ + 0x1C8, /* 22, -1.0dB */ + 0x1E3, /* 23, -0.5dB */ + 0x200, /* 24, +0 dB */ + 0x21E, /* 25, +0.5dB */ + 0x23E, /* 26, +1.0dB */ + 0x261, /* 27, +1.5dB */ + 0x285, /* 28, +2.0dB */ + 0x2AB, /* 29, +2.5dB */ + 0x2D3, /* 30, +3.0dB */ + 0x2FE, /* 31, +3.5dB */ + 0x32B, /* 32, +4.0dB */ + 0x35C, /* 33, +4.5dB */ + 0x38E, /* 34, +5.0dB */ + 0x3C4, /* 35, +5.5dB */ + 0x3FE /* 36, +6.0dB */ +}; + +static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = { + 0x081, /* 0, -12.0dB */ + 0x088, /* 1, -11.5dB */ + 0x090, /* 2, -11.0dB */ + 0x099, /* 3, -10.5dB */ + 0x0A2, /* 4, -10.0dB */ + 0x0AC, /* 5, -9.5dB */ + 0x0B6, /* 6, -9.0dB */ + 0x0C0, /* 7, -8.5dB */ + 0x0CC, /* 8, -8.0dB */ + 0x0D8, /* 9, -7.5dB */ + 0x0E5, /* 10, -7.0dB */ + 0x0F2, /* 11, -6.5dB */ + 0x101, /* 12, -6.0dB */ + 0x110, /* 13, -5.5dB */ + 0x120, /* 14, -5.0dB */ + 0x131, /* 15, -4.5dB */ + 0x143, /* 16, -4.0dB */ + 0x156, /* 17, -3.5dB */ + 0x16A, /* 18, -3.0dB */ + 0x180, /* 19, -2.5dB */ + 0x197, /* 20, -2.0dB */ + 0x1AF, /* 21, -1.5dB */ + 0x1C8, /* 22, -1.0dB */ + 0x1E3, /* 23, -0.5dB */ + 0x200, /* 24, +0 dB */ + 0x21E, /* 25, +0.5dB */ + 0x23E, /* 26, +1.0dB */ + 0x261, /* 27, +1.5dB */ + 0x285, /* 28, +2.0dB */ + 0x2AB, /* 29, +2.5dB */ + 0x2D3, /* 30, +3.0dB */ + 0x2FE, /* 31, +3.5dB */ + 0x32B, /* 32, +4.0dB */ + 0x35C, /* 33, +4.5dB */ + 0x38E, /* 34, +5.0dB */ + 0x3C4, /* 35, +5.5dB */ + 0x3FE /* 36, +6.0dB */ +}; + +static const u32 ofdmswing_table[] = { + 0x0b40002d, /* 0, -15.0dB */ + 0x0c000030, /* 1, -14.5dB */ + 0x0cc00033, /* 2, -14.0dB */ + 0x0d800036, /* 3, -13.5dB */ + 0x0e400039, /* 4, -13.0dB */ + 0x0f00003c, /* 5, -12.5dB */ + 0x10000040, /* 6, -12.0dB */ + 0x11000044, /* 7, -11.5dB */ + 0x12000048, /* 8, -11.0dB */ + 0x1300004c, /* 9, -10.5dB */ + 0x14400051, /* 10, -10.0dB */ + 0x15800056, /* 11, -9.5dB */ + 0x16c0005b, /* 12, -9.0dB */ + 0x18000060, /* 13, -8.5dB */ + 0x19800066, /* 14, -8.0dB */ + 0x1b00006c, /* 15, -7.5dB */ + 0x1c800072, /* 16, -7.0dB */ + 0x1e400079, /* 17, -6.5dB */ + 0x20000080, /* 18, -6.0dB */ + 0x22000088, /* 19, -5.5dB */ + 0x24000090, /* 20, -5.0dB */ + 0x26000098, /* 21, -4.5dB */ + 0x288000a2, /* 22, -4.0dB */ + 0x2ac000ab, /* 23, -3.5dB */ + 0x2d4000b5, /* 24, -3.0dB */ + 0x300000c0, /* 25, -2.5dB */ + 0x32c000cb, /* 26, -2.0dB */ + 0x35c000d7, /* 27, -1.5dB */ + 0x390000e4, /* 28, -1.0dB */ + 0x3c8000f2, /* 29, -0.5dB */ + 0x40000100, /* 30, +0dB */ + 0x43c0010f, /* 31, +0.5dB */ + 0x47c0011f, /* 32, +1.0dB */ + 0x4c000130, /* 33, +1.5dB */ + 0x50800142, /* 34, +2.0dB */ + 0x55400155, /* 35, +2.5dB */ + 0x5a400169, /* 36, +3.0dB */ + 
0x5fc0017f, /* 37, +3.5dB */ + 0x65400195, /* 38, +4.0dB */ + 0x6b8001ae, /* 39, +4.5dB */ + 0x71c001c7, /* 40, +5.0dB */ + 0x788001e2, /* 41, +5.5dB */ + 0x7f8001fe /* 42, +6.0dB */ +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */ + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */ + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */ + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */ + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */ + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */ + {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */ + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */ + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */ + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */ + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */ + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */ + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */ + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */ + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */ + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */ + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */ + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */ + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */ + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */ + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */ + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */ + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */ + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */ + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */ + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */ + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */ + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */ + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */ + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */ + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */ + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */ +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */ + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */ + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */ + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */ + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */ + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */ + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */ + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */ + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */ + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */ + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */ + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */ + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */ + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, 
-9.5dB */ + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */ + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */ + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */ + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */ + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */ + {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */ + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */ + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */ + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */ + {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */ + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */ + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */ + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */ + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */ + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */ + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */ + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */ + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */ +}; + +static const u32 edca_setting_dl[PEER_MAX] = { + 0xa44f, /* 0 UNKNOWN */ + 0x5ea44f, /* 1 REALTEK_90 */ + 0x5e4322, /* 2 REALTEK_92SE */ + 0x5ea42b, /* 3 BROAD */ + 0xa44f, /* 4 RAL */ + 0xa630, /* 5 ATH */ + 0x5ea630, /* 6 CISCO */ + 0x5ea42b, /* 7 MARVELL */ +}; + +static const u32 edca_setting_ul[PEER_MAX] = { + 0x5e4322, /* 0 UNKNOWN */ + 0xa44f, /* 1 REALTEK_90 */ + 0x5ea44f, /* 2 REALTEK_92SE */ + 0x5ea32b, /* 3 BROAD */ + 0x5ea422, /* 4 RAL */ + 0x5ea322, /* 5 ATH */ + 0x3ea430, /* 6 CISCO */ + 0x5ea44f, /* 7 MARV */ +}; + +static u8 rtl8818e_delta_swing_table_idx_24gb_p[] = { + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, + 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9}; + +static u8 rtl8818e_delta_swing_table_idx_24gb_n[] = { + 0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, + 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11}; + +static u8 rtl8812ae_delta_swing_table_idx_24gb_n[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; + +static u8 rtl8812ae_delta_swing_table_idx_24gb_p[] = { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; + +static u8 rtl8812ae_delta_swing_table_idx_24ga_n[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; + +static u8 rtl8812ae_delta_swing_table_idx_24ga_p[] = { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; + +static u8 rtl8812ae_delta_swing_table_idx_24gcckb_n[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; + +static u8 rtl8812ae_delta_swing_table_idx_24gcckb_p[] = { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; + +static u8 rtl8812ae_delta_swing_table_idx_24gccka_n[] = { + 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; + +static u8 rtl8812ae_delta_swing_table_idx_24gccka_p[] = { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; + +static u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 
7, + 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, + 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, + 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18}, +}; + +static u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, + 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, + 9, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13}, + {0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, + 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18}, +}; + +static u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 9, 9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, + 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static u8 rtl8821ae_delta_swing_table_idx_24gb_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; + +static u8 rtl8821ae_delta_swing_table_idx_24gb_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; + +static u8 rtl8821ae_delta_swing_table_idx_24ga_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; + +static u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; + +static u8 rtl8821ae_delta_swing_table_idx_24gcckb_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; + +static u8 rtl8821ae_delta_swing_table_idx_24gcckb_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; + +static u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; + +static u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; + +static u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static u8 rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 
16, 16, 16, 16, 16, 16}, +}; + +static u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static u8 rtl8821ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw, + u8 type, u8 *pdirection, + u32 *poutwrite_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 pwr_val = 0; + + if (type == 0) { + if (rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] <= + rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A]) { + *pdirection = 1; + pwr_val = rtldm->swing_idx_ofdm_base[RF90_PATH_A] - + rtldm->swing_idx_ofdm[RF90_PATH_A]; + } else { + *pdirection = 2; + pwr_val = rtldm->swing_idx_ofdm[RF90_PATH_A] - + rtldm->swing_idx_ofdm_base[RF90_PATH_A]; + } + } else if (type == 1) { + if (rtldm->swing_idx_cck <= rtldm->swing_idx_cck_base) { + *pdirection = 1; + pwr_val = rtldm->swing_idx_cck_base - + rtldm->swing_idx_cck; + } else { + *pdirection = 2; + pwr_val = rtldm->swing_idx_cck - + rtldm->swing_idx_cck_base; + } + } + + if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1)) + pwr_val = TXPWRTRACK_MAX_IDX; + + *poutwrite_val = pwr_val | (pwr_val << 8)| + (pwr_val << 16)| + (pwr_val << 24); +} + +void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + u8 p = 0; + + rtldm->swing_idx_cck_base = rtldm->default_cck_index; + rtldm->swing_idx_cck = rtldm->default_cck_index; + rtldm->cck_index = 0; + + for (p = RF90_PATH_A; p <= RF90_PATH_B; ++p) { + rtldm->swing_idx_ofdm_base[p] = rtldm->default_ofdm_index; + rtldm->swing_idx_ofdm[p] = rtldm->default_ofdm_index; + rtldm->ofdm_index[p] = rtldm->default_ofdm_index; + + rtldm->power_index_offset[p] = 0; + rtldm->delta_power_index[p] = 0; + rtldm->delta_power_index_last[p] = 0; + /*Initial Mix mode power tracking*/ + rtldm->absolute_ofdm_swing_idx[p] = 0; + rtldm->remnant_ofdm_swing_idx[p] = 0; + } + /*Initial at Modify Tx Scaling Mode*/ + rtldm->modify_txagc_flag_path_a = false; + /*Initial at Modify Tx Scaling Mode*/ + rtldm->modify_txagc_flag_path_b = false; + rtldm->remnant_cck_idx = 0; + rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter; +} + +static u8 rtl8821ae_dm_get_swing_index(struct ieee80211_hw *hw) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 i = 0; + u32 bb_swing; + + bb_swing = phy_get_tx_swing_8812A(hw, rtlhal->current_bandtype, + RF90_PATH_A); + + for (i = 0; i < TXSCALE_TABLE_SIZE; ++i) + if (bb_swing == rtl8821ae_txscaling_table[i]) + break; + + return i; +} + +void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = 
rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + u8 default_swing_index = 0; + u8 p = 0; + + rtlpriv->dm.txpower_track_control = true; + rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter; + default_swing_index = rtl8821ae_dm_get_swing_index(hw); + + rtldm->default_ofdm_index = + (default_swing_index == TXSCALE_TABLE_SIZE) ? + 24 : default_swing_index; + rtldm->default_cck_index = 24; + + rtldm->swing_idx_cck_base = rtldm->default_cck_index; + rtldm->cck_index = rtldm->default_cck_index; + + for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) { + rtldm->swing_idx_ofdm_base[p] = + rtldm->default_ofdm_index; + rtldm->ofdm_index[p] = rtldm->default_ofdm_index; + rtldm->delta_power_index[p] = 0; + rtldm->power_index_offset[p] = 0; + rtldm->delta_power_index_last[p] = 0; + } +} + +static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); + dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_digtable->rx_gain_max = DM_DIG_MAX; + dm_digtable->rx_gain_min = DM_DIG_MIN; + dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; + dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; + dm_digtable->pre_cck_cca_thres = 0xff; + dm_digtable->cur_cck_cca_thres = 0x83; + dm_digtable->forbidden_igi = DM_DIG_MIN; + dm_digtable->large_fa_hit = 0; + dm_digtable->recover_cnt = 0; + dm_digtable->dig_dynamic_min = DM_DIG_MIN; + dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN; + dm_digtable->media_connect_0 = false; + dm_digtable->media_connect_1 = false; + rtlpriv->dm.dm_initialgain_enable = true; + dm_digtable->bt30_cur_igi = 0x32; +} + +void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.current_turbo_edca = false; + rtlpriv->dm.is_any_nonbepkts = false; + rtlpriv->dm.is_cur_rdlstate = false; +} + +void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *p_ra = &rtlpriv->ra; + + p_ra->ratr_state = DM_RATR_STA_INIT; + p_ra->pre_ratr_state = DM_RATR_STA_INIT; + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) + rtlpriv->dm.useramask = true; + else + rtlpriv->dm.useramask = false; + + p_ra->high_rssi_thresh_for_ra = 50; + p_ra->low_rssi_thresh_for_ra40m = 20; +} + +static void rtl8821ae_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap; + + rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11)); + rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL; +} + +static void rtl8821ae_dm_common_info_self_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 tmp; + + rtlphy->cck_high_power = + (bool)rtl_get_bbreg(hw, ODM_REG_CCK_RPT_FORMAT_11AC, + ODM_BIT_CCK_RPT_FORMAT_11AC); + + tmp = (u8)rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, + ODM_BIT_BB_RX_PATH_11AC); + if (tmp & BIT(0)) + rtlpriv->dm.rfpath_rxenable[0] = 
true; + if (tmp & BIT(1)) + rtlpriv->dm.rfpath_rxenable[1] = true; +} + +void rtl8821ae_dm_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtl8821ae_dm_common_info_self_init(hw); + rtl8821ae_dm_diginit(hw); + rtl8821ae_dm_init_rate_adaptive_mask(hw); + rtl8821ae_dm_init_edca_turbo(hw); + rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw); + rtl8821ae_dm_init_dynamic_atc_switch(hw); +} + +static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *rtl_dm_dig = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); + + /* Determine the minimum RSSI */ + if ((mac->link_state < MAC80211_LINKED) && + (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { + rtl_dm_dig->min_undec_pwdb_for_dm = 0; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "Not connected to any\n"); + } + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Client PWDB = 0x%lx\n", + rtlpriv->dm.entry_min_undec_sm_pwdb); + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "STA Default Port PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Ext Port or disconnet PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "MinUndecoratedPWDBForDM =%d\n", + rtl_dm_dig->min_undec_pwdb_for_dm); +} + +static void rtl8812ae_dm_rssi_dump_to_register(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, RA_RSSI_DUMP, + rtlpriv->stats.rx_rssi_percentage[0]); + rtl_write_byte(rtlpriv, RB_RSSI_DUMP, + rtlpriv->stats.rx_rssi_percentage[1]); + + /* Rx EVM*/ + rtl_write_byte(rtlpriv, RS1_RX_EVM_DUMP, + rtlpriv->stats.rx_evm_dbm[0]); + rtl_write_byte(rtlpriv, RS2_RX_EVM_DUMP, + rtlpriv->stats.rx_evm_dbm[1]); + + /*Rx SNR*/ + rtl_write_byte(rtlpriv, RA_RX_SNR_DUMP, + (u8)(rtlpriv->stats.rx_snr_db[0])); + rtl_write_byte(rtlpriv, RB_RX_SNR_DUMP, + (u8)(rtlpriv->stats.rx_snr_db[1])); + + /*Rx Cfo_Short*/ + rtl_write_word(rtlpriv, RA_CFO_SHORT_DUMP, + rtlpriv->stats.rx_cfo_short[0]); + rtl_write_word(rtlpriv, RB_CFO_SHORT_DUMP, + rtlpriv->stats.rx_cfo_short[1]); + + /*Rx Cfo_Tail*/ + rtl_write_word(rtlpriv, RA_CFO_LONG_DUMP, + rtlpriv->stats.rx_cfo_tail[0]); + rtl_write_word(rtlpriv, RB_CFO_LONG_DUMP, + rtlpriv->stats.rx_cfo_tail[1]); +} + +static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *drv_priv; + u8 h2c_parameter[4] = { 0 }; + long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; + u8 stbc_tx = 0; + u64 cur_txokcnt = 0, cur_rxokcnt = 0; + static u64 last_txokcnt = 0, last_rxokcnt; + + cur_txokcnt = rtlpriv->stats.txbytesunicast - last_txokcnt; + cur_rxokcnt = rtlpriv->stats.rxbytesunicast - 
last_rxokcnt; + last_txokcnt = rtlpriv->stats.txbytesunicast; + last_rxokcnt = rtlpriv->stats.rxbytesunicast; + if (cur_rxokcnt > (last_txokcnt * 6)) + h2c_parameter[3] = 0x01; + else + h2c_parameter[3] = 0x00; + + /* AP & ADHOC & MESH */ + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + if (drv_priv->rssi_stat.undec_sm_pwdb < + tmp_entry_min_pwdb) + tmp_entry_min_pwdb = + drv_priv->rssi_stat.undec_sm_pwdb; + if (drv_priv->rssi_stat.undec_sm_pwdb > + tmp_entry_max_pwdb) + tmp_entry_max_pwdb = + drv_priv->rssi_stat.undec_sm_pwdb; + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + /* If associated entry is found */ + if (tmp_entry_max_pwdb != 0) { + rtlpriv->dm.entry_max_undec_sm_pwdb = + tmp_entry_max_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMaxPWDB = 0x%lx(%ld)\n", + tmp_entry_max_pwdb, tmp_entry_max_pwdb); + } else { + rtlpriv->dm.entry_max_undec_sm_pwdb = 0; + } + /* If associated entry is found */ + if (tmp_entry_min_pwdb != 0xff) { + rtlpriv->dm.entry_min_undec_sm_pwdb = + tmp_entry_min_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, + "EntryMinPWDB = 0x%lx(%ld)\n", + tmp_entry_min_pwdb, tmp_entry_min_pwdb); + } else { + rtlpriv->dm.entry_min_undec_sm_pwdb = 0; + } + } + /* Indicate Rx signal strength to FW. */ + if (rtlpriv->dm.useramask) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + if (mac->mode == WIRELESS_MODE_AC_24G || + mac->mode == WIRELESS_MODE_AC_5G || + mac->mode == WIRELESS_MODE_AC_ONLY) + stbc_tx = (mac->vht_cur_stbc & + STBC_VHT_ENABLE_TX) ? 1 : 0; + else + stbc_tx = (mac->ht_cur_stbc & + STBC_HT_ENABLE_TX) ? 1 : 0; + h2c_parameter[3] |= stbc_tx << 1; + } + h2c_parameter[2] = + (u8)(rtlpriv->dm.undec_sm_pwdb & 0xFF); + h2c_parameter[1] = 0x20; + h2c_parameter[0] = 0; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_21AE_REPORT, 4, + h2c_parameter); + else + rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_21AE_REPORT, 3, + h2c_parameter); + } else { + rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); + } + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_dm_rssi_dump_to_register(hw); + rtl8821ae_dm_find_minimum_rssi(hw); + dm_digtable->rssi_val_min = rtlpriv->dm_digtable.min_undec_pwdb_for_dm; +} + +void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + if (dm_digtable->cur_cck_cca_thres != current_cca) + rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11AC, current_cca); + + dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres; + dm_digtable->cur_cck_cca_thres = current_cca; +} + +void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + if (dm_digtable->stop_dig) + return; + + if (dm_digtable->cur_igvalue != current_igi) { + rtl_set_bbreg(hw, DM_REG_IGI_A_11AC, + DM_BIT_IGI_11AC, current_igi); + if (rtlpriv->phy.rf_type != RF_1T1R) + rtl_set_bbreg(hw, DM_REG_IGI_B_11AC, + DM_BIT_IGI_11AC, current_igi); + } + dm_digtable->cur_igvalue = current_igi; +} + +static void rtl8821ae_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = 
rtl_hal(rtl_priv(hw)); + u8 dig_dynamic_min; + u8 dig_max_of_min; + bool first_connect, first_disconnect; + u8 dm_dig_max, dm_dig_min, offset; + u8 current_igi = dm_digtable->cur_igvalue; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "\n"); + + if (mac->act_scanning) { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Return: In Scan Progress\n"); + return; + } + + /*add by Neil Chen to avoid PSD is processing*/ + dig_dynamic_min = dm_digtable->dig_dynamic_min; + first_connect = (mac->link_state >= MAC80211_LINKED) && + (!dm_digtable->media_connect_0); + first_disconnect = (mac->link_state < MAC80211_LINKED) && + (dm_digtable->media_connect_0); + + /*1 Boundary Decision*/ + + dm_dig_max = 0x5A; + + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE) + dm_dig_min = DM_DIG_MIN; + else + dm_dig_min = 0x1C; + + dig_max_of_min = DM_DIG_MAX_AP; + + if (mac->link_state >= MAC80211_LINKED) { + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE) + offset = 20; + else + offset = 10; + + if ((dm_digtable->rssi_val_min + offset) > dm_dig_max) + dm_digtable->rx_gain_max = dm_dig_max; + else if ((dm_digtable->rssi_val_min + offset) < dm_dig_min) + dm_digtable->rx_gain_max = dm_dig_min; + else + dm_digtable->rx_gain_max = + dm_digtable->rssi_val_min + offset; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x", + dm_digtable->rssi_val_min, + dm_digtable->rx_gain_max); + if (rtlpriv->dm.one_entry_only) { + offset = 0; + + if (dm_digtable->rssi_val_min - offset < dm_dig_min) + dig_dynamic_min = dm_dig_min; + else if (dm_digtable->rssi_val_min - + offset > dig_max_of_min) + dig_dynamic_min = dig_max_of_min; + else + dig_dynamic_min = + dm_digtable->rssi_val_min - offset; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n", + dig_dynamic_min); + } else { + dig_dynamic_min = dm_dig_min; + } + } else { + dm_digtable->rx_gain_max = dm_dig_max; + dig_dynamic_min = dm_dig_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "No Link\n"); + } + + if (rtlpriv->falsealm_cnt.cnt_all > 10000) { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Abnornally false alarm case.\n"); + + if (dm_digtable->large_fa_hit != 3) + dm_digtable->large_fa_hit++; + if (dm_digtable->forbidden_igi < current_igi) { + dm_digtable->forbidden_igi = current_igi; + dm_digtable->large_fa_hit = 1; + } + + if (dm_digtable->large_fa_hit >= 3) { + if ((dm_digtable->forbidden_igi + 1) > + dm_digtable->rx_gain_max) + dm_digtable->rx_gain_min = + dm_digtable->rx_gain_max; + else + dm_digtable->rx_gain_min = + (dm_digtable->forbidden_igi + 1); + dm_digtable->recover_cnt = 3600; + } + } else { + /*Recovery mechanism for IGI lower bound*/ + if (dm_digtable->recover_cnt != 0) { + dm_digtable->recover_cnt--; + } else { + if (dm_digtable->large_fa_hit < 3) { + if ((dm_digtable->forbidden_igi - 1) < + dig_dynamic_min) { + dm_digtable->forbidden_igi = + dig_dynamic_min; + dm_digtable->rx_gain_min = + dig_dynamic_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Normal Case: At Lower Bound\n"); + } else { + dm_digtable->forbidden_igi--; + dm_digtable->rx_gain_min = + (dm_digtable->forbidden_igi + 1); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Normal Case: Approach Lower Bound\n"); + } + } else { + dm_digtable->large_fa_hit = 0; + } + } + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "pDM_DigTable->LargeFAHit=%d\n", + dm_digtable->large_fa_hit); + + if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) + dm_digtable->rx_gain_min = dm_dig_min; + + if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max) + 
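+		/* keep the DIG/IGI lower bound from exceeding the upper bound */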
dm_digtable->rx_gain_min = dm_digtable->rx_gain_max; + + /*Adjust initial gain by false alarm*/ + if (mac->link_state >= MAC80211_LINKED) { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "DIG AfterLink\n"); + if (first_connect) { + if (dm_digtable->rssi_val_min <= dig_max_of_min) + current_igi = dm_digtable->rssi_val_min; + else + current_igi = dig_max_of_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "First Connect\n"); + } else { + if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2) + current_igi = current_igi + 4; + else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1) + current_igi = current_igi + 2; + else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) + current_igi = current_igi - 2; + + if ((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) && + (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) { + current_igi = dm_digtable->rx_gain_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n"); + } + } + } else { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "DIG BeforeLink\n"); + if (first_disconnect) { + current_igi = dm_digtable->rx_gain_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "First DisConnect\n"); + } else { + /* 2012.03.30 LukeLee: enable DIG before + * link but with very high thresholds + */ + if (rtlpriv->falsealm_cnt.cnt_all > 2000) + current_igi = current_igi + 4; + else if (rtlpriv->falsealm_cnt.cnt_all > 600) + current_igi = current_igi + 2; + else if (rtlpriv->falsealm_cnt.cnt_all < 300) + current_igi = current_igi - 2; + + if (current_igi >= 0x3e) + current_igi = 0x3e; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "England DIG\n"); + } + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "DIG End Adjust IGI\n"); + /* Check initial gain by upper/lower bound*/ + + if (current_igi > dm_digtable->rx_gain_max) + current_igi = dm_digtable->rx_gain_max; + if (current_igi < dm_digtable->rx_gain_min) + current_igi = dm_digtable->rx_gain_min; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "rx_gain_max=0x%x, rx_gain_min=0x%x\n", + dm_digtable->rx_gain_max, dm_digtable->rx_gain_min); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "CurIGValue=0x%x\n", current_igi); + + rtl8821ae_dm_write_dig(hw, current_igi); + dm_digtable->media_connect_0 = + ((mac->link_state >= MAC80211_LINKED) ? 
true : false); + dm_digtable->dig_dynamic_min = dig_dynamic_min; +} + +static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 cnt = 0; + struct rtl_sta_info *drv_priv; + + rtlpriv->dm.tx_rate = 0xff; + + rtlpriv->dm.one_entry_only = false; + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION && + rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + rtlpriv->dm.one_entry_only = true; + return; + } + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) + cnt++; + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + if (cnt == 1) + rtlpriv->dm.one_entry_only = true; + } +} + +static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; + u32 cck_enable = 0; + + /*read OFDM FA counter*/ + falsealm_cnt->cnt_ofdm_fail = + rtl_get_bbreg(hw, ODM_REG_OFDM_FA_11AC, BMASKLWORD); + falsealm_cnt->cnt_cck_fail = + rtl_get_bbreg(hw, ODM_REG_CCK_FA_11AC, BMASKLWORD); + + cck_enable = rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, BIT(28)); + if (cck_enable) /*if(pDM_Odm->pBandType == ODM_BAND_2_4G)*/ + falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + + falsealm_cnt->cnt_cck_fail; + else + falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail; + + /*reset OFDM FA coutner*/ + rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1); + rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0); + /* reset CCK FA counter*/ + rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0); + rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Cck_fail=%d\n", + falsealm_cnt->cnt_cck_fail); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cnt_ofdm_fail=%d\n", + falsealm_cnt->cnt_ofdm_fail); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Total False Alarm=%d\n", + falsealm_cnt->cnt_all); +} + +static void rtl8812ae_dm_check_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, + BIT(17) | BIT(16), 0x03); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Trigger 8812 Thermal Meter!!\n"); + tm_trigger = 1; + return; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Schedule TxPowerTracking direct call!!\n"); + rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw); + tm_trigger = 0; +} + +static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (mac->link_state >= MAC80211_LINKED) { + if (rtldm->linked_interval < 3) + rtldm->linked_interval++; + + if (rtldm->linked_interval == 2) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_phy_iq_calibrate(hw, false); + else + rtl8821ae_phy_iq_calibrate(hw, false); + } + } else { + rtldm->linked_interval = 0; + } +} + +static void rtl8812ae_get_delta_swing_table(struct ieee80211_hw *hw, + u8 **up_a, u8 **down_a, + u8 **up_b, u8 **down_b) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 channel = 
rtlphy->current_channel; + u8 rate = rtldm->tx_rate; + + if (1 <= channel && channel <= 14) { + if (RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) { + *up_a = rtl8812ae_delta_swing_table_idx_24gccka_p; + *down_a = rtl8812ae_delta_swing_table_idx_24gccka_n; + *up_b = rtl8812ae_delta_swing_table_idx_24gcckb_p; + *down_b = rtl8812ae_delta_swing_table_idx_24gcckb_n; + } else { + *up_a = rtl8812ae_delta_swing_table_idx_24ga_p; + *down_a = rtl8812ae_delta_swing_table_idx_24ga_n; + *up_b = rtl8812ae_delta_swing_table_idx_24gb_p; + *down_b = rtl8812ae_delta_swing_table_idx_24gb_n; + } + } else if (36 <= channel && channel <= 64) { + *up_a = rtl8812ae_delta_swing_table_idx_5ga_p[0]; + *down_a = rtl8812ae_delta_swing_table_idx_5ga_n[0]; + *up_b = rtl8812ae_delta_swing_table_idx_5gb_p[0]; + *down_b = rtl8812ae_delta_swing_table_idx_5gb_n[0]; + } else if (100 <= channel && channel <= 140) { + *up_a = rtl8812ae_delta_swing_table_idx_5ga_p[1]; + *down_a = rtl8812ae_delta_swing_table_idx_5ga_n[1]; + *up_b = rtl8812ae_delta_swing_table_idx_5gb_p[1]; + *down_b = rtl8812ae_delta_swing_table_idx_5gb_n[1]; + } else if (149 <= channel && channel <= 173) { + *up_a = rtl8812ae_delta_swing_table_idx_5ga_p[2]; + *down_a = rtl8812ae_delta_swing_table_idx_5ga_n[2]; + *up_b = rtl8812ae_delta_swing_table_idx_5gb_p[2]; + *down_b = rtl8812ae_delta_swing_table_idx_5gb_n[2]; + } else { + *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; + *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; + *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + } +} + +void rtl8821ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 p = 0; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Get C2H Command! 
Rate=0x%x\n", rate); + + rtldm->tx_rate = rate; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, RF90_PATH_A, 0); + } else { + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0); + } +} + +u8 rtl8821ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 ret_rate = MGN_1M; + + switch (rate) { + case DESC_RATE1M: + ret_rate = MGN_1M; + break; + case DESC_RATE2M: + ret_rate = MGN_2M; + break; + case DESC_RATE5_5M: + ret_rate = MGN_5_5M; + break; + case DESC_RATE11M: + ret_rate = MGN_11M; + break; + case DESC_RATE6M: + ret_rate = MGN_6M; + break; + case DESC_RATE9M: + ret_rate = MGN_9M; + break; + case DESC_RATE12M: + ret_rate = MGN_12M; + break; + case DESC_RATE18M: + ret_rate = MGN_18M; + break; + case DESC_RATE24M: + ret_rate = MGN_24M; + break; + case DESC_RATE36M: + ret_rate = MGN_36M; + break; + case DESC_RATE48M: + ret_rate = MGN_48M; + break; + case DESC_RATE54M: + ret_rate = MGN_54M; + break; + case DESC_RATEMCS0: + ret_rate = MGN_MCS0; + break; + case DESC_RATEMCS1: + ret_rate = MGN_MCS1; + break; + case DESC_RATEMCS2: + ret_rate = MGN_MCS2; + break; + case DESC_RATEMCS3: + ret_rate = MGN_MCS3; + break; + case DESC_RATEMCS4: + ret_rate = MGN_MCS4; + break; + case DESC_RATEMCS5: + ret_rate = MGN_MCS5; + break; + case DESC_RATEMCS6: + ret_rate = MGN_MCS6; + break; + case DESC_RATEMCS7: + ret_rate = MGN_MCS7; + break; + case DESC_RATEMCS8: + ret_rate = MGN_MCS8; + break; + case DESC_RATEMCS9: + ret_rate = MGN_MCS9; + break; + case DESC_RATEMCS10: + ret_rate = MGN_MCS10; + break; + case DESC_RATEMCS11: + ret_rate = MGN_MCS11; + break; + case DESC_RATEMCS12: + ret_rate = MGN_MCS12; + break; + case DESC_RATEMCS13: + ret_rate = MGN_MCS13; + break; + case DESC_RATEMCS14: + ret_rate = MGN_MCS14; + break; + case DESC_RATEMCS15: + ret_rate = MGN_MCS15; + break; + case DESC_RATEVHT1SS_MCS0: + ret_rate = MGN_VHT1SS_MCS0; + break; + case DESC_RATEVHT1SS_MCS1: + ret_rate = MGN_VHT1SS_MCS1; + break; + case DESC_RATEVHT1SS_MCS2: + ret_rate = MGN_VHT1SS_MCS2; + break; + case DESC_RATEVHT1SS_MCS3: + ret_rate = MGN_VHT1SS_MCS3; + break; + case DESC_RATEVHT1SS_MCS4: + ret_rate = MGN_VHT1SS_MCS4; + break; + case DESC_RATEVHT1SS_MCS5: + ret_rate = MGN_VHT1SS_MCS5; + break; + case DESC_RATEVHT1SS_MCS6: + ret_rate = MGN_VHT1SS_MCS6; + break; + case DESC_RATEVHT1SS_MCS7: + ret_rate = MGN_VHT1SS_MCS7; + break; + case DESC_RATEVHT1SS_MCS8: + ret_rate = MGN_VHT1SS_MCS8; + break; + case DESC_RATEVHT1SS_MCS9: + ret_rate = MGN_VHT1SS_MCS9; + break; + case DESC_RATEVHT2SS_MCS0: + ret_rate = MGN_VHT2SS_MCS0; + break; + case DESC_RATEVHT2SS_MCS1: + ret_rate = MGN_VHT2SS_MCS1; + break; + case DESC_RATEVHT2SS_MCS2: + ret_rate = MGN_VHT2SS_MCS2; + break; + case DESC_RATEVHT2SS_MCS3: + ret_rate = MGN_VHT2SS_MCS3; + break; + case DESC_RATEVHT2SS_MCS4: + ret_rate = MGN_VHT2SS_MCS4; + break; + case DESC_RATEVHT2SS_MCS5: + ret_rate = MGN_VHT2SS_MCS5; + break; + case DESC_RATEVHT2SS_MCS6: + ret_rate = MGN_VHT2SS_MCS6; + break; + case DESC_RATEVHT2SS_MCS7: + ret_rate = MGN_VHT2SS_MCS7; + break; + case DESC_RATEVHT2SS_MCS8: + ret_rate = MGN_VHT2SS_MCS8; + break; + case DESC_RATEVHT2SS_MCS9: + ret_rate = MGN_VHT2SS_MCS9; + break; + default: + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "HwRateToMRate8812(): Non supported Rate [%x]!!!\n", + rate); + break; + } + return ret_rate; +} + +/*----------------------------------------------------------------------------- + * 
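+ * Note: this header block was carried over from the 88E driver; the
+ * function it actually describes here is rtl8812ae_dm_txpwr_track_set_pwr().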
Function: odm_TxPwrTrackSetPwr88E() + * + * Overview: 88E change all channel tx power accordign to flag. + * OFDM & CCK are all different. + * + * Input: NONE + * + * Output: NONE + * + * Return: NONE + * + * Revised History: + * When Who Remark + * 04/23/2012 MHC Create Version 0. + * + *--------------------------------------------------------------------------- + */ +void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rf_path, u8 channel_mapped_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 final_swing_idx[2]; + u8 pwr_tracking_limit = 26; /*+1.0dB*/ + u8 tx_rate = 0xFF; + char final_ofdm_swing_index = 0; + + if (rtldm->tx_rate != 0xFF) + tx_rate = + rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + /*20130429 Mimic Modify High Rate BBSwing Limit.*/ + if (tx_rate != 0xFF) { + /*CCK*/ + if ((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M)) + pwr_tracking_limit = 32; /*+4dB*/ + /*OFDM*/ + else if ((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M)) + pwr_tracking_limit = 30; /*+3dB*/ + else if (tx_rate == MGN_54M) + pwr_tracking_limit = 28; /*+2dB*/ + /*HT*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) + pwr_tracking_limit = 28; /*+2dB*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) + pwr_tracking_limit = 28; /*+2dB*/ + + /*2 VHT*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_VHT1SS_MCS0) && + (tx_rate <= MGN_VHT1SS_MCS2)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_VHT1SS_MCS3) && + (tx_rate <= MGN_VHT1SS_MCS4)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_VHT1SS_MCS5) && + (tx_rate <= MGN_VHT1SS_MCS6)) + pwr_tracking_limit = 28; /*+2dB*/ + else if (tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if (tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if (tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_VHT2SS_MCS0) && + (tx_rate <= MGN_VHT2SS_MCS2)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_VHT2SS_MCS3) && + (tx_rate <= MGN_VHT2SS_MCS4)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_VHT2SS_MCS5) && + (tx_rate <= MGN_VHT2SS_MCS6)) + pwr_tracking_limit = 28; /*+2dB*/ + else if (tx_rate == MGN_VHT2SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if (tx_rate == MGN_VHT2SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if (tx_rate == MGN_VHT2SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + else + pwr_tracking_limit = 24; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxRate=0x%x, PwrTrackingLimit=%d\n", + tx_rate, pwr_tracking_limit); + + if (method == BBSWING) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + + if (rf_path == 
RF90_PATH_A) { + u32 tmp; + + final_swing_idx[RF90_PATH_A] = + (rtldm->ofdm_index[RF90_PATH_A] > + pwr_tracking_limit) ? + pwr_tracking_limit : + rtldm->ofdm_index[RF90_PATH_A]; + tmp = final_swing_idx[RF90_PATH_A]; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n", + rtldm->ofdm_index[RF90_PATH_A], + final_swing_idx[RF90_PATH_A]); + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[tmp]); + } else { + u32 tmp; + + final_swing_idx[RF90_PATH_B] = + rtldm->ofdm_index[RF90_PATH_B] > + pwr_tracking_limit ? + pwr_tracking_limit : + rtldm->ofdm_index[RF90_PATH_B]; + tmp = final_swing_idx[RF90_PATH_B]; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n", + rtldm->ofdm_index[RF90_PATH_B], + final_swing_idx[RF90_PATH_B]); + + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, + txscaling_tbl[tmp]); + } + } else if (method == MIX_MODE) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + rtldm->default_ofdm_index, + rtldm->absolute_ofdm_swing_idx[rf_path], + rf_path); + + final_ofdm_swing_index = rtldm->default_ofdm_index + + rtldm->absolute_ofdm_swing_idx[rf_path]; + + if (rf_path == RF90_PATH_A) { + /*BBSwing higher then Limit*/ + if (final_ofdm_swing_index > pwr_tracking_limit) { + rtldm->remnant_cck_idx = + final_ofdm_swing_index - + pwr_tracking_limit; + /* CCK Follow the same compensation value + * as Path A + */ + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index - + pwr_tracking_limit; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, + RF90_PATH_A); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_A Over BBSwing Limit ,PwrTrackingLimit = %d ,Remnant TxAGC Value = %d\n", + pwr_tracking_limit, + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_cck_idx = final_ofdm_swing_index; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[0]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n", + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else { + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[(u8)final_ofdm_swing_index]); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_A Compensate with BBSwing, Final_OFDM_Swing_Index = %d\n", + final_ofdm_swing_index); + /*If TxAGC has changed, reset TxAGC again*/ + if (rtldm->modify_txagc_flag_path_a) { + rtldm->remnant_cck_idx = 0; + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_A); + rtldm->modify_txagc_flag_path_a = false; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + "******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE\n"); + } + } + } + /*BBSwing higher then Limit*/ + if (rf_path == RF90_PATH_B) 
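+	/* Path B mirrors the Path A handling above; only the OFDM remnant
+	 * index is tracked here, the CCK remnant follows Path A. */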
{ + if (final_ofdm_swing_index > pwr_tracking_limit) { + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index - + pwr_tracking_limit; + + rtl_set_bbreg(hw, RB_TXSCALE, + 0xFFE00000, + txscaling_tbl[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_b = true; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_B); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n", + pwr_tracking_limit, + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index; + + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, + txscaling_tbl[0]); + + rtldm->modify_txagc_flag_path_b = true; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_B); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n", + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else { + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, + txscaling_tbl[(u8)final_ofdm_swing_index]); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_B Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n", + final_ofdm_swing_index); + /*If TxAGC has changed, reset TxAGC again*/ + if (rtldm->modify_txagc_flag_path_b) { + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_B); + + rtldm->modify_txagc_flag_path_b = + false; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE\n"); + } + } + } + } else { + return; + } +} + +void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0; + u8 thermal_value_avg_count = 0; + u32 thermal_value_avg = 0; + /* OFDM BB Swing should be less than +3.0dB, */ + u8 ofdm_min_index = 6; + /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/ + u8 index_for_channel = 0; + /* 1. The following TWO tables decide + * the final index of OFDM/CCK swing table. + */ + u8 *delta_swing_table_idx_tup_a; + u8 *delta_swing_table_idx_tdown_a; + u8 *delta_swing_table_idx_tup_b; + u8 *delta_swing_table_idx_tdown_b; + + /*2. 
Initilization ( 7 steps in total )*/ + rtl8812ae_get_delta_swing_table(hw, + (u8 **)&delta_swing_table_idx_tup_a, + (u8 **)&delta_swing_table_idx_tdown_a, + (u8 **)&delta_swing_table_idx_tup_b, + (u8 **)&delta_swing_table_idx_tdown_b); + + rtldm->txpower_trackinginit = true; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n", + rtldm->swing_idx_cck_base, + rtldm->swing_idx_ofdm_base[RF90_PATH_A], + rtldm->default_ofdm_index); + + thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, + /*0x42: RF Reg[15:10] 88E*/ + RF_T_METER_8812A, 0xfc00); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + if (!rtldm->txpower_track_control || + rtlefuse->eeprom_thermalmeter == 0 || + rtlefuse->eeprom_thermalmeter == 0xFF) + return; + + /* 3. Initialize ThermalValues of RFCalibrateInfo*/ + + if (rtlhal->reloadtxpowerindex) + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "reload ofdm index for band switch\n"); + + /*4. Calculate average thermal meter*/ + rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value; + rtldm->thermalvalue_avg_index++; + if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A) + /*Average times = c.AverageThermalNum*/ + rtldm->thermalvalue_avg_index = 0; + + for (i = 0; i < AVG_THERMAL_NUM_8812A; i++) { + if (rtldm->thermalvalue_avg[i]) { + thermal_value_avg += rtldm->thermalvalue_avg[i]; + thermal_value_avg_count++; + } + } + /*Calculate Average ThermalValue after average enough times*/ + if (thermal_value_avg_count) { + thermal_value = (u8)(thermal_value_avg / + thermal_value_avg_count); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + } + + /*5. Calculate delta, delta_LCK, delta_IQK. + *"delta" here is used to determine whether + *thermal value changes or not. + */ + delta = (thermal_value > rtldm->thermalvalue) ? + (thermal_value - rtldm->thermalvalue) : + (rtldm->thermalvalue - thermal_value); + delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? + (thermal_value - rtldm->thermalvalue_lck) : + (rtldm->thermalvalue_lck - thermal_value); + delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? + (thermal_value - rtldm->thermalvalue_iqk) : + (rtldm->thermalvalue_iqk - thermal_value); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n", + delta, delta_lck, delta_iqk); + + /* 6. If necessary, do LCK. + * Delta temperature is equal to or larger than 20 centigrade. + */ + if (delta_lck >= IQK_THRESHOLD) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_LCK(%d) >= Threshold_IQK(%d)\n", + delta_lck, IQK_THRESHOLD); + rtldm->thermalvalue_lck = thermal_value; + rtl8821ae_phy_lc_calibrate(hw); + } + + /*7. If necessary, move the index of swing table to adjust Tx power.*/ + + if (delta > 0 && rtldm->txpower_track_control) { + /* "delta" here is used to record the + * absolute value of differrence. + */ + delta = thermal_value > rtlefuse->eeprom_thermalmeter ? 
+ (thermal_value - rtlefuse->eeprom_thermalmeter) : + (rtlefuse->eeprom_thermalmeter - thermal_value); + + if (delta >= TXPWR_TRACK_TABLE_SIZE) + delta = TXPWR_TRACK_TABLE_SIZE - 1; + + /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/ + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_swing_table_idx_tup_a[%d] = %d\n", + delta, delta_swing_table_idx_tup_a[delta]); + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + delta_swing_table_idx_tup_a[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] = + delta_swing_table_idx_tup_a[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_swing_table_idx_tup_b[%d] = %d\n", + delta, delta_swing_table_idx_tup_b[delta]); + rtldm->delta_power_index_last[RF90_PATH_B] = + rtldm->delta_power_index[RF90_PATH_B]; + rtldm->delta_power_index[RF90_PATH_B] = + delta_swing_table_idx_tup_b[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_B] = + delta_swing_table_idx_tup_b[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_swing_table_idx_tdown_a[%d] = %d\n", + delta, delta_swing_table_idx_tdown_a[delta]); + + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + -1 * delta_swing_table_idx_tdown_a[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] = + -1 * delta_swing_table_idx_tdown_a[delta]; + /* Record delta swing for mix mode power tracking*/ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "deltaSwingTableIdx_TDOWN_B[%d] = %d\n", + delta, delta_swing_table_idx_tdown_b[delta]); + + rtldm->delta_power_index_last[RF90_PATH_B] = + rtldm->delta_power_index[RF90_PATH_B]; + rtldm->delta_power_index[RF90_PATH_B] = + -1 * delta_swing_table_idx_tdown_b[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_B] = + -1 * delta_swing_table_idx_tdown_b[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]); + } + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "============================= [Path-%c]Calculating PowerIndexOffset =============================\n", + (p == RF90_PATH_A ? 
'A' : 'B')); + + if (rtldm->delta_power_index[p] == + rtldm->delta_power_index_last[p]) + /*If Thermal value changes but lookup + table value still the same*/ + rtldm->power_index_offset[p] = 0; + else + rtldm->power_index_offset[p] = + rtldm->delta_power_index[p] - + rtldm->delta_power_index_last[p]; + /* Power Index Diff between 2 + * times Power Tracking + */ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "[Path-%c] PowerIndexOffset(%d) =DeltaPowerIndex(%d) -DeltaPowerIndexLast(%d)\n", + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->power_index_offset[p], + rtldm->delta_power_index[p] , + rtldm->delta_power_index_last[p]); + + rtldm->ofdm_index[p] = + rtldm->swing_idx_ofdm_base[p] + + rtldm->power_index_offset[p]; + rtldm->cck_index = + rtldm->swing_idx_cck_base + + rtldm->power_index_offset[p]; + + rtldm->swing_idx_cck = rtldm->cck_index; + rtldm->swing_idx_ofdm[p] = rtldm->ofdm_index[p]; + + /****Print BB Swing Base and Index Offset */ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", + rtldm->swing_idx_cck, + rtldm->swing_idx_cck_base, + rtldm->power_index_offset[p]); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n", + rtldm->swing_idx_ofdm[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->swing_idx_ofdm_base[p], + rtldm->power_index_offset[p]); + + /*7.1 Handle boundary conditions of index.*/ + + if (rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE - 1) + rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE - 1; + else if (rtldm->ofdm_index[p] < ofdm_min_index) + rtldm->ofdm_index[p] = ofdm_min_index; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "\n\n====================================================================================\n"); + if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1) + rtldm->cck_index = TXSCALE_TABLE_SIZE - 1; + else if (rtldm->cck_index < 0) + rtldm->cck_index = 0; + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n", + rtldm->txpower_track_control, + thermal_value, + rtldm->thermalvalue); + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtldm->power_index_offset[p] = 0; + } + /*Print Swing base & current*/ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPowerTracking: [CCK] Swing Current Index: %d,Swing Base Index: %d\n", + rtldm->cck_index, rtldm->swing_idx_cck_base); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPowerTracking: [OFDM] Swing Current Index: %d,Swing Base Index[%c]: %d\n", + rtldm->ofdm_index[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->swing_idx_ofdm_base[p]); + } + + if ((rtldm->power_index_offset[RF90_PATH_A] != 0 || + rtldm->power_index_offset[RF90_PATH_B] != 0) && + rtldm->txpower_track_control) { + /*7.2 Configure the Swing Table to adjust Tx Power. + *Always TRUE after Tx Power is adjusted by power tracking. + * + *2012/04/23 MH According to Luke's suggestion, + *we can not write BB digital + *to increase TX power. Otherwise, EVM will be bad. + * + *2012/04/25 MH Add for tx power tracking to set + *tx power in tx agc for 88E. 
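+	 * In MIX_MODE the BB swing absorbs the tracking delta up to the
+	 * per-rate limit; any remainder is written into TxAGC through the
+	 * remnant swing index instead.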
+ */ + if (thermal_value > rtldm->thermalvalue) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d,EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Increasing(B): delta_pi: %d ,delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_B], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_B], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + } + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature(%d) higher than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "**********Enter POWER Tracking MIX_MODE**********\n"); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, + p, 0); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature(%d) lower than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "**********Enter POWER Tracking MIX_MODE**********\n"); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, + p, index_for_channel); + } + /*Record last time Power Tracking result as base.*/ + rtldm->swing_idx_cck_base = rtldm->swing_idx_cck; + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtldm->swing_idx_ofdm_base[p] = + rtldm->swing_idx_ofdm[p]; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n", + rtldm->thermalvalue, thermal_value); + /*Record last Power Tracking Thermal Value*/ + rtldm->thermalvalue = thermal_value; + } + /*Delta temperature is equal to or larger than + 20 centigrade (When threshold is 8).*/ + if (delta_iqk >= IQK_THRESHOLD) + rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"); +} + +static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, u8 **up_a, + u8 **down_a, u8 **up_b, u8 **down_b) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 channel = rtlphy->current_channel; + u8 rate = rtldm->tx_rate; + + if (1 <= channel && channel <= 14) { + if (RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) { + *up_a = rtl8821ae_delta_swing_table_idx_24gccka_p; + *down_a = rtl8821ae_delta_swing_table_idx_24gccka_n; + *up_b = rtl8821ae_delta_swing_table_idx_24gcckb_p; + *down_b = rtl8821ae_delta_swing_table_idx_24gcckb_n; + } else { + *up_a = rtl8821ae_delta_swing_table_idx_24ga_p; + *down_a = rtl8821ae_delta_swing_table_idx_24ga_n; + *up_b = 
rtl8821ae_delta_swing_table_idx_24gb_p; + *down_b = rtl8821ae_delta_swing_table_idx_24gb_n; + } + } else if (36 <= channel && channel <= 64) { + *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[0]; + *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[0]; + *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[0]; + *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[0]; + } else if (100 <= channel && channel <= 140) { + *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[1]; + *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[1]; + *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[1]; + *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[1]; + } else if (149 <= channel && channel <= 173) { + *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[2]; + *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[2]; + *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[2]; + *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[2]; + } else { + *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; + *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; + *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + } + return; +} + +/*----------------------------------------------------------------------------- + * Function: odm_TxPwrTrackSetPwr88E() + * + * Overview: 88E change all channel tx power accordign to flag. + * OFDM & CCK are all different. + * + * Input: NONE + * + * Output: NONE + * + * Return: NONE + * + * Revised History: + * When Who Remark + * 04/23/2012 MHC Create Version 0. + * + *--------------------------------------------------------------------------- + */ +void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rf_path, u8 channel_mapped_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 final_swing_idx[1]; + u8 pwr_tracking_limit = 26; /*+1.0dB*/ + u8 tx_rate = 0xFF; + char final_ofdm_swing_index = 0; + + if (rtldm->tx_rate != 0xFF) + tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + + if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/ + /*CCK*/ + if ((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M)) + pwr_tracking_limit = 32; /*+4dB*/ + /*OFDM*/ + else if ((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M)) + pwr_tracking_limit = 30; /*+3dB*/ + else if (tx_rate == MGN_54M) + pwr_tracking_limit = 28; /*+2dB*/ + /*HT*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) + pwr_tracking_limit = 28; /*+2dB*/ + /*2 VHT*/ + /*QPSK/BPSK*/ + else if ((tx_rate >= MGN_VHT1SS_MCS0) && + (tx_rate <= MGN_VHT1SS_MCS2)) + pwr_tracking_limit = 34; /*+5dB*/ + /*16QAM*/ + else if ((tx_rate >= MGN_VHT1SS_MCS3) && + (tx_rate <= MGN_VHT1SS_MCS4)) + pwr_tracking_limit = 30; /*+3dB*/ + /*64QAM*/ + else if ((tx_rate >= MGN_VHT1SS_MCS5) && + (tx_rate <= MGN_VHT1SS_MCS6)) + pwr_tracking_limit = 28; /*+2dB*/ + else if (tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if (tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if (tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + else + pwr_tracking_limit = 24; + } + RT_TRACE(rtlpriv, 
COMP_POWER_TRACKING, DBG_LOUD, + "TxRate=0x%x, PwrTrackingLimit=%d\n", + tx_rate, pwr_tracking_limit); + + if (method == BBSWING) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + if (rf_path == RF90_PATH_A) { + final_swing_idx[RF90_PATH_A] = + (rtldm->ofdm_index[RF90_PATH_A] > + pwr_tracking_limit) ? + pwr_tracking_limit : + rtldm->ofdm_index[RF90_PATH_A]; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n", + rtldm->ofdm_index[RF90_PATH_A], + final_swing_idx[RF90_PATH_A]); + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[final_swing_idx[RF90_PATH_A]]); + } + } else if (method == MIX_MODE) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + rtldm->default_ofdm_index, + rtldm->absolute_ofdm_swing_idx[rf_path], + rf_path); + + final_ofdm_swing_index = + rtldm->default_ofdm_index + + rtldm->absolute_ofdm_swing_idx[rf_path]; + /*BBSwing higher then Limit*/ + if (rf_path == RF90_PATH_A) { + if (final_ofdm_swing_index > pwr_tracking_limit) { + rtldm->remnant_cck_idx = + final_ofdm_swing_index - + pwr_tracking_limit; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index - + pwr_tracking_limit; + + rtl_set_bbreg(hw, RA_TXSCALE, + 0xFFE00000, + txscaling_tbl[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, + RF90_PATH_A); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + " ******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n", + pwr_tracking_limit, + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_cck_idx = final_ofdm_swing_index; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = + final_ofdm_swing_index; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[0]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n", + rtldm->remnant_ofdm_swing_idx[rf_path]); + } else { + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + txscaling_tbl[(u8)final_ofdm_swing_index]); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Path_A Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n", + final_ofdm_swing_index); + /*If TxAGC has changed, reset TxAGC again*/ + if (rtldm->modify_txagc_flag_path_a) { + rtldm->remnant_cck_idx = 0; + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, + rtlphy->current_channel, RF90_PATH_A); + + rtldm->modify_txagc_flag_path_a = false; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + "******Path_A pDM_Odm->Modify_TxAGC_Flag= FALSE\n"); + } + } + } + } else { + return; + } +} + +void rtl8821ae_dm_txpower_tracking_callback_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = 
&rtlpriv->phy; + + u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0; + u8 thermal_value_avg_count = 0; + u32 thermal_value_avg = 0; + + u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB */ + /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/ + u8 index_for_channel = 0; + + /* 1. The following TWO tables decide the final + * index of OFDM/CCK swing table. + */ + u8 *delta_swing_table_idx_tup_a; + u8 *delta_swing_table_idx_tdown_a; + u8 *delta_swing_table_idx_tup_b; + u8 *delta_swing_table_idx_tdown_b; + + /*2. Initilization ( 7 steps in total )*/ + rtl8821ae_get_delta_swing_table(hw, (u8 **)&delta_swing_table_idx_tup_a, + (u8 **)&delta_swing_table_idx_tdown_a, + (u8 **)&delta_swing_table_idx_tup_b, + (u8 **)&delta_swing_table_idx_tdown_b); + + rtldm->txpower_trackinginit = true; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n", + rtldm->swing_idx_cck_base, + rtldm->swing_idx_ofdm_base[RF90_PATH_A], + rtldm->default_ofdm_index); + /*0x42: RF Reg[15:10] 88E*/ + thermal_value = (u8)rtl_get_rfreg(hw, + RF90_PATH_A, RF_T_METER_8812A, 0xfc00); + if (!rtldm->txpower_track_control || + rtlefuse->eeprom_thermalmeter == 0 || + rtlefuse->eeprom_thermalmeter == 0xFF) + return; + + /* 3. Initialize ThermalValues of RFCalibrateInfo*/ + + if (rtlhal->reloadtxpowerindex) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "reload ofdm index for band switch\n"); + } + + /*4. Calculate average thermal meter*/ + rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value; + rtldm->thermalvalue_avg_index++; + if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A) + /*Average times = c.AverageThermalNum*/ + rtldm->thermalvalue_avg_index = 0; + + for (i = 0; i < AVG_THERMAL_NUM_8812A; i++) { + if (rtldm->thermalvalue_avg[i]) { + thermal_value_avg += rtldm->thermalvalue_avg[i]; + thermal_value_avg_count++; + } + } + /*Calculate Average ThermalValue after average enough times*/ + if (thermal_value_avg_count) { + thermal_value = (u8)(thermal_value_avg / + thermal_value_avg_count); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + } + + /*5. Calculate delta, delta_LCK, delta_IQK. + *"delta" here is used to determine whether + * thermal value changes or not. + */ + delta = (thermal_value > rtldm->thermalvalue) ? + (thermal_value - rtldm->thermalvalue) : + (rtldm->thermalvalue - thermal_value); + delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? + (thermal_value - rtldm->thermalvalue_lck) : + (rtldm->thermalvalue_lck - thermal_value); + delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? + (thermal_value - rtldm->thermalvalue_iqk) : + (rtldm->thermalvalue_iqk - thermal_value); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n", + delta, delta_lck, delta_iqk); + + /* 6. If necessary, do LCK. */ + /*Delta temperature is equal to or larger than 20 centigrade.*/ + if (delta_lck >= IQK_THRESHOLD) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_LCK(%d) >= Threshold_IQK(%d)\n", + delta_lck, IQK_THRESHOLD); + rtldm->thermalvalue_lck = thermal_value; + rtl8821ae_phy_lc_calibrate(hw); + } + + /*7. 
If necessary, move the index of swing table to adjust Tx power.*/ + + if (delta > 0 && rtldm->txpower_track_control) { + /*"delta" here is used to record the + * absolute value of differrence. + */ + delta = thermal_value > rtlefuse->eeprom_thermalmeter ? + (thermal_value - rtlefuse->eeprom_thermalmeter) : + (rtlefuse->eeprom_thermalmeter - thermal_value); + + if (delta >= TXSCALE_TABLE_SIZE) + delta = TXSCALE_TABLE_SIZE - 1; + + /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/ + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_swing_table_idx_tup_a[%d] = %d\n", + delta, delta_swing_table_idx_tup_a[delta]); + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + delta_swing_table_idx_tup_a[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] = + delta_swing_table_idx_tup_a[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "delta_swing_table_idx_tdown_a[%d] = %d\n", + delta, delta_swing_table_idx_tdown_a[delta]); + + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + -1 * delta_swing_table_idx_tdown_a[delta]; + + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] = + -1 * delta_swing_table_idx_tdown_a[delta]; + /* Record delta swing for mix mode power tracking*/ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]); + } + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "\n\n================================ [Path-%c]Calculating PowerIndexOffset ================================\n", + (p == RF90_PATH_A ? 'A' : 'B')); + /*If Thermal value changes but lookup table value + * still the same + */ + if (rtldm->delta_power_index[p] == + rtldm->delta_power_index_last[p]) + + rtldm->power_index_offset[p] = 0; + else + rtldm->power_index_offset[p] = + rtldm->delta_power_index[p] - + rtldm->delta_power_index_last[p]; + /*Power Index Diff between 2 times Power Tracking*/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n", + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->power_index_offset[p], + rtldm->delta_power_index[p] , + rtldm->delta_power_index_last[p]); + + rtldm->ofdm_index[p] = + rtldm->swing_idx_ofdm_base[p] + + rtldm->power_index_offset[p]; + rtldm->cck_index = + rtldm->swing_idx_cck_base + + rtldm->power_index_offset[p]; + + rtldm->swing_idx_cck = rtldm->cck_index; + rtldm->swing_idx_ofdm[p] = rtldm->ofdm_index[p]; + + /*********Print BB Swing Base and Index Offset********/ + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", + rtldm->swing_idx_cck, + rtldm->swing_idx_cck_base, + rtldm->power_index_offset[p]); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n", + rtldm->swing_idx_ofdm[p], + (p == RF90_PATH_A ? 
'A' : 'B'), + rtldm->swing_idx_ofdm_base[p], + rtldm->power_index_offset[p]); + + /*7.1 Handle boundary conditions of index.*/ + + if (rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE - 1) + rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE - 1; + else if (rtldm->ofdm_index[p] < ofdm_min_index) + rtldm->ofdm_index[p] = ofdm_min_index; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "\n\n========================================================================================================\n"); + if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1) + rtldm->cck_index = TXSCALE_TABLE_SIZE - 1; + else if (rtldm->cck_index < 0) + rtldm->cck_index = 0; + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "The thermal meter is unchanged or TxPowerTracking OFF(%d):ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n", + rtldm->txpower_track_control, + thermal_value, + rtldm->thermalvalue); + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtldm->power_index_offset[p] = 0; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n", + /*Print Swing base & current*/ + rtldm->cck_index, rtldm->swing_idx_cck_base); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n", + rtldm->ofdm_index[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->swing_idx_ofdm_base[p]); + } + + if ((rtldm->power_index_offset[RF90_PATH_A] != 0 || + rtldm->power_index_offset[RF90_PATH_B] != 0) && + rtldm->txpower_track_control) { + /*7.2 Configure the Swing Table to adjust Tx Power.*/ + /*Always TRUE after Tx Power is adjusted by power tracking.*/ + /* + * 2012/04/23 MH According to Luke's suggestion, + * we can not write BB digital + * to increase TX power. Otherwise, EVM will be bad. + * + * 2012/04/25 MH Add for tx power tracking to + * set tx power in tx agc for 88E. 
+ */ + if (thermal_value > rtldm->thermalvalue) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Increasing(A): delta_pi: %d , delta_t: %d,Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/ + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue); + } + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature(%d) higher than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "****Enter POWER Tracking MIX_MODE****\n"); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtl8821ae_dm_txpwr_track_set_pwr(hw, + MIX_MODE, p, index_for_channel); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Temperature(%d) lower than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "*****Enter POWER Tracking MIX_MODE*****\n"); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, + MIX_MODE, p, index_for_channel); + } + /*Record last time Power Tracking result as base.*/ + rtldm->swing_idx_cck_base = rtldm->swing_idx_cck; + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", + rtldm->thermalvalue, thermal_value); + /*Record last Power Tracking Thermal Value*/ + rtldm->thermalvalue = thermal_value; + } + /* Delta temperature is equal to or larger than + * 20 centigrade (When threshold is 8). 
+ */ + if (delta_iqk >= IQK_THRESHOLD) { + if (!rtlphy->lck_inprogress) { + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + rtl8821ae_do_iqk(hw, delta_iqk, thermal_value, 8); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } + } + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"); +} + +void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), + 0x03); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Trigger 8821ae Thermal Meter!!\n"); + tm_trigger = 1; + return; + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Schedule TxPowerTracking !!\n"); + + rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw); + tm_trigger = 0; + } +} + +static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &rtlpriv->ra; + u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra40m; + u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra; + u8 go_up_gap = 5; + struct ieee80211_sta *sta = NULL; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver is going to unload\n"); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver does not control rate adaptive mask\n"); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra += go_up_gap; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra += go_up_gap; + low_rssithresh_for_ra += go_up_gap; + break; + default: + break; + } + + if (rtlpriv->dm.undec_sm_pwdb > + (long)high_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undec_sm_pwdb > + (long)low_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + else + p_ra->ratr_state = DM_RATR_STA_LOW; + + if (p_ra->pre_ratr_state != p_ra->ratr_state) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI = %ld\n", + rtlpriv->dm.undec_sm_pwdb); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI_LEVEL = %d\n", p_ra->ratr_state); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "PreState = %d, CurState = %d\n", + p_ra->pre_ratr_state, p_ra->ratr_state); + + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, + sta, p_ra->ratr_state); + rcu_read_unlock(); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + +static void rtl8821ae_dm_refresh_basic_rate_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_mac *mac = &rtlpriv->mac80211; + static u8 stage; + u8 cur_stage = 0; + u16 basic_rate = RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M | RRSR_6M; + + if (mac->link_state < MAC80211_LINKED) + cur_stage = 0; + else if (dm_digtable->rssi_val_min < 25) + cur_stage = 1; + else if (dm_digtable->rssi_val_min > 30) + cur_stage = 3; + else + cur_stage = 2; + + if (cur_stage != stage) { + if (cur_stage == 1) { + basic_rate &= (!(basic_rate ^ mac->basic_rates)); + 
rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_BASIC_RATE, (u8 *)&basic_rate); + } else if (cur_stage == 3 && (stage == 1 || stage == 2)) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_BASIC_RATE, (u8 *)&mac->basic_rates); + } + } + stage = cur_stage; +} + +static void rtl8821ae_dm_edca_choose_traffic_idx( + struct ieee80211_hw *hw, u64 cur_tx_bytes, + u64 cur_rx_bytes, bool b_bias_on_rx, + bool *pb_is_cur_rdl_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (b_bias_on_rx) { + if (cur_tx_bytes > (cur_rx_bytes*4)) { + *pb_is_cur_rdl_state = false; + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "Uplink Traffic\n "); + } else { + *pb_is_cur_rdl_state = true; + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "Balance Traffic\n"); + } + } else { + if (cur_rx_bytes > (cur_tx_bytes*4)) { + *pb_is_cur_rdl_state = true; + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "Downlink Traffic\n"); + } else { + *pb_is_cur_rdl_state = false; + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "Balance Traffic\n"); + } + } + return; +} + +static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + /*Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.*/ + u64 cur_tx_ok_cnt = 0; + u64 cur_rx_ok_cnt = 0; + u32 edca_be_ul = 0x5ea42b; + u32 edca_be_dl = 0x5ea42b; + u32 edca_be = 0x5ea42b; + u8 iot_peer = 0; + bool *pb_is_cur_rdl_state = NULL; + bool b_last_is_cur_rdl_state = false; + bool b_bias_on_rx = false; + bool b_edca_turbo_on = false; + + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "rtl8821ae_dm_check_edca_turbo=====>"); + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "Orginial BE PARAM: 0x%x\n", + rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)); + + if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100) + rtlpriv->dm.is_any_nonbepkts = true; + rtlpriv->dm.dbginfo.num_non_be_pkt = 0; + + /*=============================== + * list paramter for different platform + *=============================== + */ + b_last_is_cur_rdl_state = rtlpriv->dm.is_cur_rdlstate; + pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate; + + cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt; + cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast - rtldm->last_rx_ok_cnt; + + rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast; + rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast; + + iot_peer = rtlpriv->mac80211.vendor; + b_bias_on_rx = false; + b_edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting)) ? 
+ true : false; + + if (rtlpriv->rtlhal.hw_type != HARDWARE_TYPE_RTL8812AE) { + if ((iot_peer == PEER_CISCO) && + (mac->mode == WIRELESS_MODE_N_24G)) { + edca_be_dl = edca_setting_dl[iot_peer]; + edca_be_ul = edca_setting_ul[iot_peer]; + } + } + + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x\n", + rtlpriv->dm.is_any_nonbepkts, + rtlpriv->dm.disable_framebursting); + + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n", + b_edca_turbo_on, b_bias_on_rx); + + if (b_edca_turbo_on) { + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "curTxOkCnt : 0x%llx\n", cur_tx_ok_cnt); + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "curRxOkCnt : 0x%llx\n", cur_rx_ok_cnt); + if (b_bias_on_rx) + rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt, + cur_rx_ok_cnt, true, pb_is_cur_rdl_state); + else + rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt, + cur_rx_ok_cnt, false, pb_is_cur_rdl_state); + + edca_be = (*pb_is_cur_rdl_state) ? edca_be_dl : edca_be_ul; + + rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be); + + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "EDCA Turbo on: EDCA_BE:0x%x\n", edca_be); + + rtlpriv->dm.current_turbo_edca = true; + + RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, + "EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x\n", + edca_be_dl, edca_be_ul, edca_be); + } else { + if (rtlpriv->dm.current_turbo_edca) { + u8 tmp = AC0_BE; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *)(&tmp)); + } + rtlpriv->dm.current_turbo_edca = false; + } + + rtlpriv->dm.is_any_nonbepkts = false; + rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast; + rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast; +} + +static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + u8 cur_cck_cca_thresh; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + if (dm_digtable->rssi_val_min > 25) { + cur_cck_cca_thresh = 0xcd; + } else if ((dm_digtable->rssi_val_min <= 25) && + (dm_digtable->rssi_val_min > 10)) { + cur_cck_cca_thresh = 0x83; + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + + if (dm_digtable->cur_cck_cca_thres != cur_cck_cca_thresh) + rtl_write_byte(rtlpriv, ODM_REG_CCK_CCA_11AC, + cur_cck_cca_thresh); + + dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres; + dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh; + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres); +} + +static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 crystal_cap; + u32 packet_count; + int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0; + int cfo_ave_diff; + + if (rtlpriv->mac80211.link_state < MAC80211_LINKED) { + /*1.Enable ATC*/ + if (rtldm->atc_status == ATC_STATUS_OFF) { + rtl_set_bbreg(hw, RFC_AREA, BIT(14), ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No link!!\n"); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "atc_status = %d\n", rtldm->atc_status); + + if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) { + rtldm->crystal_cap = rtlpriv->efuse.crystalcap; + crystal_cap = 
rtldm->crystal_cap & 0x3f; + crystal_cap = crystal_cap & 0x3f; + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, + 0x7ff80000, (crystal_cap | + (crystal_cap << 6))); + else + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, + 0xfff000, (crystal_cap | + (crystal_cap << 6))); + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "crystal_cap = 0x%x\n", + rtldm->crystal_cap); + } else{ + /*1. Calculate CFO for path-A & path-B*/ + cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280; + cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280; + packet_count = rtldm->packet_count; + + /*2.No new packet*/ + if (packet_count == rtldm->packet_count_pre) { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "packet counter doesn't change\n"); + return; + } + + rtldm->packet_count_pre = packet_count; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "packet counter = %d\n", + rtldm->packet_count); + + /*3.Average CFO*/ + if (rtlpriv->phy.rf_type == RF_1T1R) + cfo_ave = cfo_khz_a; + else + cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n", + cfo_khz_a, cfo_khz_b, cfo_ave); + + /*4.Avoid abnormal large CFO*/ + cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ? + (rtldm->cfo_ave_pre - cfo_ave) : + (cfo_ave - rtldm->cfo_ave_pre); + + if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "first large CFO hit\n"); + rtldm->large_cfo_hit = 1; + return; + } else + rtldm->large_cfo_hit = 0; + + rtldm->cfo_ave_pre = cfo_ave; + + /*CFO tracking by adjusting Xtal cap.*/ + + /*1.Dynamic Xtal threshold*/ + if (cfo_ave >= -rtldm->cfo_threshold && + cfo_ave <= rtldm->cfo_threshold && + rtldm->is_freeze == 0) { + if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10; + rtldm->is_freeze = 1; + } else { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL; + } + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Dynamic threshold = %d\n", + rtldm->cfo_threshold); + + /* 2.Calculate Xtal offset*/ + if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f) + adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1; + else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && + rtlpriv->dm.crystal_cap > 0) + adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "Crystal cap = 0x%x, Crystal cap offset = %d\n", + rtldm->crystal_cap, adjust_xtal); + + /*3.Adjudt Crystal Cap.*/ + if (adjust_xtal != 0) { + rtldm->is_freeze = 0; + rtldm->crystal_cap += adjust_xtal; + + if (rtldm->crystal_cap > 0x3f) + rtldm->crystal_cap = 0x3f; + else if (rtldm->crystal_cap < 0) + rtldm->crystal_cap = 0; + + crystal_cap = rtldm->crystal_cap & 0x3f; + crystal_cap = crystal_cap & 0x3f; + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, + 0x7ff80000, (crystal_cap | + (crystal_cap << 6))); + else + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, + 0xfff000, (crystal_cap | + (crystal_cap << 6))); + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + "New crystal cap = 0x%x\n", + rtldm->crystal_cap); + } + } +} + +void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inpsmode = false; + bool fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inpsmode)); + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 
*)(&fw_ps_awake)); + + if (ppsc->p2p_ps_info.p2p_ps_mode) + fw_ps_awake = false; + + if ((ppsc->rfpwr_state == ERFON) && + ((!fw_current_inpsmode) && fw_ps_awake) && + (!ppsc->rfchange_inprogress)) { + rtl8821ae_dm_common_info_self_update(hw); + rtl8821ae_dm_false_alarm_counter_statistics(hw); + rtl8821ae_dm_check_rssi_monitor(hw); + rtl8821ae_dm_dig(hw); + rtl8821ae_dm_cck_packet_detection_thresh(hw); + rtl8821ae_dm_refresh_rate_adaptive_mask(hw); + rtl8821ae_dm_refresh_basic_rate_mask(hw); + rtl8821ae_dm_check_edca_turbo(hw); + rtl8821ae_dm_dynamic_atc_switch(hw); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_dm_check_txpower_tracking_thermalmeter(hw); + else + rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw); + rtl8821ae_dm_iq_calibrate(hw); + } + + rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; + RT_TRACE(rtlpriv, COMP_DIG, DBG_DMESG, "\n"); +} + +void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, + u8 *pdesc, u32 mac_id) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct fast_ant_training *pfat_table = &rtldm->fat_table; + + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8812AE) + return; + + if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) + SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h new file mode 100644 index 0000000000000000000000000000000000000000..9dd40dd316c16ee257dc284e6fe21a24a4bacb5f --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h @@ -0,0 +1,356 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_DM_H__ +#define __RTL8821AE_DM_H__ + +#define MAIN_ANT 0 +#define AUX_ANT 1 +#define MAIN_ANT_CG_TRX 1 +#define AUX_ANT_CG_TRX 0 +#define MAIN_ANT_CGCS_RX 0 +#define AUX_ANT_CGCS_RX 1 + +#define TXSCALE_TABLE_SIZE 37 + +/*RF REG LIST*/ +#define DM_REG_RF_MODE_11N 0x00 +#define DM_REG_RF_0B_11N 0x0B +#define DM_REG_CHNBW_11N 0x18 +#define DM_REG_T_METER_11N 0x24 +#define DM_REG_RF_25_11N 0x25 +#define DM_REG_RF_26_11N 0x26 +#define DM_REG_RF_27_11N 0x27 +#define DM_REG_RF_2B_11N 0x2B +#define DM_REG_RF_2C_11N 0x2C +#define DM_REG_RXRF_A3_11N 0x3C +#define DM_REG_T_METER_92D_11N 0x42 +#define DM_REG_T_METER_88E_11N 0x42 + +/*BB REG LIST*/ +/*PAGE 8 */ +#define DM_REG_BB_CTRL_11N 0x800 +#define DM_REG_RF_PIN_11N 0x804 +#define DM_REG_PSD_CTRL_11N 0x808 +#define DM_REG_TX_ANT_CTRL_11N 0x80C +#define DM_REG_BB_PWR_SAV5_11N 0x818 +#define DM_REG_CCK_RPT_FORMAT_11N 0x824 +#define DM_REG_RX_DEFUALT_A_11N 0x858 +#define DM_REG_RX_DEFUALT_B_11N 0x85A +#define DM_REG_BB_PWR_SAV3_11N 0x85C +#define DM_REG_ANTSEL_CTRL_11N 0x860 +#define DM_REG_RX_ANT_CTRL_11N 0x864 +#define DM_REG_PIN_CTRL_11N 0x870 +#define DM_REG_BB_PWR_SAV1_11N 0x874 +#define DM_REG_ANTSEL_PATH_11N 0x878 +#define DM_REG_BB_3WIRE_11N 0x88C +#define DM_REG_SC_CNT_11N 0x8C4 +#define DM_REG_PSD_DATA_11N 0x8B4 +/*PAGE 9*/ +#define DM_REG_ANT_MAPPING1_11N 0x914 +#define DM_REG_ANT_MAPPING2_11N 0x918 +/*PAGE A*/ +#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00 +#define DM_REG_CCK_CCA_11N 0xA0A +#define DM_REG_CCK_CCA_11AC 0xA0A +#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C +#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10 +#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14 +#define DM_REG_CCK_FILTER_PARA1_11N 0xA22 +#define DM_REG_CCK_FILTER_PARA2_11N 0xA23 +#define DM_REG_CCK_FILTER_PARA3_11N 0xA24 +#define DM_REG_CCK_FILTER_PARA4_11N 0xA25 +#define DM_REG_CCK_FILTER_PARA5_11N 0xA26 +#define DM_REG_CCK_FILTER_PARA6_11N 0xA27 +#define DM_REG_CCK_FILTER_PARA7_11N 0xA28 +#define DM_REG_CCK_FILTER_PARA8_11N 0xA29 +#define DM_REG_CCK_FA_RST_11N 0xA2C +#define DM_REG_CCK_FA_MSB_11N 0xA58 +#define DM_REG_CCK_FA_LSB_11N 0xA5C +#define DM_REG_CCK_CCA_CNT_11N 0xA60 +#define DM_REG_BB_PWR_SAV4_11N 0xA74 +/*PAGE B */ +#define DM_REG_LNA_SWITCH_11N 0xB2C +#define DM_REG_PATH_SWITCH_11N 0xB30 +#define DM_REG_RSSI_CTRL_11N 0xB38 +#define DM_REG_CONFIG_ANTA_11N 0xB68 +#define DM_REG_RSSI_BT_11N 0xB9C +/*PAGE C */ +#define DM_REG_OFDM_FA_HOLDC_11N 0xC00 +#define DM_REG_RX_PATH_11N 0xC04 +#define DM_REG_TRMUX_11N 0xC08 +#define DM_REG_OFDM_FA_RSTC_11N 0xC0C +#define DM_REG_RXIQI_MATRIX_11N 0xC14 +#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C +#define DM_REG_IGI_A_11N 0xC50 +#define DM_REG_IGI_A_11AC 0xC50 +#define DM_REG_ANTDIV_PARA2_11N 0xC54 +#define DM_REG_IGI_B_11N 0xC58 +#define DM_REG_IGI_B_11AC 0xE50 +#define DM_REG_ANTDIV_PARA3_11N 0xC5C +#define DM_REG_BB_PWR_SAV2_11N 0xC70 +#define DM_REG_RX_OFF_11N 0xC7C +#define DM_REG_TXIQK_MATRIXA_11N 0xC80 +#define DM_REG_TXIQK_MATRIXB_11N 0xC88 +#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94 +#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C +#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0 +#define DM_REG_ANTDIV_PARA1_11N 0xCA4 +#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0 +/*PAGE D */ +#define DM_REG_OFDM_FA_RSTD_11N 0xD00 +#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0 +#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4 +#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8 +/*PAGE E */ +#define DM_REG_TXAGC_A_6_18_11N 0xE00 +#define DM_REG_TXAGC_A_24_54_11N 
0xE04 +#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08 +#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10 +#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14 +#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18 +#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C +#define DM_REG_FPGA0_IQK_11N 0xE28 +#define DM_REG_TXIQK_TONE_A_11N 0xE30 +#define DM_REG_RXIQK_TONE_A_11N 0xE34 +#define DM_REG_TXIQK_PI_A_11N 0xE38 +#define DM_REG_RXIQK_PI_A_11N 0xE3C +#define DM_REG_TXIQK_11N 0xE40 +#define DM_REG_RXIQK_11N 0xE44 +#define DM_REG_IQK_AGC_PTS_11N 0xE48 +#define DM_REG_IQK_AGC_RSP_11N 0xE4C +#define DM_REG_BLUETOOTH_11N 0xE6C +#define DM_REG_RX_WAIT_CCA_11N 0xE70 +#define DM_REG_TX_CCK_RFON_11N 0xE74 +#define DM_REG_TX_CCK_BBON_11N 0xE78 +#define DM_REG_OFDM_RFON_11N 0xE7C +#define DM_REG_OFDM_BBON_11N 0xE80 +#define DM_REG_TX2RX_11N 0xE84 +#define DM_REG_TX2TX_11N 0xE88 +#define DM_REG_RX_CCK_11N 0xE8C +#define DM_REG_RX_OFDM_11N 0xED0 +#define DM_REG_RX_WAIT_RIFS_11N 0xED4 +#define DM_REG_RX2RX_11N 0xED8 +#define DM_REG_STANDBY_11N 0xEDC +#define DM_REG_SLEEP_11N 0xEE0 +#define DM_REG_PMPD_ANAEN_11N 0xEEC + +/*MAC REG LIST*/ +#define DM_REG_BB_RST_11N 0x02 +#define DM_REG_ANTSEL_PIN_11N 0x4C +#define DM_REG_EARLY_MODE_11N 0x4D0 +#define DM_REG_RSSI_MONITOR_11N 0x4FE +#define DM_REG_EDCA_VO_11N 0x500 +#define DM_REG_EDCA_VI_11N 0x504 +#define DM_REG_EDCA_BE_11N 0x508 +#define DM_REG_EDCA_BK_11N 0x50C +#define DM_REG_TXPAUSE_11N 0x522 +#define DM_REG_RESP_TX_11N 0x6D8 +#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0 +#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4 + +/*DIG Related*/ +#define DM_BIT_IGI_11N 0x0000007F +#define DM_BIT_IGI_11AC 0xFFFFFFFF + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 43 +#define CCK_TABLE_LENGTH 33 + +#define OFDM_TABLE_SIZE 37 +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_THRESH_HIGH 40 +#define DM_DIG_THRESH_LOW 35 + +#define DM_FALSEALARM_THRESH_LOW 400 +#define DM_FALSEALARM_THRESH_HIGH 1000 + +#define DM_DIG_MAX 0x3e +#define DM_DIG_MIN 0x1e + +#define DM_DIG_MAX_AP 0x32 +#define DM_DIG_MIN_AP 0x20 + +#define DM_DIG_FA_UPPER 0x3e +#define DM_DIG_FA_LOWER 0x1e +#define DM_DIG_FA_TH0 200 +#define DM_DIG_FA_TH1 0x300 +#define DM_DIG_FA_TH2 0x400 + +#define DM_DIG_BACKOFF_MAX 12 +#define DM_DIG_BACKOFF_MIN -4 +#define DM_DIG_BACKOFF_DEFAULT 10 + +#define RXPATHSELECTION_SS_TH_LOW 30 +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVAL 25 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define TXPWRTRACK_MAX_IDX 6 + +/* Dynamic ATC switch */ +#define ATC_STATUS_OFF 0x0 /* enable */ +#define ATC_STATUS_ON 0x1 /* disable */ +#define CFO_THRESHOLD_XTAL 10 /* kHz */ +#define CFO_THRESHOLD_ATC 80 /* kHz */ + +#define AVG_THERMAL_NUM_8812A 4 +#define TXPWR_TRACK_TABLE_SIZE 30 +#define MAX_PATH_NUM_8812A 2 +#define MAX_PATH_NUM_8821A 1 + +enum FAT_STATE { + FAT_NORMAL_STATE = 0, + FAT_TRAINING_STATE = 1, +}; + +enum tag_dynamic_init_gain_operation_type_definition { + DIG_TYPE_THRESH_HIGH = 0, + DIG_TYPE_THRESH_LOW = 1, + DIG_TYPE_BACKOFF = 2, + DIG_TYPE_RX_GAIN_MIN = 3, + 
DIG_TYPE_RX_GAIN_MAX = 4, + DIG_TYPE_ENABLE = 5, + DIG_TYPE_DISABLE = 6, + DIG_OP_TYPE_MAX +}; + +enum tag_cck_packet_detection_threshold_type_definition { + CCK_PD_STAGE_LOWRSSI = 0, + CCK_PD_STAGE_HIGHRSSI = 1, + CCK_FA_STAGE_LOW = 2, + CCK_FA_STAGE_HIGH = 3, + CCK_PD_STAGE_MAX = 4, +}; + +enum dm_1r_cca_e { + CCA_1R = 0, + CCA_2R = 1, + CCA_MAX = 2, +}; + +enum dm_rf_e { + RF_SAVE = 0, + RF_NORMAL = 1, + RF_MAX = 2, +}; + +enum dm_sw_ant_switch_e { + ANS_ANTENNA_B = 1, + ANS_ANTENNA_A = 2, + ANS_ANTENNA_MAX = 3, +}; + +enum dm_dig_ext_port_alg_e { + DIG_EXT_PORT_STAGE_0 = 0, + DIG_EXT_PORT_STAGE_1 = 1, + DIG_EXT_PORT_STAGE_2 = 2, + DIG_EXT_PORT_STAGE_3 = 3, + DIG_EXT_PORT_STAGE_MAX = 4, +}; + +enum dm_dig_connect_e { + DIG_STA_DISCONNECT = 0, + DIG_STA_CONNECT = 1, + DIG_STA_BEFORE_CONNECT = 2, + DIG_MULTISTA_DISCONNECT = 3, + DIG_MULTISTA_CONNECT = 4, + DIG_CONNECT_MAX +}; + +enum pwr_track_control_method { + BBSWING, + TXAGC, + MIX_MODE +}; + +#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) +#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) +#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) +#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) +#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) +#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ + ((((struct rtl_priv *)(_priv))->mac80211.opmode == \ + NL80211_IFTYPE_ADHOC) ? \ + (((struct rtl_priv *)(_priv))->dm.entry_min_undec_sm_pwdb) : \ + (((struct rtl_priv *)(_priv))->dm.undec_sm_pwdb)) + +void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, + u8 *pdesc, u32 mac_id); +void rtl8821ae_dm_ant_sel_statistics(struct ieee80211_hw *hw, + u8 antsel_tr_mux, u32 mac_id, + u32 rx_pwdb_all); +void rtl8821ae_dm_fast_antenna_training_callback(unsigned long data); +void rtl8821ae_dm_init(struct ieee80211_hw *hw); +void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw); +void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi); +void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw); +void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw); +void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw, + u8 type, u8 *pdirection, + u32 *poutwrite_val); +void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw); +void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca); +void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(struct ieee80211_hw *hw); +void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rf_path, + u8 channel_mapped_index); +void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rf_path, u8 channel_mapped_index); + +void rtl8821ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate); +u8 rtl8821ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate); +void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw); +void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c new file mode 100644 index 0000000000000000000000000000000000000000..95e95626b6325c29c43ceb440b4b3ea0b352874e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c @@ -0,0 +1,1857 @@ +/****************************************************************************** + * + * Copyright(c) 
2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../core.h" +#include "reg.h" +#include "def.h" +#include "fw.h" +#include "dm.h" + +static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + if (enable) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05); + + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); + rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); + + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + } else { + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + } +} + +static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw, + const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 blocksize = sizeof(u32); + u8 *bufferptr = (u8 *)buffer; + u32 *pu4byteptr = (u32 *)buffer; + u32 i, offset, blockcount, remainsize; + + blockcount = size / blocksize; + remainsize = size % blocksize; + + for (i = 0; i < blockcount; i++) { + offset = i * blocksize; + rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset), + *(pu4byteptr + i)); + } + + if (remainsize) { + offset = blockcount * blocksize; + bufferptr += offset; + for (i = 0; i < remainsize; i++) { + rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS + + offset + i), *(bufferptr + i)); + } + } +} + +static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw, + u32 page, const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value8; + u8 u8page = (u8)(page & 0x07); + + value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; + + rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); + _rtl8821ae_fw_block_write(hw, buffer, size); +} + +static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +{ + u32 fwlen = *pfwlen; + u8 remain = (u8)(fwlen % 4); + + remain = (remain == 0) ? 
0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + + *pfwlen = fwlen; +} + +static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, + enum version_8821ae version, + u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *bufferptr = (u8 *)buffer; + u32 pagenums, remainsize; + u32 page, offset; + + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); + + _rtl8821ae_fill_dummy(bufferptr, &size); + + pagenums = size / FW_8821AE_PAGE_SIZE; + remainsize = size % FW_8821AE_PAGE_SIZE; + + if (pagenums > 8) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Page numbers should not greater then 8\n"); + } + + for (page = 0; page < pagenums; page++) { + offset = page * FW_8821AE_PAGE_SIZE; + _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), + FW_8821AE_PAGE_SIZE); + } + + if (remainsize) { + offset = pagenums * FW_8821AE_PAGE_SIZE; + page = pagenums; + _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset), + remainsize); + } +} + +static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = -EIO; + u32 counter = 0; + u32 value32; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + } while ((counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT) && + (!(value32 & FWDL_CHKSUM_RPT))); + + if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "chksum report faill ! REG_MCUFWDL:0x%08x .\n", + value32); + goto exit; + } + + RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG, + "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); + + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + value32 |= MCUFWDL_RDY; + value32 &= ~WINTINI_RDY; + rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); + + rtl8821ae_firmware_selfreset(hw); + + counter = 0; + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + if (value32 & WINTINI_RDY) { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", + value32); + err = 0; + goto exit; + } + + udelay(FW_8821AE_POLLING_DELAY); + } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT); + + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Polling FW ready fail!! 
REG_MCUFWDL:0x%08x .\n", + value32); + +exit: + return err; +} + +static void _rtl8821ae_wait_for_h2c_cmd_finish(struct rtl_priv *rtlpriv) +{ + u8 val; + u16 count = 0; + + do { + val = rtl_read_byte(rtlpriv, REG_HMETFR); + mdelay(1); + count++; + } while ((val & 0x0F) && (count < 1000)); +} + +int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl8821a_firmware_header *pfwheader; + u8 *pfwdata; + u32 fwsize; + int err; + bool support_remote_wakeup; + enum version_8821ae version = rtlhal->version; + + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wakeup)); + + if (support_remote_wakeup) + _rtl8821ae_wait_for_h2c_cmd_finish(rtlpriv); + + if (buse_wake_on_wlan_fw) { + if (!rtlhal->wowlan_firmware) + return 1; + + pfwheader = + (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware; + rtlhal->fw_version = pfwheader->version; + rtlhal->fw_subversion = pfwheader->subversion; + pfwdata = (u8 *)rtlhal->wowlan_firmware; + fwsize = rtlhal->wowlan_fwsize; + } else { + if (!rtlhal->pfirmware) + return 1; + + pfwheader = + (struct rtl8821a_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = pfwheader->version; + rtlhal->fw_subversion = pfwheader->subversion; + pfwdata = (u8 *)rtlhal->pfirmware; + fwsize = rtlhal->fwsize; + } + + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "%s Firmware SIZE %d\n", + buse_wake_on_wlan_fw ? "Wowlan" : "Normal", fwsize); + + if (IS_FW_HEADER_EXIST_8812(pfwheader) || + IS_FW_HEADER_EXIST_8821(pfwheader)) { + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "Firmware Version(%d), Signature(%#x)\n", + pfwheader->version, pfwheader->signature); + + pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header); + fwsize = fwsize - sizeof(struct rtl8821a_firmware_header); + } + + if (rtlhal->mac_func_enable) { + if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + rtl8821ae_firmware_selfreset(hw); + } + } + _rtl8821ae_enable_fw_download(hw, true); + _rtl8821ae_write_fw(hw, version, pfwdata, fwsize); + _rtl8821ae_enable_fw_download(hw, false); + + err = _rtl8821ae_fw_free_to_go(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Firmware is not ready to run!\n"); + } else { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "Firmware is ready to run!\n"); + } + + return 0; +} + +#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) +void rtl8821ae_set_fw_related_for_wowlan(struct ieee80211_hw *hw, + bool used_wowlan_fw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + /* 1. Before WoWLAN or After WOWLAN we need to re-download Fw. */ + if (rtl8821ae_download_fw(hw, used_wowlan_fw)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "Re-Download Firmware failed!!\n"); + rtlhal->fw_ready = false; + return; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "Re-Download Firmware Success !!\n"); + rtlhal->fw_ready = true; + + /* 2. Re-Init the variables about Fw related setting. 
*/ + ppsc->fw_current_inpsmode = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE; + rtlhal->fw_clk_change_in_progress = false; + rtlhal->allow_sw_to_change_hwclc = false; + rtlhal->last_hmeboxnum = 0; +} +#endif + +static bool _rtl8821ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, + u8 boxnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val_hmetfr; + bool result = false; + + val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); + if (((val_hmetfr >> boxnum) & BIT(0)) == 0) + result = true; + return result; +} + +static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, + u8 *cmdbuffer) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 boxnum = 0; + u16 box_reg = 0, box_extreg = 0; + u8 u1b_tmp = 0; + bool isfw_read = false; + u8 buf_index = 0; + bool bwrite_sucess = false; + u8 wait_h2c_limmit = 100; + /*u8 wait_writeh2c_limmit = 100;*/ + u8 boxcontent[4], boxextcontent[4]; + u32 h2c_waitcounter = 0; + unsigned long flag = 0; + u8 idx = 0; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n"); + + while (true) { + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + if (rtlhal->h2c_setinprogress) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C set in progress! Wait to set..element_id(%d).\n", + element_id); + + while (rtlhal->h2c_setinprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, + flag); + h2c_waitcounter++; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Wait 100 us (%d times)...\n", + h2c_waitcounter); + udelay(100); + + if (h2c_waitcounter > 1000) + return; + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, + flag); + } + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + } else { + rtlhal->h2c_setinprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + break; + } + } + + while (!bwrite_sucess) { + boxnum = rtlhal->last_hmeboxnum; + switch (boxnum) { + case 0: + box_reg = REG_HMEBOX_0; + box_extreg = REG_HMEBOX_EXT_0; + break; + case 1: + box_reg = REG_HMEBOX_1; + box_extreg = REG_HMEBOX_EXT_1; + break; + case 2: + box_reg = REG_HMEBOX_2; + box_extreg = REG_HMEBOX_EXT_2; + break; + case 3: + box_reg = REG_HMEBOX_3; + box_extreg = REG_HMEBOX_EXT_3; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + isfw_read = false; + u1b_tmp = rtl_read_byte(rtlpriv, REG_CR); + + if (u1b_tmp != 0xEA) { + isfw_read = true; + } else { + if (rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xEA || + rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xEA) + rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xFF); + } + + if (isfw_read) { + wait_h2c_limmit = 100; + isfw_read = + _rtl8821ae_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { + /*wait until Fw read*/ + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Waiting too long for FW read clear HMEBox(%d)!\n", + boxnum); + break; + } + + udelay(10); + + isfw_read = + _rtl8821ae_check_fw_read_last_h2c(hw, boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x130); + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n", + boxnum, u1b_tmp); + } + } + + if (!isfw_read) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Write H2C register BOX[%d] fail!!!!! 
Fw do not read.\n", + boxnum); + break; + } + + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Write element_id box_reg(%4x) = %2x\n", + box_reg, element_id); + + switch (cmd_len) { + case 1: + case 2: + case 3: + /*boxcontent[0] &= ~(BIT(7));*/ + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, cmd_len); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + case 4: + case 5: + case 6: + case 7: + /*boxcontent[0] |= (BIT(7));*/ + memcpy((u8 *)(boxextcontent), + cmdbuffer + buf_index+3, cmd_len-3); + memcpy((u8 *)(boxcontent) + 1, + cmdbuffer + buf_index, 3); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_extreg + idx, + boxextcontent[idx]); + } + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + + bwrite_sucess = true; + + rtlhal->last_hmeboxnum = boxnum + 1; + if (rtlhal->last_hmeboxnum == 4) + rtlhal->last_hmeboxnum = 0; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "pHalData->last_hmeboxnum = %d\n", + rtlhal->last_hmeboxnum); + } + + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + rtlhal->h2c_setinprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); +} + +void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *cmdbuffer) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 tmp_cmdbuf[2]; + + if (!rtlhal->fw_ready) { + RT_ASSERT(false, + "return H2C cmd because of Fw download fail!!!\n"); + return; + } + + memset(tmp_cmdbuf, 0, 8); + memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); + _rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); +} + +void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3)))); + } else { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(0)))); + } + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); + udelay(50); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3))); + } else { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(0))); + } + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "_8051Reset8812ae(): 8051 reset success .\n"); +} + +void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 rlbm, power_state = 0; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); + + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); + rlbm = 0;/*YJ,temp,120316. 
FW now not support RLBM=2.*/ + SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); + SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, + (rtlpriv->mac80211.p2p) ? + ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, + ppsc->reg_max_lps_awakeintvl); + SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); + if (mode == FW_PS_ACTIVE_MODE) + power_state |= FW_PWR_STATE_ACTIVE; + else + power_state |= FW_PWR_STATE_RF_OFF; + + SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n", + u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE, + H2C_8821AE_PWEMODE_LENGTH, + u1_h2c_set_pwrmode); +} + +void rtl8821ae_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, + u8 mstatus) +{ + u8 parm[3] = { 0, 0, 0 }; + /* parm[0]: bit0=0-->Disconnect, bit0=1-->Connect + * bit1=0-->update Media Status to MACID + * bit1=1-->update Media Status from MACID to MACID_End + * parm[1]: MACID, if this is INFRA_STA, MacID = 0 + * parm[2]: MACID_End + */ + + SET_H2CCMD_MSRRPT_PARM_OPMODE(parm, mstatus); + SET_H2CCMD_MSRRPT_PARM_MACID_IND(parm, 0); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_MSRRPT, 3, parm); +} + +void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, + u8 ap_offload_enable) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 u1_apoffload_parm[H2C_8821AE_AP_OFFLOAD_LENGTH] = { 0 }; + + SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable); + SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid); + SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AP_OFFLOAD, + H2C_8821AE_AP_OFFLOAD_LENGTH, + u1_apoffload_parm); +} + +void rtl8821ae_set_fw_wowlan_mode(struct ieee80211_hw *hw, bool func_en) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 fw_wowlan_info[H2C_8821AE_WOWLAN_LENGTH] = {0}; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "enable(%d)\n", func_en); + + SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(fw_wowlan_info, + (func_en ? true : false)); + + SET_8812_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(fw_wowlan_info, + ((ppsc->wo_wlan_mode & WAKE_ON_PATTERN_MATCH) ? 1 : 0)); + SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(fw_wowlan_info, + ((ppsc->wo_wlan_mode & WAKE_ON_MAGIC_PACKET) ? 
1 : 0)); + + SET_8812_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(fw_wowlan_info, 0); + SET_8812_H2CCMD_WOWLAN_ALL_PKT_DROP(fw_wowlan_info, false); + SET_8812_H2CCMD_WOWLAN_GPIO_ACTIVE(fw_wowlan_info, 0); + SET_8812_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(fw_wowlan_info, 1); + SET_8812_H2CCMD_WOWLAN_GPIONUM(fw_wowlan_info, 0); + SET_8812_H2CCMD_WOWLAN_GPIO_DURATION(fw_wowlan_info, 0); + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_DMESG, + "wowlan mode: cmd 0x80: Content:\n", + fw_wowlan_info, H2C_8821AE_WOWLAN_LENGTH); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_WO_WLAN, + H2C_8821AE_WOWLAN_LENGTH, + fw_wowlan_info); +} + +void rtl8821ae_set_fw_remote_wake_ctrl_cmd(struct ieee80211_hw *hw, + u8 enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 remote_wake_ctrl_parm[H2C_8821AE_REMOTE_WAKE_CTRL_LEN] = {0}; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "enable=%d, ARP offload=%d, GTK offload=%d\n", + enable, ppsc->arp_offload_enable, ppsc->gtk_offload_enable); + + SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(remote_wake_ctrl_parm, enable); + SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(remote_wake_ctrl_parm, + (ppsc->arp_offload_enable ? 1 : 0)); + SET_8812_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(remote_wake_ctrl_parm, + (ppsc->gtk_offload_enable ? 1 : 0)); + SET_8812_H2CCMD_REMOTE_WAKE_CTRL_REALWOWV2_EN(remote_wake_ctrl_parm, + (rtlhal->real_wow_v2_enable ? 1 : 0)); + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "remote_wake_ctrl: cmd 0x4: Content:\n", + remote_wake_ctrl_parm, H2C_8821AE_REMOTE_WAKE_CTRL_LEN); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_REMOTE_WAKE_CTRL, + H2C_8821AE_REMOTE_WAKE_CTRL_LEN, + remote_wake_ctrl_parm); +} + +void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw, + bool func_en) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 keep_alive_info[H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH] = {0}; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable(%d)\n", func_en); + + SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(keep_alive_info, func_en); + /* 1: the period is controled by driver, 0: by Fw default */ + SET_8812_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(keep_alive_info, 1); + SET_8812_H2CCMD_KEEP_ALIVE_PERIOD(keep_alive_info, 10); /* 10 sec */ + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "keep alive: cmd 0x3: Content:\n", + keep_alive_info, H2C_8821AE_KEEP_ALIVE_CTRL); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, + H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH, + keep_alive_info); +} + +void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw, + bool enabled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 parm[H2C_8821AE_DISCONNECT_DECISION_CTRL_LEN] = {0}; + + SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_ENABLE(parm, enabled); + SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_USER_SETTING(parm, 1); + SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_CHECK_PERIOD(parm, 30); + SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_TRYPKT_NUM(parm, 3); + + RT_PRINT_DATA(rtlpriv, COMP_POWER, DBG_TRACE, + "disconnect_decision_ctrl: cmd 0x4: Content:\n", + parm, H2C_8821AE_DISCONNECT_DECISION_CTRL_LEN); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_DISCONNECT_DECISION, + H2C_8821AE_DISCONNECT_DECISION_CTRL_LEN, parm); +} + +void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_security *sec = &rtlpriv->sec; + u8 remote_wakeup_sec_info[H2C_8821AE_AOAC_GLOBAL_INFO_LEN] = {0}; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "PairwiseEncAlgorithm=%d, 
GroupEncAlgorithm=%d\n", + sec->pairwise_enc_algorithm, sec->group_enc_algorithm); + + SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG( + remote_wakeup_sec_info, + sec->pairwise_enc_algorithm); + SET_8812_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(remote_wakeup_sec_info, + sec->group_enc_algorithm); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_GLOBAL_INFO, + H2C_8821AE_AOAC_GLOBAL_INFO_LEN, + remote_wakeup_sec_info); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_TRACE, + "rtl8821ae_set_global_info: cmd 0x82:\n", + remote_wakeup_sec_info, H2C_8821AE_AOAC_GLOBAL_INFO_LEN); +} + +#define BEACON_PG 0 +#define PSPOLL_PG 1 +#define NULL_PG 2 +#define QOSNULL_PG 3 +#define ARPRESP_PG 4 +#define REMOTE_PG 5 +#define GTKEXT_PG 6 + +#define TOTAL_RESERVED_PKT_LEN_8812 3584 +#define TOTAL_RESERVED_PKT_LEN_8821 1792 + +static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { + /* page 0: beacon */ + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x20, 0x04, 0x00, 0x06, 0x64, 0x6c, + 0x69, 0x6e, 0x6b, 0x31, 0x01, 0x08, 0x82, 0x84, + 0x8b, 0x96, 0x0c, 0x18, 0x30, 0x48, 0x03, 0x01, + 0x0b, 0x06, 0x02, 0x00, 0x00, 0x2a, 0x01, 0x8b, + 0x32, 0x04, 0x12, 0x24, 0x60, 0x6c, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 1: ps-poll */ + 0xa4, 0x10, 0x01, 0xc0, 0x40, 0x16, 0x9f, 0x23, + 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 2: null data */ + 0x48, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23, + 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 3: qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 4~6 is for wowlan */ + /* page 4: ARP resp */ + 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06, + 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02, + 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + /* page 6: Rsvd GTK extend memory (zero memory) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { + /* page 0: beacon */ + 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x20, 0x04, 0x00, 0x03, 0x32, 0x31, + 0x35, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96, 0x0C, + 0x12, 0x18, 0x24, 0x03, 0x01, 0x01, 0x06, 0x02, + 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32, 0x04, 0x30, + 0x48, 0x60, 0x6C, 0x2D, 0x1A, 0xED, 0x09, 0x03, + 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, + 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C, 0x02, 0x02, + 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 1: ps-poll */ + 0xA4, 0x10, 0x09, 0xC0, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 2: null data */ + 0x48, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 3: Qos null data */ + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3C, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 4~6 is for wowlan */ + /* page 4: ARP resp */ + 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, + 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06, + 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02, + 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 5: H2C_REMOTE_WAKE_CTRL_INFO */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 6: Rsvd GTK extend memory (zero memory) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool b_dl_finished, bool dl_whole_packets) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct sk_buff *skb = NULL; + u32 totalpacketlen; + bool rtstatus; + u8 u1RsvdPageLoc[5] = { 0 }; + u8 u1RsvdPageLoc2[7] = { 0 }; + bool b_dlok = false; + u8 *beacon; + u8 *p_pspoll; + u8 *nullfunc; + u8 *qosnull; + u8 *arpresp; + + 
/*--------------------------------------------------------- + * (1) beacon + *--------------------------------------------------------- + */ + beacon = &reserved_page_packet_8812[BEACON_PG * 512]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + if (b_dl_finished) { + totalpacketlen = 512 - 40; + goto out; + } + /*------------------------------------------------------- + * (2) ps-poll + *-------------------------------------------------------- + */ + p_pspoll = &reserved_page_packet_8812[PSPOLL_PG * 512]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + + /*-------------------------------------------------------- + * (3) null data + *--------------------------------------------------------- + */ + nullfunc = &reserved_page_packet_8812[NULL_PG * 512]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + + /*--------------------------------------------------------- + * (4) Qos null data + *---------------------------------------------------------- + */ + qosnull = &reserved_page_packet_8812[QOSNULL_PG * 512]; + SET_80211_HDR_ADDRESS1(qosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + + if (!dl_whole_packets) { + totalpacketlen = 512 * (QOSNULL_PG + 1) - 40; + goto out; + } + /*--------------------------------------------------------- + * (5) ARP Resp + *---------------------------------------------------------- + */ + arpresp = &reserved_page_packet_8812[ARPRESP_PG * 512]; + SET_80211_HDR_ADDRESS1(arpresp, mac->bssid); + SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(arpresp, mac->bssid); + + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); + + /*--------------------------------------------------------- + * (6) Remote Wake Ctrl + *---------------------------------------------------------- + */ + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, + REMOTE_PG); + + /*--------------------------------------------------------- + * (7) GTK Ext Memory + *---------------------------------------------------------- + */ + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812 - 40; + +out: + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "rtl8812ae_set_fw_rsvdpagepkt(): packet data\n", + &reserved_page_packet_8812[0], totalpacketlen); + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *)skb_put(skb, totalpacketlen), + &reserved_page_packet_8812, totalpacketlen); + + rtstatus = rtl_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (!b_dl_finished && b_dlok) { + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, + sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + if (dl_whole_packets) { + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "wowlan H2C_RSVDPAGE:\n", u1RsvdPageLoc2, 7); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE, + sizeof(u1RsvdPageLoc2), u1RsvdPageLoc2); + } + } + + if (!b_dlok) + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set RSVD page 
location to Fw FAIL!!!!!!.\n"); +} + +void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool b_dl_finished, bool dl_whole_packets) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct sk_buff *skb = NULL; + u32 totalpacketlen; + bool rtstatus; + u8 u1RsvdPageLoc[5] = { 0 }; + u8 u1RsvdPageLoc2[7] = { 0 }; + bool b_dlok = false; + u8 *beacon; + u8 *p_pspoll; + u8 *nullfunc; + u8 *qosnull; + u8 *arpresp; + + /*--------------------------------------------------------- + * (1) beacon + *--------------------------------------------------------- + */ + beacon = &reserved_page_packet_8821[BEACON_PG * 256]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + if (b_dl_finished) { + totalpacketlen = 256 - 40; + goto out; + } + /*------------------------------------------------------- + * (2) ps-poll + *-------------------------------------------------------- + */ + p_pspoll = &reserved_page_packet_8821[PSPOLL_PG * 256]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + + /*-------------------------------------------------------- + * (3) null data + *---------------------------------------------------------i + */ + nullfunc = &reserved_page_packet_8821[NULL_PG * 256]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + + /*--------------------------------------------------------- + * (4) Qos null data + *---------------------------------------------------------- + */ + qosnull = &reserved_page_packet_8821[QOSNULL_PG * 256]; + SET_80211_HDR_ADDRESS1(qosnull, mac->bssid); + SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); + SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + + if (!dl_whole_packets) { + totalpacketlen = 256 * (QOSNULL_PG + 1) - 40; + goto out; + } + /*--------------------------------------------------------- + * (5) ARP Resp + *---------------------------------------------------------- + */ + arpresp = &reserved_page_packet_8821[ARPRESP_PG * 256]; + SET_80211_HDR_ADDRESS1(arpresp, mac->bssid); + SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(arpresp, mac->bssid); + + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); + + /*--------------------------------------------------------- + * (6) Remote Wake Ctrl + *---------------------------------------------------------- + */ + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, + REMOTE_PG); + + /*--------------------------------------------------------- + * (7) GTK Ext Memory + *---------------------------------------------------------- + */ + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821 - 40; + +out: + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "rtl8821ae_set_fw_rsvdpagepkt(): packet data\n", + &reserved_page_packet_8821[0], totalpacketlen); + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *)skb_put(skb, totalpacketlen), + &reserved_page_packet_8821, totalpacketlen); + + rtstatus = rtl_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (!b_dl_finished && b_dlok) { + 
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Set RSVD page location to Fw.\n"); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, + sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + if (dl_whole_packets) { + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "wowlan H2C_RSVDPAGE:\n", + u1RsvdPageLoc2, 7); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE, + sizeof(u1RsvdPageLoc2), + u1RsvdPageLoc2); + } + } + + if (!b_dlok) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set RSVD page location to Fw FAIL!!!!!!.\n"); + } +} + +/*Should check FW support p2p or not.*/ +static void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) +{ + u8 u1_ctwindow_period[1] = { ctwindow}; + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_CTW_CMD, 1, + u1_ctwindow_period); +} + +void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_p2p_ps_info *p2pinfo = &rtlps->p2p_ps_info; + struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload; + u8 i; + u16 ctwindow; + u32 start_time, tsf_low; + + switch (p2p_ps_state) { + case P2P_PS_DISABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); + memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload)); + break; + case P2P_PS_ENABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); + /* update CTWindow value. */ + if (p2pinfo->ctwindow > 0) { + p2p_ps_offload->ctwindow_en = 1; + ctwindow = p2pinfo->ctwindow; + rtl8821ae_set_p2p_ctw_period_cmd(hw, ctwindow); + } + + /* hw only support 2 set of NoA */ + for (i = 0 ; i < p2pinfo->noa_num ; i++) { + /* To control the register setting for which NOA*/ + rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); + if (i == 0) + p2p_ps_offload->noa0_en = 1; + else + p2p_ps_offload->noa1_en = 1; + + /* config P2P NoA Descriptor Register */ + rtl_write_dword(rtlpriv, 0x5E0, p2pinfo->noa_duration[i]); + rtl_write_dword(rtlpriv, 0x5E4, p2pinfo->noa_interval[i]); + + /*Get Current TSF value */ + tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + start_time = p2pinfo->noa_start_time[i]; + if (p2pinfo->noa_count_type[i] != 1) { + while (start_time <= (tsf_low+(50*1024))) { + start_time += p2pinfo->noa_interval[i]; + if (p2pinfo->noa_count_type[i] != 255) + p2pinfo->noa_count_type[i]--; + } + } + rtl_write_dword(rtlpriv, 0x5E8, start_time); + rtl_write_dword(rtlpriv, 0x5EC, + p2pinfo->noa_count_type[i]); + } + + if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) { + /* rst p2p circuit */ + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); + + p2p_ps_offload->offload_en = 1; + + if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { + p2p_ps_offload->role = 1; + p2p_ps_offload->allstasleep = 0; + } else { + p2p_ps_offload->role = 0; + } + + p2p_ps_offload->discovery = 0; + } + break; + case P2P_PS_SCAN: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n"); + p2p_ps_offload->discovery = 1; + break; + case P2P_PS_SCAN_DONE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n"); + p2p_ps_offload->discovery = 0; + p2pinfo->p2p_ps_state = P2P_PS_ENABLE; + break; + default: + break; + } + + rtl8821ae_fill_h2c_cmd(hw, + H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); +} + +static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 rate = cmd_buf[0] & 0x3F; + + 
rtlhal->current_ra_rate = rtl8821ae_hw_rate_to_mrate(hw, rate); + + rtl8821ae_dm_update_init_rate(hw, rate); +} + +static void _rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, + u8 c2h_cmd_id, u8 c2h_cmd_len, + u8 *tmp_buf) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (c2h_cmd_id) { + case C2H_8812_DBG: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_DBG!!\n"); + break; + case C2H_8812_RA_RPT: + rtl8821ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); + break; + case C2H_8812_BT_INFO: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "[C2H], C2H_8812_BT_INFO!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, + tmp_buf, + c2h_cmd_len); + break; + default: + break; + } +} + +void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, + u8 length) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; + u8 *tmp_buf = NULL; + + c2h_cmd_id = buffer[0]; + c2h_cmd_seq = buffer[1]; + c2h_cmd_len = length - 2; + tmp_buf = buffer + 2; + + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", + c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); + + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, + "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); + _rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h new file mode 100644 index 0000000000000000000000000000000000000000..591c14c0b9b52bd9247676cf972b18853874d07a --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h @@ -0,0 +1,351 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE__FW__H__ +#define __RTL8821AE__FW__H__ +#include "def.h" + +#define FW_8821AE_SIZE 0x8000 +#define FW_8821AE_START_ADDRESS 0x1000 +#define FW_8821AE_END_ADDRESS 0x5FFF +#define FW_8821AE_PAGE_SIZE 4096 +#define FW_8821AE_POLLING_DELAY 5 +#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000 + +#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x9500) + +#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x2100) + +#define USE_OLD_WOWLAN_DEBUG_FW 0 + +#define H2C_8821AE_RSVDPAGE_LOC_LEN 5 +#define H2C_8821AE_PWEMODE_LENGTH 5 +#define H2C_8821AE_JOINBSSRPT_LENGTH 1 +#define H2C_8821AE_AP_OFFLOAD_LENGTH 3 +#define H2C_8821AE_WOWLAN_LENGTH 3 +#define H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH 3 +#if (USE_OLD_WOWLAN_DEBUG_FW == 0) +#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 1 +#else +#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 3 +#endif +#define H2C_8821AE_AOAC_GLOBAL_INFO_LEN 2 +#define H2C_8821AE_AOAC_RSVDPAGE_LOC_LEN 7 +#define H2C_8821AE_DISCONNECT_DECISION_CTRL_LEN 3 + +/* Fw PS state for RPWM. +*BIT[2:0] = HW state + +*BIT[3] = Protocol PS state, +1: register active state , +0: register sleep state + +*BIT[4] = sub-state +*/ +#define FW_PS_GO_ON BIT(0) +#define FW_PS_TX_NULL BIT(1) +#define FW_PS_RF_ON BIT(2) +#define FW_PS_REGISTER_ACTIVE BIT(3) + +#define FW_PS_DPS BIT(0) +#define FW_PS_LCLK (FW_PS_DPS) +#define FW_PS_RF_OFF BIT(1) +#define FW_PS_ALL_ON BIT(2) +#define FW_PS_ST_ACTIVE BIT(3) +#define FW_PS_ISR_ENABLE BIT(4) +#define FW_PS_IMR_ENABLE BIT(5) + +#define FW_PS_ACK BIT(6) +#define FW_PS_TOGGLE BIT(7) + + /* 8821AE RPWM value*/ + /* BIT[0] = 1: 32k, 0: 40M*/ + /* 32k*/ +#define FW_PS_CLOCK_OFF BIT(0) +/*40M*/ +#define FW_PS_CLOCK_ON 0 + +#define FW_PS_STATE_MASK (0x0F) +#define FW_PS_STATE_HW_MASK (0x07) +/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/ +#define FW_PS_STATE_INT_MASK (0x3F) + +#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) +#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x)) +#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x)) +#define FW_PS_ISR_VAL(x) ((x) & 0x70) +#define FW_PS_IMR_MASK(x) ((x) & 0xDF) +#define FW_PS_KEEP_IMR(x) ((x) & 0x20) + +#define FW_PS_STATE_S0 (FW_PS_DPS) +#define FW_PS_STATE_S1 (FW_PS_LCLK) +#define FW_PS_STATE_S2 (FW_PS_RF_OFF) +#define FW_PS_STATE_S3 (FW_PS_ALL_ON) +#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON)) + /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/ +#define FW_PS_STATE_ALL_ON_8821AE (FW_PS_CLOCK_ON) + /* (FW_PS_RF_ON)*/ +#define FW_PS_STATE_RF_ON_8821AE (FW_PS_CLOCK_ON) + /* 0x0*/ +#define FW_PS_STATE_RF_OFF_8821AE (FW_PS_CLOCK_ON) + /* (FW_PS_STATE_RF_OFF)*/ +#define FW_PS_STATE_RF_OFF_LOW_PWR_8821AE (FW_PS_CLOCK_OFF) + +#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4) +#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3) +#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2) +#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1) + +/* For 8821AE H2C PwrMode Cmd ID 5.*/ +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK) +#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON)) +#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON)) +#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE)) +#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40) + +#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0))) + +#define IS_IN_LOW_POWER_STATE_8821AE(__state) \ + (FW_PS_STATE(__state) 
== FW_PS_CLOCK_OFF) + +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +struct rtl8821a_firmware_header { + u16 signature; + u8 category; + u8 function; + u16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + u16 ramcodeSize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; +}; + +enum rtl8812_c2h_evt { + C2H_8812_DBG = 0, + C2H_8812_LB = 1, + C2H_8812_TXBF = 2, + C2H_8812_TX_REPORT = 3, + C2H_8812_BT_INFO = 9, + C2H_8812_BT_MP = 11, + C2H_8812_RA_RPT = 12, + + C2H_8812_FW_SWCHNL = 0x10, + C2H_8812_IQK_FINISH = 0x11, + MAX_8812_C2HEVENT +}; + +enum rtl8821a_h2c_cmd { + H2C_8821AE_RSVDPAGE = 0, + H2C_8821AE_MSRRPT = 1, + H2C_8821AE_SCAN = 2, + H2C_8821AE_KEEP_ALIVE_CTRL = 3, + H2C_8821AE_DISCONNECT_DECISION = 4, + H2C_8821AE_INIT_OFFLOAD = 6, + H2C_8821AE_AP_OFFLOAD = 8, + H2C_8821AE_BCN_RSVDPAGE = 9, + H2C_8821AE_PROBERSP_RSVDPAGE = 10, + + H2C_8821AE_SETPWRMODE = 0x20, + H2C_8821AE_PS_TUNING_PARA = 0x21, + H2C_8821AE_PS_TUNING_PARA2 = 0x22, + H2C_8821AE_PS_LPS_PARA = 0x23, + H2C_8821AE_P2P_PS_OFFLOAD = 024, + + H2C_8821AE_WO_WLAN = 0x80, + H2C_8821AE_REMOTE_WAKE_CTRL = 0x81, + H2C_8821AE_AOAC_GLOBAL_INFO = 0x82, + H2C_8821AE_AOAC_RSVDPAGE = 0x83, + + H2C_RSSI_21AE_REPORT = 0x42, + H2C_8821AE_RA_MASK = 0x40, + H2C_8821AE_SELECTIVE_SUSPEND_ROF_CMD, + H2C_8821AE_P2P_PS_MODE, + H2C_8821AE_PSD_RESULT, + /*Not defined CTW CMD for P2P yet*/ + H2C_8821AE_P2P_PS_CTW_CMD, + MAX_8821AE_H2CCMD +}; + +#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0)) + +#define SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_ALL_PKT_DROP(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 4, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_GPIO_ACTIVE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 5, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_REKEY_WAKE_UP(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 7, 1, __value) +#define SET_8812_H2CCMD_WOWLAN_GPIONUM(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd) + 1, 0, 8, __value) +#define SET_8812_H2CCMD_WOWLAN_GPIO_DURATION(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd) + 2, 0, 8, __value) + +#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_RLBM(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 4, __value) +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 4, 4, __value) +#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value) +#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value) +#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value) +#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__cmd) \ + LE_BITS_TO_1BYTE(__cmd, 0, 8) + +#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 
0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) + +/* _MEDIA_STATUS_RPT_PARM_CMD1 */ +#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value) +#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value) +#define SET_H2CCMD_MSRRPT_PARM_MACID(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd+1, 0, 8, __value) +#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd+2, 0, 8, __value) + +/* AP_OFFLOAD */ +#define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value) +#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) +#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value) +#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value) + +/* Keep Alive Control*/ +#define SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value) +#define SET_8812_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value) +#define SET_8812_H2CCMD_KEEP_ALIVE_PERIOD(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) + +/*REMOTE_WAKE_CTRL */ +#define SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value) +#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value) +#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 2, 1, __value) +#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 3, 1, __value) +#define SET_8812_H2CCMD_REMOTE_WAKE_CTRL_REALWOWV2_EN(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 6, 1, __value) + +/* GTK_OFFLOAD */ +#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 8, __value) +#define SET_8812_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) + +/* AOAC_RSVDPAGE_LOC */ +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd), 0, 8, __value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+3, 0, 8, __value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+4, 0, 8, __value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE((__cmd)+5, 0, 8, __value) + +/* Disconnect_Decision_Control */ +#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_ENABLE(__cmd, __value) \ + SET_BITS_TO_LE_1BYTE(__cmd, 0, 1, __value) +#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_USER_SETTING(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE(__cmd, 1, 1, __value) +#define 
SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_CHECK_PERIOD(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE((__cmd)+1, 0, 8, __value) /* unit: beacon period */ +#define SET_8812_H2CCMD_DISCONNECT_DECISION_CTRL_TRYPKT_NUM(__cmd, __value)\ + SET_BITS_TO_LE_1BYTE((__cmd)+2, 0, 8, __value) + +int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw); +#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) +void rtl8821ae_set_fw_related_for_wowlan(struct ieee80211_hw *hw, + bool used_wowlan_fw); + +#endif +void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *cmdbuffer); +void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw); +void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl8821ae_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, + u8 mstatus); +void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, + u8 ap_offload_enable); +void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool b_dl_finished, bool dl_whole_packet); +void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool b_dl_finished, bool dl_whole_packet); +void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, + u8 p2p_ps_state); +void rtl8821ae_set_fw_wowlan_mode(struct ieee80211_hw *hw, bool func_en); +void rtl8821ae_set_fw_remote_wake_ctrl_cmd(struct ieee80211_hw *hw, + u8 enable); +void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw, bool func_en); +void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw, + bool enabled); +void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw); +void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, + u8 *buffer, u8 length); +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c new file mode 100644 index 0000000000000000000000000000000000000000..310d3163dc5b6a3f1e51a9a59ed999fe991a2f06 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -0,0 +1,4218 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "led.h" +#include "hw.h" +#include "../pwrseqcmd.h" +#include "pwrseq.h" +#include "../btcoexist/rtl_btc.h" + +#define LLT_CONFIG 5 + +static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + unsigned long flags; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + while (skb_queue_len(&ring->queue)) { + struct rtl_tx_desc *entry = &ring->desc[ring->idx]; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(rtlpci->pdev, + rtlpriv->cfg->ops->get_desc( + (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); +} + +static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpci->reg_bcn_ctrl_val |= set_bits; + rtlpci->reg_bcn_ctrl_val &= ~clear_bits; + + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); +} + +void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte |= BIT(0); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +static void _rtl8821ae_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(1)); +} + +static void _rtl8821ae_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(1), 0); +} + +static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw, + u8 rpwm_val, bool b_need_turn_off_ckk) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_support_remote_wake_up; + u32 count = 0, isr_regaddr, content; + bool b_schedule_timer = b_need_turn_off_ckk; + + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&b_support_remote_wake_up)); + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + + while (1) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (rtlhal->fw_clk_change_in_progress) { + while (rtlhal->fw_clk_change_in_progress) { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + count++; + udelay(100); + if (count > 1000) + goto change_done; + 
spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + } + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + goto change_done; + } + } +change_done: + if (IS_IN_LOW_POWER_STATE_8821AE(rtlhal->fw_ps_state)) { + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + if (FW_PS_IS_ACK(rpwm_val)) { + isr_regaddr = REG_HISR; + content = rtl_read_dword(rtlpriv, isr_regaddr); + while (!(content & IMR_CPWM) && (count < 500)) { + udelay(50); + count++; + content = rtl_read_dword(rtlpriv, isr_regaddr); + } + + if (content & IMR_CPWM) { + rtl_write_word(rtlpriv, isr_regaddr, 0x0100); + rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Receive CPWM INT!!! Set rtlhal->FwPSState = %X\n", + rtlhal->fw_ps_state); + } + } + + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + if (b_schedule_timer) + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } else { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } +} + +static void _rtl8821ae_set_fw_clock_off(struct ieee80211_hw *hw, + u8 rpwm_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + enum rf_pwrstate rtstate; + bool b_schedule_timer = false; + u8 queue; + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + if (!rtlhal->allow_sw_to_change_hwclc) + return; + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate)); + if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF) + return; + + for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) { + ring = &rtlpci->tx_ring[queue]; + if (skb_queue_len(&ring->queue)) { + b_schedule_timer = true; + break; + } + } + + if (b_schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + return; + } + + if (FW_PS_STATE(rtlhal->fw_ps_state) != + FW_PS_STATE_RF_OFF_LOW_PWR_8821AE) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (!rtlhal->fw_clk_change_in_progress) { + rtlhal->fw_clk_change_in_progress = true; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); + rtl_write_word(rtlpriv, REG_HISR, 0x0100); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } +} + +static void _rtl8821ae_set_fw_ps_rf_on(struct ieee80211_hw *hw) +{ + u8 rpwm_val = 0; + + rpwm_val |= (FW_PS_STATE_RF_OFF_8821AE | FW_PS_ACK); + _rtl8821ae_set_fw_clock_on(hw, rpwm_val, true); +} + +static void _rtl8821ae_fwlps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = false; + u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE; + + if (ppsc->low_power_enable) { + rpwm_val = (FW_PS_STATE_ALL_ON_8821AE|FW_PS_ACK);/* RF on */ + _rtl8821ae_set_fw_clock_on(hw, rpwm_val, false); + rtlhal->allow_sw_to_change_hwclc = 
false; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } else { + rpwm_val = FW_PS_STATE_ALL_ON_8821AE; /* RF on */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } +} + +static void _rtl8821ae_fwlps_enter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = true; + u8 rpwm_val; + + if (ppsc->low_power_enable) { + rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_8821AE; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlhal->allow_sw_to_change_hwclc = true; + _rtl8821ae_set_fw_clock_off(hw, rpwm_val); + } else { + rpwm_val = FW_PS_STATE_RF_OFF_8821AE; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + } +} + +static void _rtl8821ae_download_rsvd_page(struct ieee80211_hw *hw, + bool dl_whole_packets) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 tmp_regcr, tmp_reg422, bcnvalid_reg; + u8 count = 0, dlbcn_count = 0; + bool send_beacon = false; + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, (tmp_regcr | BIT(0))); + + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + + tmp_reg422 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & (~BIT(6))); + if (tmp_reg422 & BIT(6)) + send_beacon = true; + + do { + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL + 2); + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, + (bcnvalid_reg | BIT(0))); + _rtl8821ae_return_beacon_queue_skb(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_set_fw_rsvdpagepkt(hw, false, + dl_whole_packets); + else + rtl8821ae_set_fw_rsvdpagepkt(hw, false, + dl_whole_packets); + + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL + 2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count < 20) { + count++; + udelay(10); + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL + 2); + } + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); + + if (!(bcnvalid_reg & BIT(0))) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Download RSVD page failed!\n"); + if (bcnvalid_reg & BIT(0) && rtlhal->enter_pnp_sleep) { + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, bcnvalid_reg | BIT(0)); + _rtl8821ae_return_beacon_queue_skb(hw); + if (send_beacon) { + dlbcn_count = 0; + do { + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, + bcnvalid_reg | BIT(0)); + + _rtl8821ae_return_beacon_queue_skb(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_set_fw_rsvdpagepkt(hw, true, + false); + else + rtl8821ae_set_fw_rsvdpagepkt(hw, true, + false); + + /* check rsvd page download OK. 
*/ + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count < 20) { + count++; + udelay(10); + bcnvalid_reg = + rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + } + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); + + if (!(bcnvalid_reg & BIT(0))) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "2 Download RSVD page failed!\n"); + } + } + + if (bcnvalid_reg & BIT(0)) + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, BIT(0)); + + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); + + if (send_beacon) + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422); + + if (!rtlhal->enter_pnp_sleep) { + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, (tmp_regcr & ~(BIT(0)))); + } +} + +void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + switch (variable) { + case HW_VAR_ETHER_ADDR: + *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_MACID); + *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_MACID + 4); + break; + case HW_VAR_BSSID: + *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_BSSID); + *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4); + break; + case HW_VAR_MEDIA_STATUS: + val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3; + break; + case HW_VAR_SLOT_TIME: + *((u8 *)(val)) = mac->slot_time; + break; + case HW_VAR_BEACON_INTERVAL: + *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_BCN_INTERVAL); + break; + case HW_VAR_ATIM_WINDOW: + *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_ATIMWND); + break; + case HW_VAR_RCR: + *((u32 *)(val)) = rtlpci->receive_config; + break; + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, + HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else + *((bool *)(val)) = true; + } + break; } + case HW_VAR_FW_PSMODE_STATUS: + *((bool *)(val)) = ppsc->fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF:{ + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + *((u64 *)(val)) = tsf; + + break; } + case HAL_DEF_WOWLAN: + if (ppsc->wo_wlan_mode) + *((bool *)(val)) = true; + else + *((bool *)(val)) = false; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process %x\n", variable); + break; + } +} + +void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 idx; + + switch (variable) { + case HW_VAR_ETHER_ADDR:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } + break; + } + case HW_VAR_BASIC_RATE:{ + u16 b_rate_cfg = ((u16 *)val)[0]; + b_rate_cfg = b_rate_cfg & 0x15f; + 
rtl_write_word(rtlpriv, REG_RRSR, b_rate_cfg); + break; + } + case HW_VAR_BSSID:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } + break; + } + case HW_VAR_SIFS: + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[0]); + + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + + rtl_write_byte(rtlpriv, REG_RESP_SIFS_OFDM + 1, val[0]); + rtl_write_byte(rtlpriv, REG_RESP_SIFS_OFDM, val[0]); + break; + case HW_VAR_R2T_SIFS: + rtl_write_byte(rtlpriv, REG_RESP_SIFS_OFDM + 1, val[0]); + break; + case HW_VAR_SLOT_TIME:{ + u8 e_aci; + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "HW_VAR_SLOT_TIME %x\n", val[0]); + + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + + for (e_aci = 0; e_aci < AC_MAX; e_aci++) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *)(&e_aci)); + } + break; } + case HW_VAR_ACK_PREAMBLE:{ + u8 reg_tmp; + u8 short_preamble = (bool)(*(u8 *)val); + + reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2); + if (short_preamble) { + reg_tmp |= BIT(1); + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, + reg_tmp); + } else { + reg_tmp &= (~BIT(1)); + rtl_write_byte(rtlpriv, + REG_TRXPTCL_CTL + 2, + reg_tmp); + } + break; } + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); + break; + case HW_VAR_AMPDU_MIN_SPACE:{ + u8 min_spacing_to_set; + u8 sec_min_space; + + min_spacing_to_set = *((u8 *)val); + if (min_spacing_to_set <= 7) { + sec_min_space = 0; + + if (min_spacing_to_set < sec_min_space) + min_spacing_to_set = sec_min_space; + + mac->min_space_cfg = ((mac->min_space_cfg & + 0xf8) | + min_spacing_to_set); + + *val = min_spacing_to_set; + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; } + case HW_VAR_SHORTGI_DENSITY:{ + u8 density_to_set; + + density_to_set = *((u8 *)val); + mac->min_space_cfg |= (density_to_set << 3); + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + + break; } + case HW_VAR_AMPDU_FACTOR:{ + u32 ampdu_len = (*((u8 *)val)); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + if (ampdu_len < VHT_AGG_SIZE_128K) + ampdu_len = + (0x2000 << (*((u8 *)val))) - 1; + else + ampdu_len = 0x1ffff; + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (ampdu_len < HT_AGG_SIZE_64K) + ampdu_len = + (0x2000 << (*((u8 *)val))) - 1; + else + ampdu_len = 0xffff; + } + ampdu_len |= BIT(31); + + rtl_write_dword(rtlpriv, + REG_AMPDU_MAX_LENGTH_8812, ampdu_len); + break; } + case HW_VAR_AC_PARAM:{ + u8 e_aci = *((u8 *)val); + + rtl8821ae_dm_init_edca_turbo(hw); + if (rtlpci->acm_method != EACMWAY2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_ACM_CTRL, + (u8 *)(&e_aci)); + break; } + case HW_VAR_ACM_CTRL:{ + u8 e_aci = *((u8 *)val); + union aci_aifsn *p_aci_aifsn = + (union aci_aifsn *)(&mac->ac[0].aifs); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = + acm_ctrl | ((rtlpci->acm_method == 2) ? 
0x0 : 0x1); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= ACMHW_BEQEN; + break; + case AC2_VI: + acm_ctrl |= ACMHW_VIQEN; + break; + case AC3_VO: + acm_ctrl |= ACMHW_VOQEN; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", + acm); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~ACMHW_BEQEN); + break; + case AC2_VI: + acm_ctrl &= (~ACMHW_VIQEN); + break; + case AC3_VO: + acm_ctrl &= (~ACMHW_BEQEN); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + } + + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", + acm_ctrl); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; } + case HW_VAR_RCR: + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]); + rtlpci->receive_config = ((u32 *)(val))[0]; + break; + case HW_VAR_RETRY_LIMIT:{ + u8 retry_limit = ((u8 *)(val))[0]; + + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + break; } + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + rtlefuse->efuse_usedbytes = *((u16 *)val); + break; + case HW_VAR_EFUSE_USAGE: + rtlefuse->efuse_usedpercentage = *((u8 *)val); + break; + case HW_VAR_IO_CMD: + rtl8821ae_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_SET_RPWM:{ + u8 rpwm_val; + + rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); + udelay(1); + + if (rpwm_val & BIT(7)) { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + (*(u8 *)val)); + } else { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *)val) | BIT(7))); + } + + break; } + case HW_VAR_H2C_FW_PWRMODE: + rtl8821ae_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_FW_PSMODE_STATUS: + ppsc->fw_current_inpsmode = *((bool *)val); + break; + case HW_VAR_INIT_RTS_RATE: + break; + case HW_VAR_RESUME_CLK_ON: + _rtl8821ae_set_fw_ps_rf_on(hw); + break; + case HW_VAR_FW_LPS_ACTION:{ + bool b_enter_fwlps = *((bool *)val); + + if (b_enter_fwlps) + _rtl8821ae_fwlps_enter(hw); + else + _rtl8821ae_fwlps_leave(hw); + break; } + case HW_VAR_H2C_FW_JOINBSSRPT:{ + u8 mstatus = (*(u8 *)val); + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, + NULL); + _rtl8821ae_download_rsvd_page(hw, false); + } + rtl8821ae_set_fw_media_status_rpt_cmd(hw, mstatus); + + break; } + case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: + rtl8821ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_AID:{ + u16 u2btmp; + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | + mac->assoc_id)); + break; } + case HW_VAR_CORRECT_TSF:{ + u8 btype_ibss = ((u8 *)(val))[0]; + + if (btype_ibss) + _rtl8821ae_stop_tx_beacon(hw); + + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); + + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32)(mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32)((mac->tsf >> 32) & 0xffffffff)); + + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); + + if (btype_ibss) + _rtl8821ae_resume_tx_beacon(hw); + break; } + case HW_VAR_NAV_UPPER: { + u32 us_nav_upper = ((u32)*val); + + if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) { + RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING, + "The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n", + us_nav_upper, HAL_92C_NAV_UPPER_UNIT); + break; + } + rtl_write_byte(rtlpriv, 
REG_NAV_UPPER, + ((u8)((us_nav_upper + + HAL_92C_NAV_UPPER_UNIT - 1) / + HAL_92C_NAV_UPPER_UNIT))); + break; } + case HW_VAR_KEEP_ALIVE: { + u8 array[2]; + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, 2, + array); + break; } + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process %x\n", variable); + break; + } +} + +static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool status = true; + long count = 0; + u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | + _LLT_OP(_LLT_WRITE_ACCESS); + + rtl_write_dword(rtlpriv, REG_LLT_INIT, value); + + do { + value = rtl_read_dword(rtlpriv, REG_LLT_INIT); + if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) + break; + + if (count > POLLING_LLT_THRESHOLD) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to polling write LLT done at address %d!\n", + address); + status = false; + break; + } + } while (++count); + + return status; +} + +static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned short i; + u8 txpktbuf_bndy; + u32 rqpn; + u8 maxpage; + bool status; + + maxpage = 255; + txpktbuf_bndy = 0xF8; + rqpn = 0x80e70808; + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) { + txpktbuf_bndy = 0xFA; + rqpn = 0x80e90808; + } + + rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); + rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1); + + rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_PBP, 0x31); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); + + for (i = 0; i < (txpktbuf_bndy - 1); i++) { + status = _rtl8821ae_llt_write(hw, i, i + 1); + if (!status) + return status; + } + + status = _rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + if (!status) + return status; + + for (i = txpktbuf_bndy; i < maxpage; i++) { + status = _rtl8821ae_llt_write(hw, i, (i + 1)); + if (!status) + return status; + } + + status = _rtl8821ae_llt_write(hw, maxpage, txpktbuf_bndy); + if (!status) + return status; + + rtl_write_dword(rtlpriv, REG_RQPN, rqpn); + + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00); + + return true; +} + +static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlpriv->rtlhal.up_first_time) + return; + + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pled0); + else + rtl8821ae_sw_led_on(hw, pled0); + else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pled0); + else + rtl8821ae_sw_led_on(hw, pled0); + else + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_off(hw, pled0); + else + rtl8821ae_sw_led_off(hw, pled0); +} + +static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 bytetmp = 0; + u16 wordtmp = 0; + bool mac_func_enable = 
rtlhal->mac_func_enable; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); + + /*Auto Power Down to CHIP-off State*/ + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7)); + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + /* HW Power on sequence*/ + if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, + PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, + RTL8812_NIC_ENABLE_FLOW)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "init 8812 MAC Fail as power on failure\n"); + return false; + } + } else { + /* HW Power on sequence */ + if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, + PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, + RTL8821A_NIC_ENABLE_FLOW)){ + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "init 8821 MAC Fail as power on failure\n"); + return false; + } + } + + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4); + rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp); + + bytetmp = rtl_read_byte(rtlpriv, REG_CR); + bytetmp = 0xff; + rtl_write_byte(rtlpriv, REG_CR, bytetmp); + mdelay(2); + + bytetmp = 0xff; + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp); + mdelay(2); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3); + if (bytetmp & BIT(0)) { + bytetmp = rtl_read_byte(rtlpriv, 0x7c); + bytetmp |= BIT(6); + rtl_write_byte(rtlpriv, 0x7c, bytetmp); + } + } + + bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1); + bytetmp &= ~BIT(4); + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp); + + rtl_write_word(rtlpriv, REG_CR, 0x2ff); + + if (!mac_func_enable) { + if (!_rtl8821ae_llt_table_init(hw)) + return false; + } + + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); + + /* Enable FW Beamformer Interrupt */ + bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3); + rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6)); + + wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); + wordtmp &= 0xf; + wordtmp |= 0xF5B1; + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); + + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF); + /*low address*/ + rtl_write_dword(rtlpriv, REG_BCNQ_DESA, + rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_MGQ_DESA, + rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VOQ_DESA, + rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VIQ_DESA, + rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BEQ_DESA, + rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BKQ_DESA, + rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_HQ_DESA, + rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_RX_DESA, + rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); + + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77); + + rtl_write_dword(rtlpriv, REG_INT_MIG, 0); + + rtl_write_dword(rtlpriv, REG_MCUTST_1, 0); + + rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3); + _rtl8821ae_gen_refresh_led_state(hw); + + return true; +} + +static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rrsr; + + reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + + rtl_write_dword(rtlpriv, REG_RRSR, 
reg_rrsr); + /* ARFB table 9 for 11ac 5G 2SS */ + rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000); + /* ARFB table 10 for 11ac 5G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000); + /* ARFB table 11 for 11ac 24G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR2, 0x00000015); + rtl_write_dword(rtlpriv, REG_ARFR2 + 4, 0x003ff000); + /* ARFB table 12 for 11ac 24G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015); + rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000); + /* 0x420[7] = 0, enable retry AMPDU in new AMPDU, not single MPDU. */ + rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00); + rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70); + + /* Set retry limit */ + rtl_write_word(rtlpriv, REG_RL, 0x0707); + + /* Set Data / Response auto rate fallback retry count */ + rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); + + rtlpci->reg_bcn_ctrl_val = 0x1d; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val); + + /* TBTT prohibit hold time. Suggested by designer TimChen. */ + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + + /* AGGR_BK_TIME Reg51A 0x16 */ + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + + /* For Rx TP. Suggested by SD1 Richard. Added by tynli. 2010.04.12. */ + rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); + + rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); + rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1F1F); +} + +static u16 _rtl8821ae_mdio_read(struct rtl_priv *rtlpriv, u8 addr) +{ + u16 ret = 0; + u8 tmp = 0, count = 0; + + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6); + count++; + } + if (0 == tmp) + ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA); + + return ret; +} + +static void _rtl8821ae_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data) +{ + u8 tmp = 0, count = 0; + + rtl_write_word(rtlpriv, REG_MDIO_WDATA, data); + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5); + count++; + } +} + +static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) +{ + u16 read_addr = addr & 0xfffc; + u8 tmp = 0, count = 0, ret = 0; + + rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr); + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } + if (0 == tmp) { + read_addr = REG_DBI_RDATA + addr % 4; + ret = rtl_read_word(rtlpriv, read_addr); + } + return ret; +} + +static void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data) +{ + u8 tmp = 0, count = 0; + u16 write_addr, remainder = addr % 4; + + write_addr = REG_DBI_WDATA + remainder; + rtl_write_byte(rtlpriv, write_addr, data); + + write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12)); + rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr); + + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1); + + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } +} + 
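+/*
+ * Note: the MDIO and DBI accessors above all follow the same bounded-poll
+ * idiom for indirect register access -- program the address (and data, for
+ * writes), set the kick bit, then re-read that bit up to 20 times with
+ * udelay(10) between reads, consuming the result only if the bit cleared.
+ * A minimal sketch of the idiom (illustrative only, mirroring
+ * _rtl8821ae_dbi_read() above):
+ *
+ *	rtl_write_word(rtlpriv, REG_DBI_ADDR, addr & 0xfffc);
+ *	rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2);
+ *	while (rtl_read_byte(rtlpriv, REG_DBI_FLAG) && count++ < 20)
+ *		udelay(10);
+ *	if (!rtl_read_byte(rtlpriv, REG_DBI_FLAG))
+ *		val = rtl_read_word(rtlpriv, REG_DBI_RDATA + (addr % 4));
+ */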
+static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (_rtl8821ae_mdio_read(rtlpriv, 0x04) != 0x8544) + _rtl8821ae_mdio_write(rtlpriv, 0x04, 0x8544); + + if (_rtl8821ae_mdio_read(rtlpriv, 0x0b) != 0x0070) + _rtl8821ae_mdio_write(rtlpriv, 0x0b, 0x0070); + } + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); + _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718); + _rtl8821ae_dbi_write(rtlpriv, 0x718, tmp|BIT(4)); + } +} + +void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value; + u8 tmp; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); + + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "not open hw encryption\n"); + return; + } + + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; + + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; + } + + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1)); + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "The SECR-value %x\n", sec_reg_value); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); +} + +/* Static MacID Mapping (cf. Used in MacIdDoStaticMapping) ---------- */ +#define MAC_ID_STATIC_FOR_DEFAULT_PORT 0 +#define MAC_ID_STATIC_FOR_BROADCAST_MULTICAST 1 +#define MAC_ID_STATIC_FOR_BT_CLIENT_START 2 +#define MAC_ID_STATIC_FOR_BT_CLIENT_END 3 +/* ----------------------------------------------------------- */ + +static void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 media_rpt[4] = {RT_MEDIA_CONNECT, 1, + MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, + MAC_ID_STATIC_FOR_BT_CLIENT_END}; + + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Initialize MacId media status: from %d to %d\n", + MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, + MAC_ID_STATIC_FOR_BT_CLIENT_END); +} + +static bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + /* write reg 0x350 Bit[26]=1. Enable debug port. */ + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3); + if (!(tmp & BIT(2))) { + rtl_write_byte(rtlpriv, REG_DBI_CTRL + 3, (tmp | BIT(2))); + mdelay(100); + } + + /* read reg 0x350 Bit[25] if 1 : RX hang */ + /* read reg 0x350 Bit[24] if 1 : TX hang */ + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3); + if ((tmp & BIT(0)) || (tmp & BIT(1))) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CheckPcieDMAHang8821AE(): true! 
Reset PCIE DMA!\n"); + return true; + } else { + return false; + } +} + +static bool _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw, + bool mac_power_on, + bool in_watchdog) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + bool release_mac_rx_pause; + u8 backup_pcie_dma_pause; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); + + /* 1. Disable register write lock. 0x1c[1] = 0 */ + tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL); + tmp &= ~(BIT(1)); + rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* write 0xCC bit[2] = 1'b1 */ + tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2); + tmp |= BIT(2); + rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp); + } + + /* 2. Check and pause TRX DMA */ + /* write 0x284 bit[18] = 1'b1 */ + /* write 0x301 = 0xFF */ + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp & BIT(2)) { + /* Already pause before the function for another purpose. */ + release_mac_rx_pause = false; + } else { + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2))); + release_mac_rx_pause = true; + } + backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1); + if (backup_pcie_dma_pause != 0xFF) + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF); + + if (mac_power_on) { + /* 3. reset TRX function */ + /* write 0x100 = 0x00 */ + rtl_write_byte(rtlpriv, REG_CR, 0); + } + + /* 4. Reset PCIe DMA. 0x3[0] = 0 */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + tmp &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp); + + /* 5. Enable PCIe DMA. 0x3[0] = 1 */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + tmp |= BIT(0); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp); + + if (mac_power_on) { + /* 6. enable TRX function */ + /* write 0x100 = 0xFF */ + rtl_write_byte(rtlpriv, REG_CR, 0xFF); + + /* We should init LLT & RQPN and + * prepare Tx/Rx descrptor address later + * because MAC function is reset.*/ + } + + /* 7. Restore PCIe autoload down bit */ + /* 8812AE does not has the defination. */ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* write 0xF8 bit[17] = 1'b1 */ + tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2); + tmp |= BIT(1); + rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp); + } + + /* In MAC power on state, BB and RF maybe in ON state, + * if we release TRx DMA here. + * it will cause packets to be started to Tx/Rx, + * so we release Tx/Rx DMA later.*/ + if (!mac_power_on/* || in_watchdog*/) { + /* 8. release TRX DMA */ + /* write 0x284 bit[18] = 1'b0 */ + /* write 0x301 = 0x00 */ + if (release_mac_rx_pause) { + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, + tmp & (~BIT(2))); + } + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, + backup_pcie_dma_pause); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* 9. 
lock system register */ + /* write 0xCC bit[2] = 1'b0 */ + tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2); + tmp &= ~(BIT(2)); + rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp); + } + return true; +} + +static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + u8 fw_reason = 0; + struct timeval ts; + + fw_reason = rtl_read_byte(rtlpriv, REG_MCUTST_WOWLAN); + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "WOL Read 0x1c7 = %02X\n", + fw_reason); + + ppsc->wakeup_reason = 0; + + rtlhal->last_suspend_sec = ts.tv_sec; + + switch (fw_reason) { + case FW_WOW_V2_PTK_UPDATE_EVENT: + ppsc->wakeup_reason = WOL_REASON_PTK_UPDATE; + do_gettimeofday(&ts); + ppsc->last_wakeup_time = ts.tv_sec*1000 + ts.tv_usec/1000; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a WOL PTK Key update event!\n"); + break; + case FW_WOW_V2_GTK_UPDATE_EVENT: + ppsc->wakeup_reason = WOL_REASON_GTK_UPDATE; + do_gettimeofday(&ts); + ppsc->last_wakeup_time = ts.tv_sec*1000 + ts.tv_usec/1000; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a WOL GTK Key update event!\n"); + break; + case FW_WOW_V2_DISASSOC_EVENT: + ppsc->wakeup_reason = WOL_REASON_DISASSOC; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a disassociation event!\n"); + break; + case FW_WOW_V2_DEAUTH_EVENT: + ppsc->wakeup_reason = WOL_REASON_DEAUTH; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a deauth event!\n"); + break; + case FW_WOW_V2_FW_DISCONNECT_EVENT: + ppsc->wakeup_reason = WOL_REASON_AP_LOST; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a Fw disconnect decision (AP lost) event!\n"); + break; + case FW_WOW_V2_MAGIC_PKT_EVENT: + ppsc->wakeup_reason = WOL_REASON_MAGIC_PKT; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a magic packet event!\n"); + break; + case FW_WOW_V2_UNICAST_PKT_EVENT: + ppsc->wakeup_reason = WOL_REASON_UNICAST_PKT; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's an unicast packet event!\n"); + break; + case FW_WOW_V2_PATTERN_PKT_EVENT: + ppsc->wakeup_reason = WOL_REASON_PATTERN_PKT; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's a pattern match event!\n"); + break; + case FW_WOW_V2_RTD3_SSID_MATCH_EVENT: + ppsc->wakeup_reason = WOL_REASON_RTD3_SSID_MATCH; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's an RTD3 Ssid match event!\n"); + break; + case FW_WOW_V2_REALWOW_V2_WAKEUPPKT: + ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_WAKEUPPKT; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's an RealWoW wake packet event!\n"); + break; + case FW_WOW_V2_REALWOW_V2_ACKLOST: + ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_ACKLOST; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "It's an RealWoW ack lost event!\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + "WOL Read 0x1c7 = %02X, Unknown reason!\n", + fw_reason); + break; + } +} + +static void _rtl8821ae_init_trx_desc_hw_address(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + /*low address*/ + rtl_write_dword(rtlpriv, REG_BCNQ_DESA, + rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_MGQ_DESA, + rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VOQ_DESA, + rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VIQ_DESA, + rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BEQ_DESA, + 
rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BKQ_DESA, + rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_HQ_DESA, + rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_RX_DESA, + rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); +} + +static bool _rtl8821ae_init_llt_table(struct ieee80211_hw *hw, u32 boundary) +{ + bool status = true; + u32 i; + u32 txpktbuf_bndy = boundary; + u32 last_entry_of_txpktbuf = LAST_ENTRY_OF_TX_PKT_BUFFER; + + for (i = 0 ; i < (txpktbuf_bndy - 1) ; i++) { + status = _rtl8821ae_llt_write(hw, i , i + 1); + if (!status) + return status; + } + + status = _rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + if (!status) + return status; + + for (i = txpktbuf_bndy ; i < last_entry_of_txpktbuf ; i++) { + status = _rtl8821ae_llt_write(hw, i, (i + 1)); + if (!status) + return status; + } + + status = _rtl8821ae_llt_write(hw, last_entry_of_txpktbuf, + txpktbuf_bndy); + if (!status) + return status; + + return status; +} + +static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary, + u16 npq_rqpn_value, u32 rqpn_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + bool ret = true; + u16 count = 0, tmp16; + bool support_remote_wakeup; + + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wakeup)); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "boundary=0x%#X, NPQ_RQPNValue=0x%#X, RQPNValue=0x%#X\n", + boundary, npq_rqpn_value, rqpn_val); + + /* stop PCIe DMA + * 1. 0x301[7:0] = 0xFE */ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFE); + + /* wait TXFF empty + * 2. polling till 0x41A[15:0]=0x07FF */ + tmp16 = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + while ((tmp16 & 0x07FF) != 0x07FF) { + udelay(100); + tmp16 = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + count++; + if ((count % 200) == 0) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Tx queue is not empty for 20ms!\n"); + } + if (count >= 1000) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wait for Tx FIFO empty timeout!\n"); + break; + } + } + + /* TX pause + * 3. reg 0x522=0xFF */ + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + /* Wait TX State Machine OK + * 4. polling till reg 0x5FB~0x5F8 = 0x00000000 for 50ms */ + count = 0; + while (rtl_read_byte(rtlpriv, REG_SCH_TXCMD) != 0) { + udelay(100); + count++; + if (count >= 500) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wait for TX State Machine ready timeout !!\n"); + break; + } + } + + /* stop RX DMA path + * 5. 0x284[18] = 1 + * 6. wait till 0x284[17] == 1 + * wait RX DMA idle */ + count = 0; + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2))); + do { + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + udelay(10); + count++; + } while (!(tmp & BIT(1)) && count < 100); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wait until Rx DMA Idle. count=%d REG[0x286]=0x%x\n", + count, tmp); + + /* reset BB + * 7. 0x02 [0] = 0 */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN); + tmp &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, tmp); + + /* Reset TRX MAC + * 8. 0x100 = 0x00 + * Delay (1ms) */ + rtl_write_byte(rtlpriv, REG_CR, 0x00); + udelay(1000); + + /* Disable MAC Security Engine + * 9. 0x100 bit[9]=0 */ + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + tmp &= ~(BIT(1)); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp); + + /* To avoid DD-Tim Circuit hang + * 10. 
0x553 bit[5]=1 */ + tmp = rtl_read_byte(rtlpriv, REG_DUAL_TSF_RST); + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (tmp | BIT(5))); + + /* Enable MAC Security Engine + * 11. 0x100 bit[9]=1 */ + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, (tmp | BIT(1))); + + /* Enable TRX MAC + * 12. 0x100 = 0xFF + * Delay (1ms) */ + rtl_write_byte(rtlpriv, REG_CR, 0xFF); + udelay(1000); + + /* Enable BB + * 13. 0x02 [0] = 1 */ + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, (tmp | BIT(0))); + + /* beacon setting + * 14,15. set beacon head page (reg 0x209 and 0x424) */ + rtl_write_byte(rtlpriv, REG_TDECTRL + 1, (u8)boundary); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, (u8)boundary); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, (u8)boundary); + + /* 16. WMAC_LBK_BF_HD 0x45D[7:0] + * WMAC_LBK_BF_HD */ + rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, + (u8)boundary); + + rtl_write_word(rtlpriv, REG_TRXFF_BNDY, boundary); + + /* init LLT + * 17. init LLT */ + if (!_rtl8821ae_init_llt_table(hw, boundary)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, + "Failed to init LLT table!\n"); + return false; + } + + /* reallocate RQPN + * 18. reallocate RQPN and init LLT */ + rtl_write_word(rtlpriv, REG_RQPN_NPQ, npq_rqpn_value); + rtl_write_dword(rtlpriv, REG_RQPN, rqpn_val); + + /* release Tx pause + * 19. 0x522=0x00 */ + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + + /* enable PCIE DMA + * 20. 0x301[7:0] = 0x00 + * 21. 0x284[18] = 0 */ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0x00); + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp&~BIT(2))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "End.\n"); + return ret; +} + +static void _rtl8821ae_simple_initialize_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + +#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) + /* Re-download normal Fw. */ + rtl8821ae_set_fw_related_for_wowlan(hw, false); +#endif + + /* Re-Initialize LLT table. 
*/ + if (rtlhal->re_init_llt_table) { + u32 rqpn = 0x80e70808; + u8 rqpn_npq = 0, boundary = 0xF8; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rqpn = 0x80e90808; + boundary = 0xFA; + } + if (_rtl8821ae_dynamic_rqpn(hw, boundary, rqpn_npq, rqpn)) + rtlhal->re_init_llt_table = false; + } + + ppsc->rfpwr_state = ERFON; +} + +static void _rtl8821ae_enable_l1off(struct ieee80211_hw *hw) +{ + u8 tmp = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n"); + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x160); + if (!(tmp & (BIT(2) | BIT(3)))) { + RT_TRACE(rtlpriv, COMP_POWER | COMP_INIT, DBG_LOUD, + "0x160(%#x)return!!\n", tmp); + return; + } + + tmp = _rtl8821ae_mdio_read(rtlpriv, 0x1b); + _rtl8821ae_mdio_write(rtlpriv, 0x1b, (tmp | BIT(4))); + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718); + _rtl8821ae_dbi_write(rtlpriv, 0x718, tmp | BIT(5)); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n"); +} + +static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw) +{ + u8 tmp = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n"); + + /* Check 0x98[10] */ + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x99); + if (!(tmp & BIT(2))) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "<---0x99(%#x) return!!\n", tmp); + return; + } + + /* LTR idle latency, 0x90 for 144us */ + rtl_write_dword(rtlpriv, 0x798, 0x88908890); + + /* LTR active latency, 0x3c for 60us */ + rtl_write_dword(rtlpriv, 0x79c, 0x883c883c); + + tmp = rtl_read_byte(rtlpriv, 0x7a4); + rtl_write_byte(rtlpriv, 0x7a4, (tmp | BIT(4))); + + tmp = rtl_read_byte(rtlpriv, 0x7a4); + rtl_write_byte(rtlpriv, 0x7a4, (tmp & (~BIT(0)))); + rtl_write_byte(rtlpriv, 0x7a4, (tmp | BIT(0))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n"); +} + +static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + bool init_finished = true; + u8 tmp = 0; + + /* Get Fw wake up reason. */ + _rtl8821ae_get_wakeup_reason(hw); + + /* Patch Pcie Rx DMA hang after S3/S4 several times. + * The root cause has not be found. */ + if (_rtl8821ae_check_pcie_dma_hang(hw)) + _rtl8821ae_reset_pcie_interface_dma(hw, true, false); + + /* Prepare Tx/Rx Desc Hw address. */ + _rtl8821ae_init_trx_desc_hw_address(hw); + + /* Release Pcie Interface Rx DMA to allow wake packet DMA. */ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFE); + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable PCIE Rx DMA.\n"); + + /* Check wake up event. + * We should check wake packet bit before disable wowlan by H2C or + * Fw will clear the bit. */ + tmp = rtl_read_byte(rtlpriv, REG_FTISR + 3); + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Read REG_FTISR 0x13f = %#X\n", tmp); + + /* Set the WoWLAN related function control disable. */ + rtl8821ae_set_fw_wowlan_mode(hw, false); + rtl8821ae_set_fw_remote_wake_ctrl_cmd(hw, 0); + + if (rtlhal->hw_rof_enable) { + tmp = rtl_read_byte(rtlpriv, REG_HSISR + 3); + if (tmp & BIT(1)) { + /* Clear GPIO9 ISR */ + rtl_write_byte(rtlpriv, REG_HSISR + 3, tmp | BIT(1)); + init_finished = false; + } else { + init_finished = true; + } + } + + if (init_finished) { + _rtl8821ae_simple_initialize_adapter(hw); + + /* Release Pcie Interface Tx DMA. 
*/ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0x00); + /* Release Pcie RX DMA */ + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, 0x02); + + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & (~BIT(0)))); + + _rtl8821ae_enable_l1off(hw); + _rtl8821ae_enable_ltr(hw); + } + + return init_finished; +} + +static void _rtl8812ae_bb8812_config_1t(struct ieee80211_hw *hw) +{ + /* BB OFDM RX Path_A */ + rtl_set_bbreg(hw, 0x808, 0xff, 0x11); + /* BB OFDM TX Path_A */ + rtl_set_bbreg(hw, 0x80c, MASKLWORD, 0x1111); + /* BB CCK R/Rx Path_A */ + rtl_set_bbreg(hw, 0xa04, 0x0c000000, 0x0); + /* MCS support */ + rtl_set_bbreg(hw, 0x8bc, 0xc0000060, 0x4); + /* RF Path_B HSSI OFF */ + rtl_set_bbreg(hw, 0xe00, 0xf, 0x4); + /* RF Path_B Power Down */ + rtl_set_bbreg(hw, 0xe90, MASKDWORD, 0); + /* ADDA Path_B OFF */ + rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0); + rtl_set_bbreg(hw, 0xe64, MASKDWORD, 0); +} + +static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + rtlhal->mac_func_enable = false; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* Combo (PCIe + USB) Card and PCIe-MF Card */ + /* 1. Run LPS WL RFOFF flow */ + /* RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n"); + */ + rtl_hal_pwrseqcmdparsing(rtlpriv, + PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8821A_NIC_LPS_ENTER_FLOW); + } + /* 2. 0x1F[7:0] = 0 */ + /* turn off RF */ + /* rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); */ + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && + rtlhal->fw_ready) { + rtl8821ae_firmware_selfreset(hw); + } + + /* Reset MCU. Suggested by Filen. */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); + + /* g. MCUFWDL 0x80[1:0]=0 */ + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* HW card disable configuration. */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8821A_NIC_DISABLE_FLOW); + } else { + /* HW card disable configuration. */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8812_NIC_DISABLE_FLOW); + } + + /* Reset MCU IO Wrapper */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); + + /* 7. 
RSV_CTRL 0x1C[7:0] = 0x0E */ + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); +} + +int rtl8821ae_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool rtstatus = true; + int err; + u8 tmp_u1b; + bool support_remote_wakeup; + u32 nav_upper = WIFI_NAV_UPPER_US; + + rtlhal->being_init_adapter = true; + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wakeup)); + rtlpriv->intf_ops->disable_aspm(hw); + + /*YP wowlan not considered*/ + + tmp_u1b = rtl_read_byte(rtlpriv, REG_CR); + if (tmp_u1b != 0 && tmp_u1b != 0xEA) { + rtlhal->mac_func_enable = true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "MAC has already power on.\n"); + } else { + rtlhal->mac_func_enable = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE; + } + + if (support_remote_wakeup && + rtlhal->wake_from_pnp_sleep && + rtlhal->mac_func_enable) { + if (_rtl8821ae_wowlan_initialize_adapter(hw)) { + rtlhal->being_init_adapter = false; + return 0; + } + } + + if (_rtl8821ae_check_pcie_dma_hang(hw)) { + _rtl8821ae_reset_pcie_interface_dma(hw, + rtlhal->mac_func_enable, + false); + rtlhal->mac_func_enable = false; + } + + /* Reset MAC/BB/RF status if it is not powered off + * before calling initialize Hw flow to prevent + * from interface and MAC status mismatch. + * 2013.06.21, by tynli. Suggested by SD1 JackieLau. */ + if (rtlhal->mac_func_enable) { + _rtl8821ae_poweroff_adapter(hw); + rtlhal->mac_func_enable = false; + } + + rtstatus = _rtl8821ae_init_mac(hw); + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + err = 1; + return err; + } + + tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG); + tmp_u1b &= 0x7F; + rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b); + + err = rtl8821ae_download_fw(hw, false); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Failed to download FW. 
Init HW without FW now\n"); + err = 1; + rtlhal->fw_ready = false; + return err; + } else { + rtlhal->fw_ready = true; + } + ppsc->fw_current_inpsmode = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE; + rtlhal->fw_clk_change_in_progress = false; + rtlhal->allow_sw_to_change_hwclc = false; + rtlhal->last_hmeboxnum = 0; + + /*SIC_Init(Adapter); + if(rtlhal->AMPDUBurstMode) + rtl_write_byte(rtlpriv,REG_AMPDU_BURST_MODE_8812, 0x7F);*/ + + rtl8821ae_phy_mac_config(hw); + /* because last function modify RCR, so we update + * rcr var here, or TP will unstable for receive_config + * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx + * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252 + rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR); + rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);*/ + rtl8821ae_phy_bb_config(hw); + + rtl8821ae_phy_rf_config(hw); + + if (rtlpriv->phy.rf_type == RF_1T1R && + rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_bb8812_config_1t(hw); + + _rtl8821ae_hw_configure(hw); + + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G); + + /*set wireless mode*/ + + rtlhal->mac_func_enable = true; + + rtl_cam_reset_all_entry(hw); + + rtl8821ae_enable_hw_security_config(hw); + + ppsc->rfpwr_state = ERFON; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + _rtl8821ae_enable_aspm_back_door(hw); + rtlpriv->intf_ops->enable_aspm(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE && + (rtlhal->rfe_type == 1 || rtlhal->rfe_type == 5)) + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x0302); + + rtl8821ae_bt_hw_init(hw); + rtlpriv->rtlhal.being_init_adapter = false; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_NAV_UPPER, (u8 *)&nav_upper); + + /* rtl8821ae_dm_check_txpower_tracking(hw); */ + /* rtl8821ae_phy_lc_calibrate(hw); */ + if (support_remote_wakeup) + rtl_write_byte(rtlpriv, REG_WOW_CTRL, 0); + + /* Release Rx DMA*/ + tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp_u1b & BIT(2)) { + /* Release Rx DMA if needed*/ + tmp_u1b &= ~BIT(2); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b); + } + + /* Release Tx/Rx PCIE DMA if*/ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0); + + rtl8821ae_dm_init(hw); + rtl8821ae_macid_initialize_mediastatus(hw); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_hw_init() <====\n"); + return err; +} + +static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum version_8821ae version = VERSION_UNKNOWN; + u32 value32; + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "ReadChipVersion8812A 0xF0 = 0x%x\n", value32); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtlphy->rf_type = RF_2T2R; + else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + rtlphy->rf_type = RF_1T1R; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "RF_Type is %x!!\n", rtlphy->rf_type); + + if (value32 & TRP_VAUX_EN) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + if (rtlphy->rf_type == RF_2T2R) + version = VERSION_TEST_CHIP_2T2R_8812; + else + version = VERSION_TEST_CHIP_1T1R_8812; + } else + version = VERSION_TEST_CHIP_8821; + } else { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + u32 rtl_id = ((value32 & CHIP_VER_RTL_MASK) >> 12) + 1; + + if (rtlphy->rf_type == RF_2T2R) + version = + (enum version_8821ae)(CHIP_8812 
+ | NORMAL_CHIP | + RF_TYPE_2T2R); + else + version = (enum version_8821ae)(CHIP_8812 + | NORMAL_CHIP); + + version = (enum version_8821ae)(version | (rtl_id << 12)); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + u32 rtl_id = value32 & CHIP_VER_RTL_MASK; + + version = (enum version_8821ae)(CHIP_8821 + | NORMAL_CHIP | rtl_id); + } + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /*WL_HWROF_EN.*/ + value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); + rtlhal->hw_rof_enable = ((value32 & WL_HWROF_EN) ? 1 : 0); + } + + switch (version) { + case VERSION_TEST_CHIP_1T1R_8812: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_TEST_CHIP_1T1R_8812\n"); + break; + case VERSION_TEST_CHIP_2T2R_8812: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_TEST_CHIP_2T2R_8812\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_1T1R_8812: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID:VERSION_NORMAL_TSMC_CHIP_1T1R_8812\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_2T2R_8812: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT\n"); + break; + case VERSION_TEST_CHIP_8821: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_TEST_CHIP_8821\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_8821: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT\n"); + break; + case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip Version ID: Unknow (0x%X)\n", version); + break; + } + + return version; +} + +static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR); + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + bt_msr &= 0xfc; + + rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0); + RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD, + "clear 0x550 when set HW_VAR_MEDIA_STATUS\n"); + + if (type == NL80211_IFTYPE_UNSPECIFIED || + type == NL80211_IFTYPE_STATION) { + _rtl8821ae_stop_tx_beacon(hw); + _rtl8821ae_enable_bcn_sub_func(hw); + } else if (type == NL80211_IFTYPE_ADHOC || + type == NL80211_IFTYPE_AP) { + _rtl8821ae_resume_tx_beacon(hw); + _rtl8821ae_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n", + type); + } + + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + bt_msr |= MSR_NOLINK; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to NO LINK!\n"); + break; + case NL80211_IFTYPE_ADHOC: + bt_msr |= MSR_ADHOC; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to Ad Hoc!\n"); + break; + case NL80211_IFTYPE_STATION: + bt_msr |= MSR_INFRA; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to STA!\n"); + break; + case NL80211_IFTYPE_AP: + bt_msr |= MSR_AP; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to AP!\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + 
"Network type %d not support!\n", type); + return 1; + } + + rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtlpriv->cfg->ops->led_control(hw, ledaction); + if ((bt_msr & 0xfc) == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); + + return 0; +} + +void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; + + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid) { + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); + } else if (!check_bssid) { + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *)(®_rcr)); + } +} + +int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_set_network_type!\n"); + + if (_rtl8821ae_set_media_status(hw, type)) + return -EOPNOTSUPP; + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP) + rtl8821ae_set_check_bssid(hw, true); + } else { + rtl8821ae_set_check_bssid(hw, false); + } + + return 0; +} + +/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ +void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl8821ae_dm_init_edca_turbo(hw); + switch (aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); + break; + case AC0_BE: + /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */ + break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); + break; + default: + RT_ASSERT(false, "invalid aci: %d !\n", aci); + break; + } +} + +static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp; + tmp = rtl_read_dword(rtlpriv, REG_HISR); + /*printk("clear interrupt first:\n"); + printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/ + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/ + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/ + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + +void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl8821ae_clear_interrupt(hw);/*clear it here first*/ + + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; + /* there are some C2H CMDs have been sent before + system interrupt is enabled, e.g., C2H, CPWM. + *So we need to clear all C2H events that FW has + notified, otherwise FW won't schedule any commands anymore. 
+ */ + /* rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0); */ + /*enable system interrupt*/ + rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF); +} + +void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); + rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); + rtlpci->irq_enabled = false; + /*synchronize_irq(rtlpci->pdev->irq);*/ +} + +static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u16 cap_hdr; + u8 cap_pointer; + u8 cap_id = 0xff; + u8 pmcs_reg; + u8 cnt = 0; + + /* Get the Capability pointer first, + * the Capability Pointer is located at + * offset 0x34 from the Function Header */ + + pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "PCI configration 0x34 = 0x%2x\n", cap_pointer); + + do { + pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr); + cap_id = cap_hdr & 0xFF; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "in pci configration, cap_pointer%x = %x\n", + cap_pointer, cap_id); + + if (cap_id == 0x01) { + break; + } else { + /* point to next Capability */ + cap_pointer = (cap_hdr >> 8) & 0xFF; + /* 0: end of pci capability, 0xff: invalid value */ + if (cap_pointer == 0x00 || cap_pointer == 0xff) { + cap_id = 0xff; + break; + } + } + } while (cnt++ < 200); + + if (cap_id == 0x01) { + /* Get the PM CSR (Control/Status Register), + * The PME_Status is located at PM Capatibility offset 5, bit 7 + */ + pci_read_config_byte(rtlpci->pdev, cap_pointer + 5, &pmcs_reg); + + if (pmcs_reg & BIT(7)) { + /* PME event occured, clear the PM_Status by write 1 */ + pmcs_reg = pmcs_reg | BIT(7); + + pci_write_config_byte(rtlpci->pdev, cap_pointer + 5, + pmcs_reg); + /* Read it back to check */ + pci_read_config_byte(rtlpci->pdev, cap_pointer + 5, + &pmcs_reg); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "Clear PME status 0x%2x to 0x%2x\n", + cap_pointer + 5, pmcs_reg); + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "PME status(0x%2x) = 0x%2x\n", + cap_pointer + 5, pmcs_reg); + } + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, + "Cannot find PME Capability\n"); + } +} + +void rtl8821ae_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtlpriv); + enum nl80211_iftype opmode; + bool support_remote_wakeup; + u8 tmp; + u32 count = 0; + + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wakeup)); + + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + if (!(support_remote_wakeup && mac->opmode == NL80211_IFTYPE_STATION) + || !rtlhal->enter_pnp_sleep) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Normal Power off\n"); + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + _rtl8821ae_set_media_status(hw, opmode); + _rtl8821ae_poweroff_adapter(hw); + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Wowlan Supported.\n"); + /* 3 <1> Prepare for configuring wowlan related infomations */ + /* Clear Fw WoWLAN event. 
*/ + rtl_write_byte(rtlpriv, REG_MCUTST_WOWLAN, 0x0); + +#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) + rtl8821ae_set_fw_related_for_wowlan(hw, true); +#endif + /* Dynamically adjust Tx packet boundary + * for download reserved page packet. + * reserve 30 pages for rsvd page */ + if (_rtl8821ae_dynamic_rqpn(hw, 0xE0, 0x3, 0x80c20d0d)) + rtlhal->re_init_llt_table = true; + + /* 3 <2> Set Fw releted H2C cmd. */ + + /* Set WoWLAN related security information. */ + rtl8821ae_set_fw_global_info_cmd(hw); + + _rtl8821ae_download_rsvd_page(hw, true); + + /* Just enable AOAC related functions when we connect to AP. */ + printk("mac->link_state = %d\n", mac->link_state); + if (mac->link_state >= MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); + rtl8821ae_set_fw_media_status_rpt_cmd(hw, + RT_MEDIA_CONNECT); + + rtl8821ae_set_fw_wowlan_mode(hw, true); + /* Enable Fw Keep alive mechanism. */ + rtl8821ae_set_fw_keep_alive_cmd(hw, true); + + /* Enable disconnect decision control. */ + rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(hw, true); + } + + /* 3 <3> Hw Configutations */ + + /* Wait untill Rx DMA Finished before host sleep. + * FW Pause Rx DMA may happens when received packet doing dma. + */ + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, BIT(2)); + + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + count = 0; + while (!(tmp & BIT(1)) && (count++ < 100)) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wait Rx DMA Finished before host sleep. count=%d\n", + count); + + /* reset trx ring */ + rtlpriv->intf_ops->reset_trx_ring(hw); + + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x0); + + _rtl8821ae_clear_pci_pme_status(hw); + tmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR); + rtl_write_byte(rtlpriv, REG_SYS_CLKR, tmp | BIT(3)); + /* prevent 8051 to be reset by PERST */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x20); + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x60); + } + + if (rtlpriv->rtlhal.driver_is_goingto_unload || + ppsc->rfoff_reason > RF_CHANGE_BY_PS) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + /* For wowlan+LPS+32k. */ + if (support_remote_wakeup && rtlhal->enter_pnp_sleep) { + /* Set the WoWLAN related function control enable. + * It should be the last H2C cmd in the WoWLAN flow. */ + rtl8821ae_set_fw_remote_wake_ctrl_cmd(hw, 1); + + /* Stop Pcie Interface Tx DMA. */ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff); + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Stop PCIE Tx DMA.\n"); + + /* Wait for TxDMA idle. */ + count = 0; + do { + tmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG); + udelay(10); + count++; + } while ((tmp != 0) && (count < 100)); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wait Tx DMA Finished before host sleep. 
count=%d\n", + count); + + if (rtlhal->hw_rof_enable) { + printk("hw_rof_enable\n"); + tmp = rtl_read_byte(rtlpriv, REG_HSISR + 3); + rtl_write_byte(rtlpriv, REG_HSISR + 3, tmp | BIT(1)); + } + } + /* after power off we should do iqk again */ + rtlpriv->phy.iqk_initialized = false; +} + +void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, *p_inta); + + *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); +} + +void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u16 bcn_interval, atim_window; + + bcn_interval = mac->beacon_interval; + atim_window = 2; /*FIX MERGE */ + rtl8821ae_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18); + rtl_write_byte(rtlpriv, 0x606, 0x30); + rtlpci->reg_bcn_ctrl_val |= BIT(3); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlpci->reg_bcn_ctrl_val); + rtl8821ae_enable_interrupt(hw); +} + +void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, + "beacon_interval:%d\n", bcn_interval); + rtl8821ae_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl8821ae_enable_interrupt(hw); +} + +void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, + "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr); + + if (add_msr) + rtlpci->irq_mask[0] |= add_msr; + if (rm_msr) + rtlpci->irq_mask[0] &= (~rm_msr); + rtl8821ae_disable_interrupt(hw); + rtl8821ae_enable_interrupt(hw); +} + +static u8 _rtl8821ae_get_chnl_group(u8 chnl) +{ + u8 group = 0; + + if (chnl <= 14) { + if (1 <= chnl && chnl <= 2) + group = 0; + else if (3 <= chnl && chnl <= 5) + group = 1; + else if (6 <= chnl && chnl <= 8) + group = 2; + else if (9 <= chnl && chnl <= 11) + group = 3; + else /*if (12 <= chnl && chnl <= 14)*/ + group = 4; + } else { + if (36 <= chnl && chnl <= 42) + group = 0; + else if (44 <= chnl && chnl <= 48) + group = 1; + else if (50 <= chnl && chnl <= 58) + group = 2; + else if (60 <= chnl && chnl <= 64) + group = 3; + else if (100 <= chnl && chnl <= 106) + group = 4; + else if (108 <= chnl && chnl <= 114) + group = 5; + else if (116 <= chnl && chnl <= 122) + group = 6; + else if (124 <= chnl && chnl <= 130) + group = 7; + else if (132 <= chnl && chnl <= 138) + group = 8; + else if (140 <= chnl && chnl <= 144) + group = 9; + else if (149 <= chnl && chnl <= 155) + group = 10; + else if (157 <= chnl && chnl <= 161) + group = 11; + else if (165 <= chnl && chnl <= 171) + group = 12; + else if (173 <= chnl && chnl <= 177) + group = 13; + else + /*RT_TRACE(rtlpriv, COMP_EFUSE,DBG_LOUD, + "5G, Channel %d in Group not 
found\n",chnl);*/ + RT_ASSERT(!COMP_EFUSE, + "5G, Channel %d in Group not found\n", chnl); + } + return group; +} + +static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw, + struct txpower_info_2g *pwrinfo24g, + struct txpower_info_5g *pwrinfo5g, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 rfPath, eeAddr = EEPROM_TX_PWR_INX, group, TxCount = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n", + (eeAddr+1), hwinfo[eeAddr+1]); + if (0xFF == hwinfo[eeAddr+1]) /*YJ,add,120316*/ + autoload_fail = true; + + if (autoload_fail) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "auto load fail : Use Default value!\n"); + for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + /*2.4G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; + pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo24g->bw20_diff[rfPath][0] = 0x02; + pwrinfo24g->ofdm_diff[rfPath][0] = 0x04; + } else { + pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE; + } + } + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) + pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A; + + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo5g->ofdm_diff[rfPath][0] = 0x04; + pwrinfo5g->bw20_diff[rfPath][0] = 0x00; + pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + } else { + pwrinfo5g->ofdm_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw20_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw40_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + } + } + } + return; + } + + rtl_priv(hw)->efuse.txpwr_fromeprom = true; + + for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + /*2.4G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF) + pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; + } + for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) { + pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF) + pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo24g->bw40_diff[rfPath][TxCount] = 0; + /*bit sign number to 8 bit sign number*/ + pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + /*bit sign number to 8 bit sign number*/ + pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + + pwrinfo24g->cck_diff[rfPath][TxCount] = 0; + eeAddr++; + } else { + pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4; + if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0; + + pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) + 
pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + + eeAddr++; + + pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + + pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0; + + eeAddr++; + } + } + + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) { + pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF) + pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE; + } + + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo5g->bw40_diff[rfPath][TxCount] = 0; + + pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4; + if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + + pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + + eeAddr++; + } else { + pwrinfo5g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if (pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0; + + pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + + eeAddr++; + } + } + + pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4; + pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f); + + eeAddr++; + + pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f); + + eeAddr++; + + for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) { + if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + /* 4bit sign number to 8 bit sign number */ + if (pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0; + /* 4bit sign number to 8 bit sign number */ + pwrinfo5g->bw160_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0; + + eeAddr++; + } + } +} +#if 0 +static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pwrinfo24g; + struct txpower_info_5g pwrinfo5g; + u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, + 56, 58, 60, 62, 64, 100, 102, 104, 106, + 108, 110, 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, 138, + 140, 142, 144, 149, 151, 153, 155, 157, + 159, 161, 163, 165, 167, 168, 169, 171, 173, 175, 177}; + u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; + u8 rf_path, index; + u8 i; + + _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, + &pwrinfo5g, autoload_fail, hwinfo); + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) { + index = _rtl8821ae_get_chnl_group(i + 1); + + if (i == CHANNEL_MAX_NUMBER_2G - 1) { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][5]; + 
rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } else { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } + } + + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) { + index = _rtl8821ae_get_chnl_group(channel5g[i]); + rtlefuse->txpwr_5g_bw40base[rf_path][i] = + pwrinfo5g.index_bw40_base[rf_path][index]; + } + for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) { + u8 upper, lower; + index = _rtl8821ae_get_chnl_group(channel5g_80m[i]); + upper = pwrinfo5g.index_bw40_base[rf_path][index]; + lower = pwrinfo5g.index_bw40_base[rf_path][index + 1]; + + rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + rtlefuse->txpwr_cckdiff[rf_path][i] = + pwrinfo24g.cck_diff[rf_path][i]; + rtlefuse->txpwr_legacyhtdiff[rf_path][i] = + pwrinfo24g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_ht20diff[rf_path][i] = + pwrinfo24g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_ht40diff[rf_path][i] = + pwrinfo24g.bw40_diff[rf_path][i]; + + rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = + pwrinfo5g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw20diff[rf_path][i] = + pwrinfo5g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw40diff[rf_path][i] = + pwrinfo5g.bw40_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw80diff[rf_path][i] = + pwrinfo5g.bw80_diff[rf_path][i]; + } + } + + if (!autoload_fail) { + rtlefuse->eeprom_regulatory = + hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/ + if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF) + rtlefuse->eeprom_regulatory = 0; + } else { + rtlefuse->eeprom_regulatory = 0; + } + + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, + "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); +} +#endif +static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pwrinfo24g; + struct txpower_info_5g pwrinfo5g; + u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, + 56, 58, 60, 62, 64, 100, 102, 104, 106, + 108, 110, 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, 138, + 140, 142, 144, 149, 151, 153, 155, 157, + 159, 161, 163, 165, 167, 168, 169, 171, + 173, 175, 177}; + u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { + 42, 58, 106, 122, 138, 155, 171}; + u8 rf_path, index; + u8 i; + + _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, + &pwrinfo5g, autoload_fail, hwinfo); + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) { + index = _rtl8821ae_get_chnl_group(i + 1); + + if (i == CHANNEL_MAX_NUMBER_2G - 1) { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][5]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } else { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } + } + + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) { + index = _rtl8821ae_get_chnl_group(channel5g[i]); + rtlefuse->txpwr_5g_bw40base[rf_path][i] = + pwrinfo5g.index_bw40_base[rf_path][index]; + } + for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) { + u8 upper, lower; + index = _rtl8821ae_get_chnl_group(channel5g_80m[i]); + upper = pwrinfo5g.index_bw40_base[rf_path][index]; + 
lower = pwrinfo5g.index_bw40_base[rf_path][index + 1]; + + rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + rtlefuse->txpwr_cckdiff[rf_path][i] = + pwrinfo24g.cck_diff[rf_path][i]; + rtlefuse->txpwr_legacyhtdiff[rf_path][i] = + pwrinfo24g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_ht20diff[rf_path][i] = + pwrinfo24g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_ht40diff[rf_path][i] = + pwrinfo24g.bw40_diff[rf_path][i]; + + rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = + pwrinfo5g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw20diff[rf_path][i] = + pwrinfo5g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw40diff[rf_path][i] = + pwrinfo5g.bw40_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw80diff[rf_path][i] = + pwrinfo5g.bw80_diff[rf_path][i]; + } + } + /*bit0~2*/ + if (!autoload_fail) { + rtlefuse->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07; + if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF) + rtlefuse->eeprom_regulatory = 0; + } else { + rtlefuse->eeprom_regulatory = 0; + } + + RTPRINT(rtlpriv, FINIT, INIT_TXPOWER, + "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); +} + +static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!autoload_fail) { + rtlhal->pa_type_2g = hwinfo[0xBC]; + rtlhal->lna_type_2g = hwinfo[0xBD]; + if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) { + rtlhal->pa_type_2g = 0; + rtlhal->lna_type_2g = 0; + } + rtlhal->external_pa_2g = ((rtlhal->pa_type_2g & BIT(5)) && + (rtlhal->pa_type_2g & BIT(4))) ? + 1 : 0; + rtlhal->external_lna_2g = ((rtlhal->lna_type_2g & BIT(7)) && + (rtlhal->lna_type_2g & BIT(3))) ? + 1 : 0; + + rtlhal->pa_type_5g = hwinfo[0xBC]; + rtlhal->lna_type_5g = hwinfo[0xBF]; + if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) { + rtlhal->pa_type_5g = 0; + rtlhal->lna_type_5g = 0; + } + rtlhal->external_pa_5g = ((rtlhal->pa_type_5g & BIT(1)) && + (rtlhal->pa_type_5g & BIT(0))) ? + 1 : 0; + rtlhal->external_lna_5g = ((rtlhal->lna_type_5g & BIT(7)) && + (rtlhal->lna_type_5g & BIT(3))) ? + 1 : 0; + } else { + rtlhal->external_pa_2g = 0; + rtlhal->external_lna_2g = 0; + rtlhal->external_pa_5g = 0; + rtlhal->external_lna_5g = 0; + } +} + +static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!autoload_fail) { + rtlhal->pa_type_2g = hwinfo[0xBC]; + rtlhal->lna_type_2g = hwinfo[0xBD]; + if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) { + rtlhal->pa_type_2g = 0; + rtlhal->lna_type_2g = 0; + } + rtlhal->external_pa_2g = (rtlhal->pa_type_2g & BIT(5)) ? 1 : 0; + rtlhal->external_lna_2g = (rtlhal->lna_type_2g & BIT(7)) ? 1 : 0; + + rtlhal->pa_type_5g = hwinfo[0xBC]; + rtlhal->lna_type_5g = hwinfo[0xBF]; + if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) { + rtlhal->pa_type_5g = 0; + rtlhal->lna_type_5g = 0; + } + rtlhal->external_pa_5g = (rtlhal->pa_type_5g & BIT(1)) ? 1 : 0; + rtlhal->external_lna_5g = (rtlhal->lna_type_5g & BIT(7)) ? 
1 : 0; + } else { + rtlhal->external_pa_2g = 0; + rtlhal->external_lna_2g = 0; + rtlhal->external_pa_5g = 0; + rtlhal->external_lna_5g = 0; + } +} + +static void _rtl8821ae_read_rfe_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!autoload_fail) { + if (hwinfo[EEPROM_RFE_OPTION] & BIT(7)) { + if (rtlhal->external_lna_5g) { + if (rtlhal->external_pa_5g) { + if (rtlhal->external_lna_2g && + rtlhal->external_pa_2g) + rtlhal->rfe_type = 3; + else + rtlhal->rfe_type = 0; + } else { + rtlhal->rfe_type = 2; + } + } else { + rtlhal->rfe_type = 4; + } + } else { + rtlhal->rfe_type = hwinfo[EEPROM_RFE_OPTION] & 0x3F; + + if (rtlhal->rfe_type == 4 && + (rtlhal->external_pa_5g || + rtlhal->external_pa_2g || + rtlhal->external_lna_5g || + rtlhal->external_lna_2g)) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtlhal->rfe_type = 2; + } + } + } else { + rtlhal->rfe_type = 0x04; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "RFE Type: 0x%2x\n", rtlhal->rfe_type); +} + +static void _rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + + if (!auto_load_fail) { + value = *(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION]; + if (((value & 0xe0) >> 5) == 0x1) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A; + + value = hwinfo[EEPROM_RF_BT_SETTING]; + rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + } + /*move BT_InitHalVars() to init_sw_vars*/ +} + +static void _rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + u32 tmpu_32; + + if (!auto_load_fail) { + tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); + if (tmpu_32 & BIT(18)) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A; + + value = hwinfo[EEPROM_RF_BT_SETTING]; + rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + } + /*move BT_InitHalVars() to init_sw_vars*/ +} + +static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE]; + u16 eeprom_id; + + if (b_pseudo_test) { + ;/* need add */ + } + + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "RTL819X Not boot from eeprom, check it !!"); + } + + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", + hwinfo, HWSET_MAX_SIZE); + + eeprom_id = *((u16 *)&hwinfo[0]); + if (eeprom_id != RTL_EEPROM_ID) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "EEPROM ID(%#x) is invalid!!\n", eeprom_id); + 
rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + } + + if (rtlefuse->autoload_failflag) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "RTL8812AE autoload_failflag, check it !!"); + return; + } + + rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION]; + if (rtlefuse->eeprom_version == 0xff) + rtlefuse->eeprom_version = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM version: 0x%2x\n", rtlefuse->eeprom_version); + + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROMId = 0x%4x\n", eeprom_id); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); + + /*customer ID*/ + rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; + if (rtlefuse->eeprom_oemid == 0xFF) + rtlefuse->eeprom_oemid = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); + + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "dev_addr: %pM\n", rtlefuse->dev_addr); + + _rtl8821ae_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, + hwinfo); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + _rtl8812ae_read_pa_type(hw, hwinfo, rtlefuse->autoload_failflag); + _rtl8812ae_read_bt_coexist_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + } else { + _rtl8821ae_read_pa_type(hw, hwinfo, rtlefuse->autoload_failflag); + _rtl8821ae_read_bt_coexist_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + } + + _rtl8821ae_read_rfe_type(hw, hwinfo, rtlefuse->autoload_failflag); + /*board type*/ + rtlefuse->board_type = ODM_BOARD_DEFAULT; + if (rtlhal->external_lna_2g != 0) + rtlefuse->board_type |= ODM_BOARD_EXT_LNA; + if (rtlhal->external_lna_5g != 0) + rtlefuse->board_type |= ODM_BOARD_EXT_LNA_5G; + if (rtlhal->external_pa_2g != 0) + rtlefuse->board_type |= ODM_BOARD_EXT_PA; + if (rtlhal->external_pa_5g != 0) + rtlefuse->board_type |= ODM_BOARD_EXT_PA_5G; + + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= ODM_BOARD_BT; + + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); + + rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; + if (rtlefuse->eeprom_channelplan == 0xff) + rtlefuse->eeprom_channelplan = 0x7F; + + /* set channel paln to world wide 13 */ + /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */ + + /*parse xtal*/ + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE]; + if (rtlefuse->crystalcap == 0xFF) + rtlefuse->crystalcap = 0x20; + + rtlefuse->eeprom_thermalmeter = *(u8 *)&hwinfo[EEPROM_THERMAL_METER]; + if ((rtlefuse->eeprom_thermalmeter == 0xff) || + rtlefuse->autoload_failflag) { + rtlefuse->apk_thermalmeterignore = true; + rtlefuse->eeprom_thermalmeter = 0xff; + } + + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + RT_TRACE(rtlpriv, COMP_INIT, 
DBG_LOUD, + "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); + + if (!rtlefuse->autoload_failflag) { + rtlefuse->antenna_div_cfg = + (hwinfo[EEPROM_RF_BOARD_OPTION] & 0x18) >> 3; + if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xff) + rtlefuse->antenna_div_cfg = 0; + + if (rtlpriv->btcoexist.btc_info.btcoexist == 1 && + rtlpriv->btcoexist.btc_info.ant_num == ANT_X1) + rtlefuse->antenna_div_cfg = 0; + + rtlefuse->antenna_div_type = hwinfo[EEPROM_RF_ANTENNA_OPT_88E]; + if (rtlefuse->antenna_div_type == 0xff) + rtlefuse->antenna_div_type = FIXED_HW_ANTDIV; + } else { + rtlefuse->antenna_div_cfg = 0; + rtlefuse->antenna_div_type = 0; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n", + rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type); + + pcipriv->ledctl.led_opendrain = true; + + if (rtlhal->oem_id == RT_CID_DEFAULT) { + switch (rtlefuse->eeprom_oemid) { + case RT_CID_DEFAULT: + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_CCX: + rtlhal->oem_id = RT_CID_CCX; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819X_QMI; + break; + case EEPROM_CID_WHQL: + break; + default: + break; + } + } +} + +/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + pcipriv->ledctl.led_opendrain = true; + switch (rtlhal->oem_id) { + case RT_CID_819X_HP: + pcipriv->ledctl.led_opendrain = true; + break; + case RT_CID_819X_LENOVO: + case RT_CID_DEFAULT: + case RT_CID_TOSHIBA: + case RT_CID_CCX: + case RT_CID_819X_ACER: + case RT_CID_WHQL: + default: + break; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "RT Customized ID: 0x%02X\n", rtlhal->oem_id); +}*/ + +void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + rtlhal->version = _rtl8821ae_read_chip_version(hw); + if (get_rf_type(rtlphy) == RF_1T1R) + rtlpriv->dm.rfpath_rxenable[0] = true; + else + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", + rtlhal->version); + + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + if (tmp_u1b & BIT(4)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); + rtlefuse->epromtype = EEPROM_93C46; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n"); + rtlefuse->epromtype = EEPROM_BOOT_EFUSE; + } + + if (tmp_u1b & BIT(5)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + _rtl8821ae_read_adapter_info(hw, false); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + } + /*hal_ReadRFType_8812A()*/ + /* _rtl8821ae_hal_customized_behavior(hw); */ +} + +static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 ratr_value; + u8 ratr_index = 0; + u8 b_nmode = mac->ht_enable; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u16 shortgi_rate; + u32 tmp_ratr_value; + u8 curtxbw_40mhz = mac->bw_40; + u8 b_curshortgi_40mhz = (sta->ht_cap.cap & 
IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + enum wireless_mode wirelessmode = mac->mode; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_value = sta->supp_rates[1] << 4; + else + ratr_value = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_value = 0xfff; + ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_B: + if (ratr_value & 0x0000000c) + ratr_value &= 0x0000000d; + else + ratr_value &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_value &= 0x00000FF5; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + b_nmode = 1; + if (mimo_ps == IEEE80211_SMPS_STATIC) { + ratr_value &= 0x0007F005; + } else { + u32 ratr_mask; + + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; + + ratr_value &= ratr_mask; + } + break; + default: + if (rtlphy->rf_type == RF_1T2R) + ratr_value &= 0x000ff0ff; + else + ratr_value &= 0x0f0ff0ff; + + break; + } + + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpriv->btcoexist.bt_cur_state) && + (rtlpriv->btcoexist.bt_ant_isolation) && + ((rtlpriv->btcoexist.bt_service == BT_SCO) || + (rtlpriv->btcoexist.bt_service == BT_BUSY))) + ratr_value &= 0x0fffcfc0; + else + ratr_value &= 0x0FFFFFFF; + + if (b_nmode && ((curtxbw_40mhz && + b_curshortgi_40mhz) || (!curtxbw_40mhz && + b_curshortgi_20mhz))) { + ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } + + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); +} + +static u8 _rtl8821ae_mrate_idx_to_arfr_id( + struct ieee80211_hw *hw, u8 rate_index, + enum wireless_mode wirelessmode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 ret = 0; + switch (rate_index) { + case RATR_INX_WIRELESS_NGB: + if (rtlphy->rf_type == RF_1T1R) + ret = 1; + else + ret = 0; + ; break; + case RATR_INX_WIRELESS_N: + case RATR_INX_WIRELESS_NG: + if (rtlphy->rf_type == RF_1T1R) + ret = 5; + else + ret = 4; + ; break; + case RATR_INX_WIRELESS_NB: + if (rtlphy->rf_type == RF_1T1R) + ret = 3; + else + ret = 2; + ; break; + case RATR_INX_WIRELESS_GB: + ret = 6; + break; + case RATR_INX_WIRELESS_G: + ret = 7; + break; + case RATR_INX_WIRELESS_B: + ret = 8; + break; + case RATR_INX_WIRELESS_MC: + if ((wirelessmode == WIRELESS_MODE_B) + || (wirelessmode == WIRELESS_MODE_G) + || (wirelessmode == WIRELESS_MODE_N_24G) + || (wirelessmode == WIRELESS_MODE_AC_24G)) + ret = 6; + else + ret = 7; + case RATR_INX_WIRELESS_AC_5N: + if (rtlphy->rf_type == RF_1T1R) + ret = 10; + else + ret = 9; + break; + case RATR_INX_WIRELESS_AC_24N: + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if (rtlphy->rf_type == RF_1T1R) + ret = 10; + else + ret = 9; + } else { + if (rtlphy->rf_type == RF_1T1R) + ret = 11; + else + ret = 12; + } + break; + default: + ret = 0; break; + } + return ret; +} + +static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate) +{ + u8 i, j, tmp_rate; + u32 rate_bitmap = 0; + + for (i = j = 0; i < 4; i += 2, j += 10) { + 
tmp_rate = (le16_to_cpu(vht_rate) >> i) & 3; + + switch (tmp_rate) { + case 2: + rate_bitmap = rate_bitmap | (0x03ff << j); + break; + case 1: + rate_bitmap = rate_bitmap | (0x01ff << j); + break; + case 0: + rate_bitmap = rate_bitmap | (0x00ff << j); + break; + default: + break; + } + } + + return rate_bitmap; +} + +static u32 _rtl8821ae_set_ra_vht_ratr_bitmap(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u32 ratr_bitmap) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 ret_bitmap = ratr_bitmap; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 + || rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + ret_bitmap = ratr_bitmap; + else if (wirelessmode == WIRELESS_MODE_AC_5G + || wirelessmode == WIRELESS_MODE_AC_24G) { + if (rtlphy->rf_type == RF_1T1R) + ret_bitmap = ratr_bitmap & (~BIT21); + else + ret_bitmap = ratr_bitmap & (~(BIT31|BIT21)); + } + + return ret_bitmap; +} + +static u8 _rtl8821ae_get_vht_eni(enum wireless_mode wirelessmode, + u32 ratr_bitmap) +{ + u8 ret = 0; + if (wirelessmode < WIRELESS_MODE_N_24G) + ret = 0; + else if (wirelessmode == WIRELESS_MODE_AC_24G) { + if (ratr_bitmap & 0xfff00000) /* Mix , 2SS */ + ret = 3; + else /* Mix, 1SS */ + ret = 2; + } else if (wirelessmode == WIRELESS_MODE_AC_5G) { + ret = 1; + } /* VHT */ + + return ret << 4; +} + +static u8 _rtl8821ae_get_ra_ldpc(struct ieee80211_hw *hw, + u8 mac_id, struct rtl_sta_info *sta_entry, + enum wireless_mode wirelessmode) +{ + u8 b_ldpc = 0; + /*not support ldpc, do not open*/ + return b_ldpc << 2; +} + +static u8 _rtl8821ae_get_ra_rftype(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u32 ratr_bitmap) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 rf_type = RF_1T1R; + + if (rtlphy->rf_type == RF_1T1R) + rf_type = RF_1T1R; + else if (wirelessmode == WIRELESS_MODE_AC_5G + || wirelessmode == WIRELESS_MODE_AC_24G + || wirelessmode == WIRELESS_MODE_AC_ONLY) { + if (ratr_bitmap & 0xffc00000) + rf_type = RF_2T2R; + } else if (wirelessmode == WIRELESS_MODE_N_5G + || wirelessmode == WIRELESS_MODE_N_24G) { + if (ratr_bitmap & 0xfff00000) + rf_type = RF_2T2R; + } + + return rf_type; +} + +static bool _rtl8821ae_get_ra_shortgi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + u8 mac_id) +{ + bool b_short_gi = false; + u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + u8 b_curshortgi_80mhz = 0; + b_curshortgi_80mhz = (sta->vht_cap.cap & + IEEE80211_VHT_CAP_SHORT_GI_80) ? 1 : 0; + + if (mac_id == MAC_ID_STATIC_FOR_BROADCAST_MULTICAST) + b_short_gi = false; + + if (b_curshortgi_40mhz || b_curshortgi_80mhz + || b_curshortgi_20mhz) + b_short_gi = true; + + return b_short_gi; +} + +static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *sta_entry = NULL; + u32 ratr_bitmap; + u8 ratr_index; + enum wireless_mode wirelessmode = 0; + u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ? 
1 : 0; + bool b_shortgi = false; + u8 rate_mask[7]; + u8 macid = 0; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u8 rf_type; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + wirelessmode = sta_entry->wireless_mode; + + RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD, + "wireless mode = 0x%x\n", wirelessmode); + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + curtxbw_40mhz = mac->bw_40; + } else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + if (wirelessmode == WIRELESS_MODE_N_5G || + wirelessmode == WIRELESS_MODE_AC_5G) + ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; + else + ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_bitmap = 0xfff; + + if (wirelessmode == WIRELESS_MODE_N_24G + || wirelessmode == WIRELESS_MODE_N_5G) + ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + else if (wirelessmode == WIRELESS_MODE_AC_24G + || wirelessmode == WIRELESS_MODE_AC_5G + || wirelessmode == WIRELESS_MODE_AC_ONLY) + ratr_bitmap |= _rtl8821ae_rate_to_bitmap_2ssvht( + sta->vht_cap.vht_mcs.rx_mcs_map) << 12; + + b_shortgi = _rtl8821ae_get_ra_shortgi(hw, sta, macid); + rf_type = _rtl8821ae_get_ra_rftype(hw, wirelessmode, ratr_bitmap); + +/*mac id owner*/ + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_A: + ratr_index = RATR_INX_WIRELESS_G; + ratr_bitmap &= 0x00000ff0; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + if (wirelessmode == WIRELESS_MODE_N_24G) + ratr_index = RATR_INX_WIRELESS_NGB; + else + ratr_index = RATR_INX_WIRELESS_NG; + + if (mimo_ps == IEEE80211_SMPS_STATIC + || mimo_ps == IEEE80211_SMPS_DYNAMIC) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } else { + if (rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0fff0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0fff0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff005; + } + } + } + break; + + case WIRELESS_MODE_AC_24G: + ratr_index = RATR_INX_WIRELESS_AC_24N; + if (rssi_level == 1) + ratr_bitmap &= 0xfc3f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0xfffff000; + else + ratr_bitmap &= 0xffffffff; + break; + + case WIRELESS_MODE_AC_5G: + ratr_index = RATR_INX_WIRELESS_AC_5N; + + if (rf_type == RF_1T1R) { + if (rssi_level == 1) /*add by Gary for ac-series*/ + ratr_bitmap &= 0x003f8000; + else if (rssi_level == 2) + ratr_bitmap &= 0x003ff000; + else + ratr_bitmap &= 0x003ff010; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0xfe3f8000; + else 
if (rssi_level == 2) + ratr_bitmap &= 0xfffff000; + else + ratr_bitmap &= 0xfffff010; + } + break; + + default: + ratr_index = RATR_INX_WIRELESS_NGB; + + if (rf_type == RF_1T2R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f8ff0ff; + break; + } + + ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode); + sta_entry->ratr_index = ratr_index; + ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode, + ratr_bitmap); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD, + "ratr_bitmap :%x\n", ratr_bitmap); + + /* *(u32 *)& rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | + (ratr_index << 28)); */ + + rate_mask[0] = macid; + rate_mask[1] = ratr_index | (b_shortgi ? 0x80 : 0x00); + rate_mask[2] = rtlphy->current_chan_bw + | _rtl8821ae_get_vht_eni(wirelessmode, ratr_bitmap) + | _rtl8821ae_get_ra_ldpc(hw, macid, sta_entry, wirelessmode); + + rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff); + rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8); + rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16); + rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4], rate_mask[5], + rate_mask[6]); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RA_MASK, 7, rate_mask); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); +} + +void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (rtlpriv->dm.useramask) + rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level); + else + /*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD, + "rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only");*/ + rtl8821ae_update_hal_rate_table(hw, sta); +} + +void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 wireless_mode = mac->mode; + u8 sifs_timer, r2t_sifs; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *)&mac->slot_time); + if (wireless_mode == WIRELESS_MODE_G) + sifs_timer = 0x0a; + else + sifs_timer = 0x0e; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); + + r2t_sifs = 0xa; + + if (wireless_mode == WIRELESS_MODE_AC_5G && + (mac->vht_ldpc_cap & LDPC_VHT_ENABLE_RX) && + (mac->vht_stbc_cap & STBC_VHT_ENABLE_RX)) { + if (mac->vendor == PEER_ATH) + r2t_sifs = 0x8; + else + r2t_sifs = 0xa; + } else if (wireless_mode == WIRELESS_MODE_AC_5G) { + r2t_sifs = 0xa; + } + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_R2T_SIFS, (u8 *)&r2t_sifs); +} + +bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; + u8 u1tmp = 0; + bool b_actuallyset = false; + + if (rtlpriv->rtlhal.being_init_adapter) + return false; + + if (ppsc->swrf_processing) + return false; + + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + return false; + } else { + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + + cur_rfstate = ppsc->rfpwr_state; + + rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, + rtl_read_byte(rtlpriv, + REG_GPIO_IO_SEL_2) & ~(BIT(1))); + + u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2); + + if 
(rtlphy->polarity_ctl) + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON; + else + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF; + + if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio ON, RF ON\n"); + + e_rfpowerstate_toset = ERFON; + ppsc->hwradiooff = false; + b_actuallyset = true; + } else if ((!ppsc->hwradiooff) + && (e_rfpowerstate_toset == ERFOFF)) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio OFF, RF OFF\n"); + + e_rfpowerstate_toset = ERFOFF; + ppsc->hwradiooff = true; + b_actuallyset = true; + } + + if (b_actuallyset) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } else { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + + *valid = 1; + return !ppsc->hwradiooff; +} + +void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; + bool is_pairwise = false; + + static u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + if (clear_all) { + u8 idx = 0; + u8 cam_offset = 0; + u8 clear_number = 5; + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n"); + + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + } else { + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + enc_algo = CAM_TKIP; + break; + } + + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + if (mac->opmode == NL80211_IFTYPE_AP) { + entry_id = rtl_cam_get_free_entry(hw, p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, + "Can not find free hwsecurity cam entry\n"); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + + key_index = PAIRWISE_KEYIDX; + is_pairwise = true; + } + } + + if (rtlpriv->sec.key_len[key_index] == 0) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "delete one entry, entry_id is %d\n", + entry_id); + if (mac->opmode == NL80211_IFTYPE_AP) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "add one entry\n"); + if (is_pairwise) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set Pairwise key\n"); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + 
CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set group key\n"); + + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + } + } +} + +void rtl8821ae_bt_reg_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* 0:Low, 1:High, 2:From Efuse. */ + rtlpriv->btcoexist.reg_bt_iso = 2; + /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ + rtlpriv->btcoexist.reg_bt_sco = 3; + /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ + rtlpriv->btcoexist.reg_bt_sco = 0; +} + +void rtl8821ae_bt_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); +} + +void rtl8821ae_suspend(struct ieee80211_hw *hw) +{ +} + +void rtl8821ae_resume(struct ieee80211_hw *hw) +{ +} + +/* Turn on AAP (RCR:bit 0) for promiscuous mode. */ +void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw, + bool allow_all_da, bool write_into_reg) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + if (allow_all_da) /* Set BIT0 */ + rtlpci->receive_config |= RCR_AAP; + else /* Clear BIT0 */ + rtlpci->receive_config &= ~RCR_AAP; + + if (write_into_reg) + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + + RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD, + "receive_config=0x%08X, write_into_reg=%d\n", + rtlpci->receive_config, write_into_reg); +} + +/* WKFMCAMAddAllEntry8812 */ +void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, + struct rtl_wow_pattern *rtl_pattern, + u8 index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 cam = 0; + u8 addr = 0; + u16 rxbuf_addr; + u8 tmp, count = 0; + u16 cam_start; + u16 offset; + + /* Count the WFCAM entry start offset. */ + + /* RX page size = 128 byte */ + offset = MAX_RX_DMA_BUFFER_SIZE_8812 / 128; + /* We should start from the boundary */ + cam_start = offset * 128; + + /* Enable Rx packet buffer access. */ + rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, RXPKT_BUF_SELECT); + for (addr = 0; addr < WKFMCAM_ADDR_NUM; addr++) { + /* Set Rx packet buffer offset. + * RxBuffer pointer increases 1, + * we can access 8 bytes in Rx packet buffer. + * CAM start offset (unit: 1 byte) = index*WKFMCAM_SIZE + * RxBuffer addr = (CAM start offset + + * per entry offset of a WKFM CAM)/8 + * * index: The index of the wake up frame mask + * * WKFMCAM_SIZE: the total size of one WKFM CAM + * * per entry offset of a WKFM CAM: Addr*4 bytes + */ + rxbuf_addr = (cam_start + index * WKFMCAM_SIZE + addr * 4) >> 3; + /* Set R/W start offset */ + rtl_write_word(rtlpriv, REG_PKTBUF_DBG_CTRL, rxbuf_addr); + + if (addr == 0) { + cam = BIT(31) | rtl_pattern->crc; + + if (rtl_pattern->type == UNICAST_PATTERN) + cam |= BIT(24); + else if (rtl_pattern->type == MULTICAST_PATTERN) + cam |= BIT(25); + else if (rtl_pattern->type == BROADCAST_PATTERN) + cam |= BIT(26); + + rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam); + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "WRITE entry[%d] 0x%x: %x\n", addr, + REG_PKTBUF_DBG_DATA_L, cam); + + /* Write to Rx packet buffer.
*/ + rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01); + } else if (addr == 2 || addr == 4) {/* WKFM[127:0] */ + cam = rtl_pattern->mask[addr - 2]; + + rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam); + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "WRITE entry[%d] 0x%x: %x\n", addr, + REG_PKTBUF_DBG_DATA_L, cam); + + rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01); + } else if (addr == 3 || addr == 5) {/* WKFM[127:0] */ + cam = rtl_pattern->mask[addr - 2]; + + rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_H, cam); + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "WRITE entry[%d] 0x%x: %x\n", addr, + REG_PKTBUF_DBG_DATA_H, cam); + + rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0xf001); + } + + count = 0; + do { + tmp = rtl_read_byte(rtlpriv, REG_RXPKTBUF_CTRL); + udelay(2); + count++; + } while (tmp && count < 100); + + RT_ASSERT((count < 100), + "Write wake up frame mask FAIL %d value!\n", tmp); + } + /* Disable Rx packet buffer access. */ + rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, + DISABLE_TRXPKT_BUF_ACCESS); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h new file mode 100644 index 0000000000000000000000000000000000000000..a3553e3abaa1f125aa859c2e3417bae0a3ceaec6 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h @@ -0,0 +1,70 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_HW_H__ +#define __RTL8821AE_HW_H__ + +void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw); + +void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb); +int rtl8821ae_hw_init(struct ieee80211_hw *hw); +void rtl8821ae_card_disable(struct ieee80211_hw *hw); +void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw); +void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw); +int rtl8821ae_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type); +void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci); +void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw); +void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); +void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); + +void rtl8821ae_bt_reg_init(struct ieee80211_hw *hw); +void rtl8821ae_bt_hw_init(struct ieee80211_hw *hw); +void rtl8821ae_suspend(struct ieee80211_hw *hw); +void rtl8821ae_resume(struct ieee80211_hw *hw); +void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw, + bool allow_all_da, + bool write_into_reg); +void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw); +void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw); +void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, + struct rtl_wow_pattern *rtl_pattern, + u8 index); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/rtlwifi/rtl8821ae/led.c new file mode 100644 index 0000000000000000000000000000000000000000..ba1946a0280e0ee8ae88b4443164106621432811 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/led.c @@ -0,0 +1,237 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "reg.h" +#include "led.h" + +static void _rtl8821ae_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, + enum rtl_led_pin ledpin) +{ + pled->hw = hw; + pled->ledpin = ledpin; + pled->ledon = false; +} + +void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u8 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, + REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5)); + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + pled->ledon = true; +} + +void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u16 ledreg = REG_LEDCFG1; + u8 ledcfg = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (pled->ledpin) { + case LED_PIN_LED0: + ledreg = REG_LEDCFG1; + break; + + case LED_PIN_LED1: + ledreg = REG_LEDCFG2; + break; + + case LED_PIN_GPIO0: + default: + break; + } + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "In SwLedOn, LedAddr:%X LEDPIN=%d\n", + ledreg, pled->ledpin); + + ledcfg = rtl_read_byte(rtlpriv, ledreg); + ledcfg |= BIT(5); /*Set 0x4c[21]*/ + ledcfg &= ~(BIT(7) | BIT(6) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); + /*Clear 0x4c[23:22] and 0x4c[19:16]*/ + rtl_write_byte(rtlpriv, ledreg, ledcfg); /*SW control led0 on.*/ + pled->ledon = true; +} + +void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u8 ledcfg; + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin); + + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg &= 0xf0; + if (pcipriv->ledctl.led_opendrain) { + ledcfg &= 0x90; /* Set to software control. */ + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); + ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); + ledcfg &= 0xFE; + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg); + } else { + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5))); + } + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + ledcfg &= 0x10; /* Set to software control. 
*/ + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3)); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, + "switch case not process\n"); + break; + } + pled->ledon = false; +} + +void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u16 ledreg = REG_LEDCFG1; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + + switch (pled->ledpin) { + case LED_PIN_LED0: + ledreg = REG_LEDCFG1; + break; + + case LED_PIN_LED1: + ledreg = REG_LEDCFG2; + break; + + case LED_PIN_GPIO0: + default: + break; + } + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "In SwLedOff,LedAddr:%X LEDPIN=%d\n", + ledreg, pled->ledpin); + /*Open-drain arrangement for controlling the LED*/ + if (pcipriv->ledctl.led_opendrain) { + u8 ledcfg = rtl_read_byte(rtlpriv, ledreg); + + ledreg &= 0xd0; /* Set to software control.*/ + rtl_write_byte(rtlpriv, ledreg, (ledcfg | BIT(3))); + + /*Open-drain arrangement*/ + ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); + ledcfg &= 0xFE;/*Set GPIO[8] to input mode*/ + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg); + } else { + rtl_write_byte(rtlpriv, ledreg, 0x28); + } + + pled->ledon = false; +} + +void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + + _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0); + _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1); +} + +static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + switch (ledaction) { + case LED_CTL_POWER_ON: + case LED_CTL_LINK: + case LED_CTL_NO_LINK: + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pLed0); + else + rtl8821ae_sw_led_on(hw, pLed0); + break; + case LED_CTL_POWER_OFF: + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_off(hw, pLed0); + else + rtl8821ae_sw_led_off(hw, pLed0); + break; + default: + break; + } +} + +void rtl8821ae_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && + (ledaction == LED_CTL_TX || + ledaction == LED_CTL_RX || + ledaction == LED_CTL_SITE_SURVEY || + ledaction == LED_CTL_LINK || + ledaction == LED_CTL_NO_LINK || + ledaction == LED_CTL_START_TO_LINK || + ledaction == LED_CTL_POWER_ON)) { + return; + } + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", + ledaction); + _rtl8821ae_sw_led_control(hw, ledaction); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/rtlwifi/rtl8821ae/led.h new file mode 100644 index 0000000000000000000000000000000000000000..038e64e18ae88bac6d4aea087e8e015a24173cd0 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/led.h @@ -0,0 +1,37 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_LED_H__ +#define __RTL8821AE_LED_H__ + +void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw); +void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8821ae_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..9786313dc62f2b3a50143bffc59255e8f1d45af8 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c @@ -0,0 +1,4855 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../ps.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "table.h" +#include "trx.h" +#include "../btcoexist/halbt_precomp.h" +#include "hw.h" +#include "../efuse.h" + +#define READ_NEXT_PAIR(array_table, v1, v2, i) \ + do { \ + i += 2; \ + v1 = array_table[i]; \ + v2 = array_table[i+1]; \ + } while (0) + +static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask); +static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw); +/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/ +static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +static void phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); + +static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw); + +static void rtl8812ae_fixspur(struct ieee80211_hw *hw, + enum ht_channel_width band_width, u8 channel) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + /*C cut Item12 ADC FIFO CLOCK*/ + if (IS_VENDOR_8812A_C_CUT(rtlhal->version)) { + if (band_width == HT_CHANNEL_WIDTH_20_40 && channel == 11) + rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x3); + /* 0x8AC[11:10] = 2'b11*/ + else + rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2); + /* 0x8AC[11:10] = 2'b10*/ + + /* <20120914, Kordan> A workaround to resolve + * 2480MHz spur by setting ADC clock as 160M. (Asked by Binson) + */ + if (band_width == HT_CHANNEL_WIDTH_20 && + (channel == 13 || channel == 14)) { + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3); + /*0x8AC[9:8] = 2'b11*/ + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); + /* 0x8C4[30] = 1*/ + } else if (band_width == HT_CHANNEL_WIDTH_20_40 && + channel == 11) { + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); + /*0x8C4[30] = 1*/ + } else if (band_width != HT_CHANNEL_WIDTH_80) { + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2); + /*0x8AC[9:8] = 2'b10*/ + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + /*0x8C4[30] = 0*/ + } + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + /* <20120914, Kordan> A workaround to resolve + * 2480MHz spur by setting ADC clock as 160M.
+ */ + if (band_width == HT_CHANNEL_WIDTH_20 && + (channel == 13 || channel == 14)) + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3); + /*0x8AC[9:8] = 11*/ + else if (channel <= 14) /*2.4G only*/ + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2); + /*0x8AC[9:8] = 10*/ + } +} + +u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, + u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 returnvalue, originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x)\n", + regaddr, bitmask); + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + returnvalue = (originalvalue & bitmask) >> bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "BBR MASK=0x%x Addr[0x%x]=0x%x\n", + bitmask, regaddr, originalvalue); + return returnvalue; +} + +void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x)\n", + regaddr, bitmask, data); + + if (bitmask != MASKDWORD) { + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + data = ((originalvalue & (~bitmask)) | + ((data << bitshift) & bitmask)); + } + + rtl_write_dword(rtlpriv, regaddr, data); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x)\n", + regaddr, bitmask, data); +} + +u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + unsigned long flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + original_value = _rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value); + + return readback_value; +} + +void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, bitshift; + unsigned long flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + if (bitmask != RFREG_OFFSET_MASK) { + original_value = + _rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + data = ((original_value & (~bitmask)) | (data << bitshift)); + } + + _rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data); + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath); +} + +static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool is_pi_mode = false; + u32 retvalue = 0; + + /* 2009/06/17 MH We can not execute IO for 
power + save or other accident mode.*/ + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); + return 0xFFFFFFFF; + } + /* <20120809, Kordan> CCA OFF(when entering), + asked by James to avoid reading the wrong value. + <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/ + if (offset != 0x0 && + !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) || + (IS_VENDOR_8812A_C_CUT(rtlhal->version)))) + rtl_set_bbreg(hw, RCCAONSEC, 0x8, 1); + offset &= 0xff; + + if (rfpath == RF90_PATH_A) + is_pi_mode = (bool)rtl_get_bbreg(hw, 0xC00, 0x4); + else if (rfpath == RF90_PATH_B) + is_pi_mode = (bool)rtl_get_bbreg(hw, 0xE00, 0x4); + + rtl_set_bbreg(hw, RHSSIREAD_8821AE, 0xff, offset); + + if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) || + (IS_VENDOR_8812A_C_CUT(rtlhal->version))) + udelay(20); + + if (is_pi_mode) { + if (rfpath == RF90_PATH_A) + retvalue = + rtl_get_bbreg(hw, RA_PIREAD_8821A, BLSSIREADBACKDATA); + else if (rfpath == RF90_PATH_B) + retvalue = + rtl_get_bbreg(hw, RB_PIREAD_8821A, BLSSIREADBACKDATA); + } else { + if (rfpath == RF90_PATH_A) + retvalue = + rtl_get_bbreg(hw, RA_SIREAD_8821A, BLSSIREADBACKDATA); + else if (rfpath == RF90_PATH_B) + retvalue = + rtl_get_bbreg(hw, RB_SIREAD_8821A, BLSSIREADBACKDATA); + } + + /*<20120809, Kordan> CCA ON(when exiting), + * asked by James to avoid reading the wrong value. + * <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it! + */ + if (offset != 0x0 && + !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) || + (IS_VENDOR_8812A_C_CUT(rtlhal->version)))) + rtl_set_bbreg(hw, RCCAONSEC, 0x8, 0); + return retvalue; +} + +static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 data_and_addr; + u32 newoffset; + + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + return; + } + offset &= 0xff; + newoffset = offset; + data_and_addr = ((newoffset << 20) | + (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, data_and_addr); +} + +static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask) +{ + u32 i; + + for (i = 0; i <= 31; i++) { + if (((bitmask >> i) & 0x1) == 1) + break; + } + return i; +} + +bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw) +{ + bool rtstatus = 0; + + rtstatus = _rtl8821ae_phy_config_mac_with_headerfile(hw); + + return rtstatus; +} + +bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw) +{ + bool rtstatus = true; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 regval; + u8 crystal_cap; + + phy_init_bb_rf_register_definition(hw); + + regval = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN); + regval |= FEN_PCIEA; + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, regval); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, + regval | FEN_BB_GLB_RSTN | FEN_BBRSTB); + + rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x7); + rtl_write_byte(rtlpriv, REG_OPT_CTRL + 2, 0x7); + + rtstatus = _rtl8821ae_phy_bb8821a_config_parafile(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + crystal_cap = rtlefuse->crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0x7FF80000, + 
(crystal_cap | (crystal_cap << 6))); + } else { + crystal_cap = rtlefuse->crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + } + rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837); + + return rtstatus; +} + +bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) +{ + return rtl8821ae_phy_rf6052_config(hw); +} + +u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, + u8 rf_path) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + char reg_swing_2g = -1;/* 0xff; */ + char reg_swing_5g = -1;/* 0xff; */ + char swing_2g = -1 * reg_swing_2g; + char swing_5g = -1 * reg_swing_5g; + u32 out = 0x200; + const char auto_temp = -1; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n", + (int)swing_2g, (int)swing_5g, + (int)rtlefuse->autoload_failflag); + + if (rtlefuse->autoload_failflag) { + if (band == BAND_ON_2_4G) { + rtldm->swing_diff_2g = swing_2g; + if (swing_2g == 0) { + out = 0x200; /* 0 dB */ + } else if (swing_2g == -3) { + out = 0x16A; /* -3 dB */ + } else if (swing_2g == -6) { + out = 0x101; /* -6 dB */ + } else if (swing_2g == -9) { + out = 0x0B6; /* -9 dB */ + } else { + rtldm->swing_diff_2g = 0; + out = 0x200; + } + } else if (band == BAND_ON_5G) { + rtldm->swing_diff_5g = swing_5g; + if (swing_5g == 0) { + out = 0x200; /* 0 dB */ + } else if (swing_5g == -3) { + out = 0x16A; /* -3 dB */ + } else if (swing_5g == -6) { + out = 0x101; /* -6 dB */ + } else if (swing_5g == -9) { + out = 0x0B6; /* -9 dB */ + } else { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + rtldm->swing_diff_5g = -3; + out = 0x16A; + } else { + rtldm->swing_diff_5g = 0; + out = 0x200; + } + } + } else { + rtldm->swing_diff_2g = -3; + rtldm->swing_diff_5g = -3; + out = 0x16A; /* -3 dB */ + } + } else { + u32 swing = 0, swing_a = 0, swing_b = 0; + + if (band == BAND_ON_2_4G) { + if (reg_swing_2g == auto_temp) { + efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing); + swing = (swing == 0xFF) ? 0x00 : swing; + } else if (swing_2g == 0) { + swing = 0x00; /* 0 dB */ + } else if (swing_2g == -3) { + swing = 0x05; /* -3 dB */ + } else if (swing_2g == -6) { + swing = 0x0A; /* -6 dB */ + } else if (swing_2g == -9) { + swing = 0xFF; /* -9 dB */ + } else { + swing = 0x00; + } + } else { + if (reg_swing_5g == auto_temp) { + efuse_shadow_read(hw, 1, 0xC7, (u32 *)&swing); + swing = (swing == 0xFF) ? 
0x00 : swing; + } else if (swing_5g == 0) { + swing = 0x00; /* 0 dB */ + } else if (swing_5g == -3) { + swing = 0x05; /* -3 dB */ + } else if (swing_5g == -6) { + swing = 0x0A; /* -6 dB */ + } else if (swing_5g == -9) { + swing = 0xFF; /* -9 dB */ + } else { + swing = 0x00; + } + } + + swing_a = (swing & 0x3) >> 0; /* 0xC6/C7[1:0] */ + swing_b = (swing & 0xC) >> 2; /* 0xC6/C7[3:2] */ + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n", + swing_a, swing_b); + + /* 3 Path-A */ + if (swing_a == 0x0) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = 0; + else + rtldm->swing_diff_5g = 0; + out = 0x200; /* 0 dB */ + } else if (swing_a == 0x1) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -3; + else + rtldm->swing_diff_5g = -3; + out = 0x16A; /* -3 dB */ + } else if (swing_a == 0x2) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -6; + else + rtldm->swing_diff_5g = -6; + out = 0x101; /* -6 dB */ + } else if (swing_a == 0x3) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -9; + else + rtldm->swing_diff_5g = -9; + out = 0x0B6; /* -9 dB */ + } + /* 3 Path-B */ + if (swing_b == 0x0) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = 0; + else + rtldm->swing_diff_5g = 0; + out = 0x200; /* 0 dB */ + } else if (swing_b == 0x1) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -3; + else + rtldm->swing_diff_5g = -3; + out = 0x16A; /* -3 dB */ + } else if (swing_b == 0x2) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -6; + else + rtldm->swing_diff_5g = -6; + out = 0x101; /* -6 dB */ + } else if (swing_b == 0x3) { + if (band == BAND_ON_2_4G) + rtldm->swing_diff_2g = -9; + else + rtldm->swing_diff_5g = -9; + out = 0x0B6; /* -9 dB */ + } + } + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out); + return out; +} + +void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + u8 current_band = rtlhal->current_bandtype; + u32 txpath, rxpath; + char bb_diff_between_band; + + txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0); + rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000); + rtlhal->current_bandtype = (enum band_type) band; + /* reconfig BB/RF according to wireless mode */ + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* BB & RF Config */ + rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* 0xCB0[15:12] = 0x7 (LNA_On)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x7); + /* 0xCB0[7:4] = 0x7 (PAPE_A)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x7); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + /*0x834[1:0] = 0x1*/ + rtl_set_bbreg(hw, 0x834, 0x3, 0x1); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* 0xC1C[11:8] = 0 */ + rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 0); + } else { + /* 0x82C[1:0] = 2b'00 */ + rtl_set_bbreg(hw, 0x82c, 0x3, 0); + } + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, + 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); + } + + rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1); + + rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x0); + } else {/* 5G band */ + u16 count, reg_41a; + + if 
(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /*0xCB0[15:12] = 0x5 (LNA_On)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x5); + /*0xCB0[7:4] = 0x4 (PAPE_A)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x4); + } + /*CCK_CHECK_en*/ + rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x80); + + count = 0; + reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "Reg41A value %d", reg_41a); + reg_41a &= 0x30; + while ((reg_41a != 0x30) && (count < 50)) { + udelay(50); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "Delay 50us\n"); + + reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + reg_41a &= 0x30; + count++; + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "Reg41A value %d", reg_41a); + } + if (count != 0) + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "PHY_SwitchWirelessBand8812(): Switch to 5G Band. Count = %d reg41A=0x%x\n", + count, reg_41a); + + /* 2012/02/01, Sinda add registry to switch workaround + without long-run verification for scan issue. */ + rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + /*0x834[1:0] = 0x2*/ + rtl_set_bbreg(hw, 0x834, 0x3, 0x2); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* AGC table select */ + /* 0xC1C[11:8] = 1*/ + rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 1); + } else + /* 0x82C[1:0] = 2'b00 */ + rtl_set_bbreg(hw, 0x82c, 0x3, 1); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, + 0x77337777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, + 0x77337777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); + } + + rtl_set_bbreg(hw, RTXPATH, 0xf0, 0); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf); + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, + "==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n", + rtlpriv->dm.ofdm_index[RF90_PATH_A]); + } + + if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) || + (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)) { + /* 0xC1C[31:21] */ + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + phy_get_tx_swing_8812A(hw, band, RF90_PATH_A)); + /* 0xE1C[31:21] */ + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, + phy_get_tx_swing_8812A(hw, band, RF90_PATH_B)); + + /* <20121005, Kordan> When TxPowerTrack is ON, + * we should take care of the change of BB swing. + * That is, reset all info to trigger Tx power tracking. + */ + if (band != current_band) { + bb_diff_between_band = + (rtldm->swing_diff_2g - rtldm->swing_diff_5g); + bb_diff_between_band = (band == BAND_ON_2_4G) ? 
+ bb_diff_between_band : + (-1 * bb_diff_between_band); + rtldm->default_ofdm_index += bb_diff_between_band * 2; + } + rtl8821ae_dm_clear_txpower_tracking_state(hw); + } + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n"); + return; +} + +static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw, + const u32 condition) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ + u32 _interface = 0x01; /* ODM_ITRF_PCIE */ + u32 _platform = 0x08;/* ODM_WIN */ + u32 cond = condition; + + if (condition == 0xCDCDCDCD) + return true; + + cond = condition & 0xFF; + if ((_board != cond) && cond != 0xFF) + return false; + + cond = condition & 0xFF00; + cond = cond >> 8; + if ((_interface & cond) == 0 && cond != 0x07) + return false; + + cond = condition & 0xFF0000; + cond = cond >> 16; + if ((_platform & cond) == 0 && cond != 0x0F) + return false; + return true; +} + +static void _rtl8821ae_config_rf_reg(struct ieee80211_hw *hw, + u32 addr, u32 data, + enum radio_path rfpath, u32 regaddr) +{ + if (addr == 0xfe || addr == 0xffe) { + /* In order not to disturb BT music when + * wifi init.(1ant NIC only) + */ + mdelay(50); + } else { + rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data); + udelay(1); + } +} + +static void _rtl8821ae_config_rf_radio_a(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1000; /*RF Content: radio_a_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl8821ae_config_rf_reg(hw, addr, data, + RF90_PATH_A, addr | maskforphyset); +} + +static void _rtl8821ae_config_rf_radio_b(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1001; /*RF Content: radio_b_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl8821ae_config_rf_reg(hw, addr, data, + RF90_PATH_B, addr | maskforphyset); +} + +static void _rtl8821ae_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if (addr == 0xfe) + mdelay(50); + else if (addr == 0xfd) + mdelay(5); + else if (addr == 0xfc) + mdelay(1); + else if (addr == 0xfb) + udelay(50); + else if (addr == 0xfa) + udelay(5); + else if (addr == 0xf9) + udelay(1); + else + rtl_set_bbreg(hw, addr, MASKDWORD, data); + + udelay(1); +} + +static void _rtl8821ae_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 band, rfpath, txnum, rate_section; + + for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band) + for (rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath) + for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum) + for (rate_section = 0; + rate_section < TX_PWR_BY_RATE_NUM_SECTION; + ++rate_section) + rtlphy->tx_power_by_rate_offset[band] + [rfpath][txnum][rate_section] = 0; +} + +static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, + u8 rate_section, + u8 txnum, u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path); + return; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; + break; + case OFDM: + rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; + break; + case HT_MCS8_MCS15: + 
rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; + break; + case VHT_1SSMCS0_1SSMCS9: + rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value; + break; + case VHT_2SSMCS0_2SSMCS9: + rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else if (band == BAND_ON_5G) { + switch (rate_section) { + case OFDM: + rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value; + break; + case HT_MCS8_MCS15: + rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value; + break; + case VHT_1SSMCS0_1SSMCS9: + rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value; + break; + case VHT_2SSMCS0_2SSMCS9: + rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band); + } +} + +static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, + u8 txnum, u8 rate_section) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 value = 0; + + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", + path); + return 0; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; + break; + case OFDM: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; + break; + case VHT_1SSMCS0_1SSMCS9: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4]; + break; + case VHT_2SSMCS0_2SSMCS9: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5]; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else if (band == BAND_ON_5G) { + switch (rate_section) { + case OFDM: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2]; + break; + case VHT_1SSMCS0_1SSMCS9: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3]; + break; + case VHT_2SSMCS0_2SSMCS9: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4]; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band); + } + + return value; +} + +static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u16 rawValue = 0; + u8 base = 0, path = 0; + + for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { + rawValue = 
(u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); + + rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); + } +} + +static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, + u8 end, u8 base_val) +{ + char i = 0; + u8 temp_value = 0; + u32 temp_data = 0; + + for (i = 3; i >= 0; --i) { + if (i >= start && i <= end) { + /* Get the exact value */ + temp_value = (u8)(*data >> (i * 8)) & 0xF; + temp_value += ((u8)((*data >> (i * 8 + 4)) & 0xF)) * 10; + + /* Change the value to a relative value */ + temp_value = (temp_value > base_val) ? 
temp_value - + base_val : base_val - temp_value; + } else { + temp_value = (u8)(*data >> (i * 8)) & 0xFF; + } + temp_data <<= 8; + temp_data |= temp_value; + } + *data = temp_data; +} + +static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 regulation, bw, channel, rate_section; + char temp_pwrlmt = 0; + + for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { + for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) { + for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) { + for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { + temp_pwrlmt = rtlphy->txpwr_limit_5g[regulation] + [bw][rate_section][channel][RF90_PATH_A]; + if (temp_pwrlmt == MAX_POWER_INDEX) { + if (bw == 0 || bw == 1) { /*5G 20M 40M VHT and HT can cross reference*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "No power limit table of the specified band %d, bandwidth %d, ratesection %d, channel %d, rf path %d\n", + 1, bw, rate_section, channel, RF90_PATH_A); + if (rate_section == 2) { + rtlphy->txpwr_limit_5g[regulation][bw][2][channel][RF90_PATH_A] = + rtlphy->txpwr_limit_5g[regulation][bw][4][channel][RF90_PATH_A]; + } else if (rate_section == 4) { + rtlphy->txpwr_limit_5g[regulation][bw][4][channel][RF90_PATH_A] = + rtlphy->txpwr_limit_5g[regulation][bw][2][channel][RF90_PATH_A]; + } else if (rate_section == 3) { + rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A] = + rtlphy->txpwr_limit_5g[regulation][bw][5][channel][RF90_PATH_A]; + } else if (rate_section == 5) { + rtlphy->txpwr_limit_5g[regulation][bw][5][channel][RF90_PATH_A] = + rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A]; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d", temp_pwrlmt); + } + } + } + } + } + } +} + +static u8 _rtl8812ae_phy_get_txpower_by_rate_base_index(struct ieee80211_hw *hw, + enum band_type band, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 index = 0; + if (band == BAND_ON_2_4G) { + switch (rate) { + case MGN_1M: + case MGN_2M: + case MGN_5_5M: + case MGN_11M: + index = 0; + break; + + case MGN_6M: + case MGN_9M: + case MGN_12M: + case MGN_18M: + case MGN_24M: + case MGN_36M: + case MGN_48M: + case MGN_54M: + index = 1; + break; + + case MGN_MCS0: + case MGN_MCS1: + case MGN_MCS2: + case MGN_MCS3: + case MGN_MCS4: + case MGN_MCS5: + case MGN_MCS6: + case MGN_MCS7: + index = 2; + break; + + case MGN_MCS8: + case MGN_MCS9: + case MGN_MCS10: + case MGN_MCS11: + case MGN_MCS12: + case MGN_MCS13: + case MGN_MCS14: + case MGN_MCS15: + index = 3; + break; + + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wrong rate 0x%x to obtain index in 2.4G in PHY_GetTxPowerByRateBaseIndex()\n", + rate); + break; + } + } else if (band == BAND_ON_5G) { + switch (rate) { + case MGN_6M: + case MGN_9M: + case MGN_12M: + case MGN_18M: + case MGN_24M: + case MGN_36M: + case MGN_48M: + case MGN_54M: + index = 0; + break; + + case MGN_MCS0: + case MGN_MCS1: + case MGN_MCS2: + case MGN_MCS3: + case MGN_MCS4: + case MGN_MCS5: + case MGN_MCS6: + case MGN_MCS7: + index = 1; + break; + + case MGN_MCS8: + case MGN_MCS9: + case MGN_MCS10: + case MGN_MCS11: + case MGN_MCS12: + case MGN_MCS13: + case MGN_MCS14: + case MGN_MCS15: + index = 2; + break; + + case MGN_VHT1SS_MCS0: + case MGN_VHT1SS_MCS1: + case MGN_VHT1SS_MCS2: + case MGN_VHT1SS_MCS3: + case MGN_VHT1SS_MCS4: + case MGN_VHT1SS_MCS5: + case MGN_VHT1SS_MCS6: + case MGN_VHT1SS_MCS7: 
+ case MGN_VHT1SS_MCS8: + case MGN_VHT1SS_MCS9: + index = 3; + break; + + case MGN_VHT2SS_MCS0: + case MGN_VHT2SS_MCS1: + case MGN_VHT2SS_MCS2: + case MGN_VHT2SS_MCS3: + case MGN_VHT2SS_MCS4: + case MGN_VHT2SS_MCS5: + case MGN_VHT2SS_MCS6: + case MGN_VHT2SS_MCS7: + case MGN_VHT2SS_MCS8: + case MGN_VHT2SS_MCS9: + index = 4; + break; + + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Wrong rate 0x%x to obtain index in 5G in PHY_GetTxPowerByRateBaseIndex()\n", + rate); + break; + } + } + + return index; +} + +static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 bw40_pwr_base_dbm2_4G, bw40_pwr_base_dbm5G; + u8 regulation, bw, channel, rate_section; + u8 base_index2_4G = 0; + u8 base_index5G = 0; + char temp_value = 0, temp_pwrlmt = 0; + u8 rf_path = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "=====> _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n"); + + _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw); + + for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { + for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) { + for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) { + for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { + /* obtain the base dBm values in 2.4G band + CCK => 11M, OFDM => 54M, HT 1T => MCS7, HT 2T => MCS15*/ + if (rate_section == 0) { /*CCK*/ + base_index2_4G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_2_4G, MGN_11M); + } else if (rate_section == 1) { /*OFDM*/ + base_index2_4G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_2_4G, MGN_54M); + } else if (rate_section == 2) { /*HT IT*/ + base_index2_4G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_2_4G, MGN_MCS7); + } else if (rate_section == 3) { /*HT 2T*/ + base_index2_4G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_2_4G, MGN_MCS15); + } + + temp_pwrlmt = rtlphy->txpwr_limit_2_4g[regulation] + [bw][rate_section][channel][RF90_PATH_A]; + + for (rf_path = RF90_PATH_A; + rf_path < MAX_RF_PATH_NUM; + ++rf_path) { + if (rate_section == 3) + bw40_pwr_base_dbm2_4G = + rtlphy->txpwr_by_rate_base_24g[rf_path][RF_2TX][base_index2_4G]; + else + bw40_pwr_base_dbm2_4G = + rtlphy->txpwr_by_rate_base_24g[rf_path][RF_1TX][base_index2_4G]; + + if (temp_pwrlmt != MAX_POWER_INDEX) { + temp_value = temp_pwrlmt - bw40_pwr_base_dbm2_4G; + rtlphy->txpwr_limit_2_4g[regulation] + [bw][rate_section][channel][rf_path] = + temp_value; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfPath %d] %d)\n", + regulation, bw, rate_section, channel, + rtlphy->txpwr_limit_2_4g[regulation][bw] + [rate_section][channel][rf_path], (temp_pwrlmt == 63) + ? 
0 : temp_pwrlmt/2, channel, rf_path, + bw40_pwr_base_dbm2_4G); + } + } + } + } + } + for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { + for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) { + for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) { + for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) { + /* obtain the base dBm values in 5G band + OFDM => 54M, HT 1T => MCS7, HT 2T => MCS15, + VHT => 1SSMCS7, VHT 2T => 2SSMCS7*/ + if (rate_section == 1) { /*OFDM*/ + base_index5G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_5G, MGN_54M); + } else if (rate_section == 2) { /*HT 1T*/ + base_index5G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_5G, MGN_MCS7); + } else if (rate_section == 3) { /*HT 2T*/ + base_index5G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_5G, MGN_MCS15); + } else if (rate_section == 4) { /*VHT 1T*/ + base_index5G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_5G, MGN_VHT1SS_MCS7); + } else if (rate_section == 5) { /*VHT 2T*/ + base_index5G = + _rtl8812ae_phy_get_txpower_by_rate_base_index(hw, + BAND_ON_5G, MGN_VHT2SS_MCS7); + } + + temp_pwrlmt = rtlphy->txpwr_limit_5g[regulation] + [bw][rate_section][channel] + [RF90_PATH_A]; + + for (rf_path = RF90_PATH_A; + rf_path < MAX_RF_PATH_NUM; + ++rf_path) { + if (rate_section == 3 || rate_section == 5) + bw40_pwr_base_dbm5G = + rtlphy->txpwr_by_rate_base_5g[rf_path] + [RF_2TX][base_index5G]; + else + bw40_pwr_base_dbm5G = + rtlphy->txpwr_by_rate_base_5g[rf_path] + [RF_1TX][base_index5G]; + + if (temp_pwrlmt != MAX_POWER_INDEX) { + temp_value = + temp_pwrlmt - bw40_pwr_base_dbm5G; + rtlphy->txpwr_limit_5g[regulation] + [bw][rate_section][channel] + [rf_path] = temp_value; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfPath %d] %d)\n", + regulation, bw, rate_section, + channel, rtlphy->txpwr_limit_5g[regulation] + [bw][rate_section][channel][rf_path], + temp_pwrlmt, channel, rf_path, bw40_pwr_base_dbm5G); + } + } + } + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "<===== _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n"); +} + +static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 i, j, k, l, m; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "=====> _rtl8821ae_phy_init_txpower_limit()!\n"); + + for (i = 0; i < MAX_REGULATION_NUM; ++i) { + for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j) + for (k = 0; k < MAX_RATE_SECTION_NUM; ++k) + for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m) + for (l = 0; l < MAX_RF_PATH_NUM; ++l) + rtlphy->txpwr_limit_2_4g + [i][j][k][m][l] + = MAX_POWER_INDEX; + } + for (i = 0; i < MAX_REGULATION_NUM; ++i) { + for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j) + for (k = 0; k < MAX_RATE_SECTION_NUM; ++k) + for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m) + for (l = 0; l < MAX_RF_PATH_NUM; ++l) + rtlphy->txpwr_limit_5g + [i][j][k][m][l] + = MAX_POWER_INDEX; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "<===== _rtl8821ae_phy_init_txpower_limit()!\n"); +} + +static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 base = 0, rfPath = 0; + + for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) { + base = 
_rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15); + + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5], + 0, 3, base); + + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9], + 0, 1, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9], + 2, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6], + 0, 3, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9); + _phy_convert_txpower_dbm_to_relative_value( + 
&rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9], + 0, 1, base); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9], + 2, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10], + 0, 3, base); + _phy_convert_txpower_dbm_to_relative_value( + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11], + 0, 3, base); + } + + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n"); +} + +static void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw) +{ + _rtl8821ae_phy_store_txpower_by_rate_base(hw); + _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(hw); +} + +/* string is in decimal */ +static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) +{ + u16 i = 0; + *pint = 0; + + while (str[i] != '\0') { + if (str[i] >= '0' && str[i] <= '9') { + *pint *= 10; + *pint += (str[i] - '0'); + } else { + return false; + } + ++i; + } + + return true; +} + +static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num) +{ + if (num == 0) + return false; + while (num > 0) { + num--; + if (str1[num] != str2[num]) + return false; + } + return true; +} + +static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, + u8 band, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + char channel_index = -1; + u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, + 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 149, + 151, 153, 155, 157, 159, 161, 163, 165, 167, 168, 169, 171, + 173, 175, 177}; + u8 i = 0; + if (band == BAND_ON_2_4G) + channel_index = channel - 1; + else if (band == BAND_ON_5G) { + for (i = 0; i < sizeof(channel_5g)/sizeof(u8); ++i) { + if (channel_5g[i] == channel) + channel_index = i; + } + } else + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s", + band, __func__); + + if (channel_index == -1) + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Channel %d of Band %d in %s", channel, + band, __func__); + + return channel_index; +} + +static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation, + u8 *pband, u8 *pbandwidth, + u8 *prate_section, u8 *prf_path, + u8 *pchannel, u8 *ppower_limit) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 regulation = 0, bandwidth = 0, rate_section = 0, channel; + u8 channel_index; + char power_limit = 0, prev_power_limit, ret; + + if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) || + !_rtl8812ae_get_integer_from_string((char *)ppower_limit, + &power_limit)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Illegal index of pwr_lmt table [chnl %d][val %d]\n", + channel, power_limit); + } + + power_limit = power_limit > MAX_POWER_INDEX ? 
+ MAX_POWER_INDEX : power_limit; + + if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("FCC"), 3)) + regulation = 0; + else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("MKK"), 3)) + regulation = 1; + else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("ETSI"), 4)) + regulation = 2; + else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("WW13"), 4)) + regulation = 3; + + if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("CCK"), 3)) + rate_section = 0; + else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("OFDM"), 4)) + rate_section = 1; + else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && + _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + rate_section = 2; + else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && + _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + rate_section = 3; + else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && + _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + rate_section = 4; + else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && + _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + rate_section = 5; + + if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("20M"), 3)) + bandwidth = 0; + else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("40M"), 3)) + bandwidth = 1; + else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("80M"), 3)) + bandwidth = 2; + else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("160M"), 4)) + bandwidth = 3; + + if (_rtl8812ae_eq_n_byte(pband, (u8 *)("2.4G"), 4)) { + ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, + BAND_ON_2_4G, + channel); + + if (ret == -1) + return; + + channel_index = ret; + + prev_power_limit = rtlphy->txpwr_limit_2_4g[regulation] + [bandwidth][rate_section] + [channel_index][RF90_PATH_A]; + + if (power_limit < prev_power_limit) + rtlphy->txpwr_limit_2_4g[regulation][bandwidth] + [rate_section][channel_index][RF90_PATH_A] = + power_limit; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n", + regulation, bandwidth, rate_section, channel_index, + rtlphy->txpwr_limit_2_4g[regulation][bandwidth] + [rate_section][channel_index][RF90_PATH_A]); + } else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) { + ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, + BAND_ON_5G, + channel); + + if (ret == -1) + return; + + channel_index = ret; + + prev_power_limit = rtlphy->txpwr_limit_5g[regulation][bandwidth] + [rate_section][channel_index] + [RF90_PATH_A]; + + if (power_limit < prev_power_limit) + rtlphy->txpwr_limit_5g[regulation][bandwidth] + [rate_section][channel_index][RF90_PATH_A] = power_limit; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n", + regulation, bandwidth, rate_section, channel, + rtlphy->txpwr_limit_5g[regulation][bandwidth] + [rate_section][channel_index][RF90_PATH_A]); + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Cannot recognize the band info in %s\n", pband); + return; + } +} + +static void _rtl8812ae_phy_config_bb_txpwr_lmt(struct ieee80211_hw *hw, + u8 *regulation, u8 *band, + u8 *bandwidth, u8 *rate_section, + u8 *rf_path, u8 *channel, + u8 *power_limit) +{ + _rtl8812ae_phy_set_txpower_limit(hw, regulation, band, bandwidth, + rate_section, rf_path, channel, + power_limit); +} + +static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 i = 0; + u32 array_len; + u8 **array; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + array_len = 
RTL8812AE_TXPWR_LMT_ARRAY_LEN; + array = RTL8812AE_TXPWR_LMT; + } else { + array_len = RTL8821AE_TXPWR_LMT_ARRAY_LEN; + array = RTL8821AE_TXPWR_LMT; + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "\n"); + + for (i = 0; i < array_len; i += 7) { + u8 *regulation = array[i]; + u8 *band = array[i+1]; + u8 *bandwidth = array[i+2]; + u8 *rate = array[i+3]; + u8 *rf_path = array[i+4]; + u8 *chnl = array[i+5]; + u8 *val = array[i+6]; + + _rtl8812ae_phy_config_bb_txpwr_lmt(hw, regulation, band, + bandwidth, rate, rf_path, + chnl, val); + } +} + +static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + bool rtstatus; + + _rtl8821ae_phy_init_txpower_limit(hw); + + /* RegEnableTxPowerLimit == 1 for 8812a & 8821a */ + if (rtlefuse->eeprom_regulatory != 2) + _rtl8821ae_phy_read_and_config_txpwr_lmt(hw); + + rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + return false; + } + _rtl8821ae_phy_init_tx_power_by_rate(hw); + if (rtlefuse->autoload_failflag == false) { + rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); + } + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + return false; + } + + _rtl8821ae_phy_txpower_by_rate_configuration(hw); + + /* RegEnableTxPowerLimit == 1 for 8812a & 8821a */ + if (rtlefuse->eeprom_regulatory != 2) + _rtl8812ae_phy_convert_txpower_limit_to_power_index(hw); + + rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_AGC_TAB); + + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + return false; + } + rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, 0x200)); + return true; +} + +static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 i, v1, v2; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n"); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + arraylength = RTL8821AEMAC_1T_ARRAYLEN; + ptrarray = RTL8821AE_MAC_REG_ARRAY; + } else { + arraylength = RTL8812AEMAC_1T_ARRAYLEN; + ptrarray = RTL8812AE_MAC_REG_ARRAY; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Img: MAC_REG_ARRAY LEN %d\n", arraylength); + for (i = 0; i < arraylength; i += 2) { + v1 = ptrarray[i]; + v2 = (u8)ptrarray[i + 1]; + if (v1 < 0xCDCDCDCD) { + rtl_write_byte(rtlpriv, v1, (u8)v2); + continue; + } else { + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength - 2) { + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength - 2) { + rtl_write_byte(rtlpriv, v1, v2); + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylength - 2) + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + } + } + return true; +} + +static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct 
rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + int i; + u32 *array_table; + u16 arraylen; + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + arraylen = RTL8812AEPHY_REG_1TARRAYLEN; + array_table = RTL8812AE_PHY_REG_ARRAY; + } else { + arraylen = RTL8821AEPHY_REG_1TARRAYLEN; + array_table = RTL8821AE_PHY_REG_ARRAY; + } + + for (i = 0; i < arraylen; i += 2) { + v1 = array_table[i]; + v2 = array_table[i + 1]; + if (v1 < 0xCDCDCDCD) { + _rtl8821ae_config_bb_reg(hw, v1, v2); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + _rtl8821ae_config_bb_reg(hw, v1, + v2); + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + + while (v2 != 0xDEAD && + i < arraylen - 2) { + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + arraylen = RTL8812AEAGCTAB_1TARRAYLEN; + array_table = RTL8812AE_AGC_TAB_ARRAY; + } else { + arraylen = RTL8821AEAGCTAB_1TARRAYLEN; + array_table = RTL8821AE_AGC_TAB_ARRAY; + } + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, v1, MASKDWORD, v2); + udelay(1); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + rtl_set_bbreg(hw, v1, MASKDWORD, + v2); + udelay(1); + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + + while (v2 != 0xDEAD && + i < arraylen - 2) { + READ_NEXT_PAIR(array_table, v1, + v2, i); + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", + array_table[i], array_table[i + 1]); + } + } + } + return true; +} + +static u8 _rtl8821ae_get_rate_section_index(u32 regaddr) +{ + u8 index = 0; + regaddr &= 0xFFF; + if (regaddr >= 0xC20 && regaddr <= 0xC4C) + index = (u8)((regaddr - 0xC20) / 4); + else if (regaddr >= 0xE20 && regaddr <= 0xE4C) + index = (u8)((regaddr - 0xE20) / 4); + else + RT_ASSERT(!COMP_INIT, + "Invalid RegAddr 0x%x\n", regaddr); + return index; +} + +static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw, + u32 band, u32 rfpath, + u32 txnum, u32 regaddr, + u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr); + + if (band != BAND_ON_2_4G && band != BAND_ON_5G) + RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band); + + if (rfpath >= 
MAX_RF_PATH) + RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath); + + if (txnum >= MAX_RF_PATH) + RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum); + + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n", + band, rfpath, txnum, rate_section, + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]); +} + +static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + int i; + u32 *array; + u16 arraylen; + u32 v1, v2, v3, v4, v5, v6; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + arraylen = RTL8812AEPHY_REG_ARRAY_PGLEN; + array = RTL8812AE_PHY_REG_ARRAY_PG; + } else { + arraylen = RTL8821AEPHY_REG_ARRAY_PGLEN; + array = RTL8821AE_PHY_REG_ARRAY_PG; + } + + if (configtype != BASEBAND_CONFIG_PHY_REG) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "configtype != BaseBand_Config_PHY_REG\n"); + return true; + } + for (i = 0; i < arraylen; i += 6) { + v1 = array[i]; + v2 = array[i+1]; + v3 = array[i+2]; + v4 = array[i+3]; + v5 = array[i+4]; + v6 = array[i+5]; + + if (v1 < 0xCDCDCDCD) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE && + (v4 == 0xfe || v4 == 0xffe)) { + msleep(50); + continue; + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (v4 == 0xfe) + msleep(50); + else if (v4 == 0xfd) + mdelay(5); + else if (v4 == 0xfc) + mdelay(1); + else if (v4 == 0xfb) + udelay(50); + else if (v4 == 0xfa) + udelay(5); + else if (v4 == 0xf9) + udelay(1); + } + _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, + v4, v5, v6); + continue; + } else { + /*don't need the hw_body*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + i += 2; /* skip the pair of expression*/ + v1 = array[i]; + v2 = array[i+1]; + v3 = array[i+2]; + while (v2 != 0xDEAD) { + i += 3; + v1 = array[i]; + v2 = array[i+1]; + v3 = array[i+2]; + } + } + } + } + + return true; +} + +bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) +{ + int i; + bool rtstatus = true; + u32 *radioa_array_table_a, *radioa_array_table_b; + u16 radioa_arraylen_a, radioa_arraylen_b; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN; + radioa_array_table_a = RTL8812AE_RADIOA_ARRAY; + radioa_arraylen_b = RTL8812AE_RADIOB_1TARRAYLEN; + radioa_array_table_b = RTL8812AE_RADIOB_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + rtstatus = true; + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen_a; i = i + 2) { + v1 = radioa_array_table_a[i]; + v2 = radioa_array_table_a[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl8821ae_config_rf_radio_a(hw, v1, v2); + continue; + } else{/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_a-2) + READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 
0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_a - 2) { + _rtl8821ae_config_rf_radio_a(hw, v1, v2); + READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen_a-2) + READ_NEXT_PAIR(radioa_array_table_a, v1, v2, i); + + } + } + } + break; + case RF90_PATH_B: + for (i = 0; i < radioa_arraylen_b; i = i + 2) { + v1 = radioa_array_table_b[i]; + v2 = radioa_array_table_b[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl8821ae_config_rf_radio_b(hw, v1, v2); + continue; + } else{/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_b-2) + READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_b-2) { + _rtl8821ae_config_rf_radio_b(hw, v1, v2); + READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen_b-2) + READ_NEXT_PAIR(radioa_array_table_b, v1, v2, i); + } + } + } + break; + case RF90_PATH_C: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + case RF90_PATH_D: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + return true; +} + +bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) +{ + #define READ_NEXT_RF_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = radioa_array_table[i]; \ + v2 = radioa_array_table[i+1]; \ + } \ + while (0) + + int i; + bool rtstatus = true; + u32 *radioa_array_table; + u16 radioa_arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + /* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */ + u32 v1 = 0, v2 = 0; + + radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN; + radioa_array_table = RTL8821AE_RADIOA_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + rtstatus = true; + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen; i = i + 2) { + v1 = radioa_array_table[i]; + v2 = radioa_array_table[i+1]; + if (v1 < 0xcdcdcdcd) + _rtl8821ae_config_rf_radio_a(hw, v1, v2); + else{/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw, v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen - 2) { + _rtl8821ae_config_rf_radio_a(hw, v1, v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + break; + + case RF90_PATH_B: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + case RF90_PATH_C: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + case RF90_PATH_D: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + return true; +} + +void 
rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->default_initialgain[0] = + (u8)rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8)rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8)rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + + rtlphy->framesync = (u8)rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR3, MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR2, MASKDWORD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync); +} + +static void phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8821A; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8821AE; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8821AE; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RA_SIREAD_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RB_SIREAD_8821A; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = RA_PIREAD_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = RB_PIREAD_8821A; +} + +void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = _rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_B, txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) +{ + u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, + 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, + 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, + 142, 144, 149, 151, 153, 155, 157, 159, 161, 163, 165, + 167, 168, 169, 171, 173, 175, 177 + }; + u8 i = 0; + bool in_24g = true; + + if (channel <= 14) { + in_24g = true; + *chnl_index = channel - 1; + } else { + in_24g = false; + + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; ++i) { + if 
(channel_5g[i] == channel) { + *chnl_index = i; + return in_24g; + } + } + } + return in_24g; +} + +static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) +{ + char rate_section = 0; + switch (rate) { + case DESC_RATE1M: + case DESC_RATE2M: + case DESC_RATE5_5M: + case DESC_RATE11M: + rate_section = 0; + break; + case DESC_RATE6M: + case DESC_RATE9M: + case DESC_RATE12M: + case DESC_RATE18M: + rate_section = 1; + break; + case DESC_RATE24M: + case DESC_RATE36M: + case DESC_RATE48M: + case DESC_RATE54M: + rate_section = 2; + break; + case DESC_RATEMCS0: + case DESC_RATEMCS1: + case DESC_RATEMCS2: + case DESC_RATEMCS3: + rate_section = 3; + break; + case DESC_RATEMCS4: + case DESC_RATEMCS5: + case DESC_RATEMCS6: + case DESC_RATEMCS7: + rate_section = 4; + break; + case DESC_RATEMCS8: + case DESC_RATEMCS9: + case DESC_RATEMCS10: + case DESC_RATEMCS11: + rate_section = 5; + break; + case DESC_RATEMCS12: + case DESC_RATEMCS13: + case DESC_RATEMCS14: + case DESC_RATEMCS15: + rate_section = 6; + break; + case DESC_RATEVHT1SS_MCS0: + case DESC_RATEVHT1SS_MCS1: + case DESC_RATEVHT1SS_MCS2: + case DESC_RATEVHT1SS_MCS3: + rate_section = 7; + break; + case DESC_RATEVHT1SS_MCS4: + case DESC_RATEVHT1SS_MCS5: + case DESC_RATEVHT1SS_MCS6: + case DESC_RATEVHT1SS_MCS7: + rate_section = 8; + break; + case DESC_RATEVHT1SS_MCS8: + case DESC_RATEVHT1SS_MCS9: + case DESC_RATEVHT2SS_MCS0: + case DESC_RATEVHT2SS_MCS1: + rate_section = 9; + break; + case DESC_RATEVHT2SS_MCS2: + case DESC_RATEVHT2SS_MCS3: + case DESC_RATEVHT2SS_MCS4: + case DESC_RATEVHT2SS_MCS5: + rate_section = 10; + break; + case DESC_RATEVHT2SS_MCS6: + case DESC_RATEVHT2SS_MCS7: + case DESC_RATEVHT2SS_MCS8: + case DESC_RATEVHT2SS_MCS9: + rate_section = 11; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + + return rate_section; +} + +static char _rtl8812ae_phy_get_world_wide_limit(char *limit_table) +{ + char min = limit_table[0]; + u8 i = 0; + + for (i = 0; i < MAX_REGULATION_NUM; ++i) { + if (limit_table[i] < min) + min = limit_table[i]; + } + return min; +} + +static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, + u8 band, + enum ht_channel_width bandwidth, + enum radio_path rf_path, + u8 rate, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + short band_temp = -1, regulation = -1, bandwidth_temp = -1, + rate_section = -1, channel_temp = -1; + u16 bd, regu, bdwidth, sec, chnl; + char power_limit = MAX_POWER_INDEX; + + if (rtlefuse->eeprom_regulatory == 2) + return MAX_POWER_INDEX; + + regulation = TXPWR_LMT_WW; + + if (band == BAND_ON_2_4G) + band_temp = 0; + else if (band == BAND_ON_5G) + band_temp = 1; + + if (bandwidth == HT_CHANNEL_WIDTH_20) + bandwidth_temp = 0; + else if (bandwidth == HT_CHANNEL_WIDTH_20_40) + bandwidth_temp = 1; + else if (bandwidth == HT_CHANNEL_WIDTH_80) + bandwidth_temp = 2; + + switch (rate) { + case DESC_RATE1M: + case DESC_RATE2M: + case DESC_RATE5_5M: + case DESC_RATE11M: + rate_section = 0; + break; + case DESC_RATE6M: + case DESC_RATE9M: + case DESC_RATE12M: + case DESC_RATE18M: + case DESC_RATE24M: + case DESC_RATE36M: + case DESC_RATE48M: + case DESC_RATE54M: + rate_section = 1; + break; + case DESC_RATEMCS0: + case DESC_RATEMCS1: + case DESC_RATEMCS2: + case DESC_RATEMCS3: + case DESC_RATEMCS4: + case DESC_RATEMCS5: + case DESC_RATEMCS6: + case DESC_RATEMCS7: + rate_section = 2; + break; + case DESC_RATEMCS8: + case DESC_RATEMCS9: + 
case DESC_RATEMCS10: + case DESC_RATEMCS11: + case DESC_RATEMCS12: + case DESC_RATEMCS13: + case DESC_RATEMCS14: + case DESC_RATEMCS15: + rate_section = 3; + break; + case DESC_RATEVHT1SS_MCS0: + case DESC_RATEVHT1SS_MCS1: + case DESC_RATEVHT1SS_MCS2: + case DESC_RATEVHT1SS_MCS3: + case DESC_RATEVHT1SS_MCS4: + case DESC_RATEVHT1SS_MCS5: + case DESC_RATEVHT1SS_MCS6: + case DESC_RATEVHT1SS_MCS7: + case DESC_RATEVHT1SS_MCS8: + case DESC_RATEVHT1SS_MCS9: + rate_section = 4; + break; + case DESC_RATEVHT2SS_MCS0: + case DESC_RATEVHT2SS_MCS1: + case DESC_RATEVHT2SS_MCS2: + case DESC_RATEVHT2SS_MCS3: + case DESC_RATEVHT2SS_MCS4: + case DESC_RATEVHT2SS_MCS5: + case DESC_RATEVHT2SS_MCS6: + case DESC_RATEVHT2SS_MCS7: + case DESC_RATEVHT2SS_MCS8: + case DESC_RATEVHT2SS_MCS9: + rate_section = 5; + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Wrong rate 0x%x\n", rate); + break; + } + + if (band_temp == BAND_ON_5G && rate_section == 0) + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Wrong rate 0x%x: No CCK in 5G Band\n", rate); + + /*workaround for wrong index combination to obtain tx power limit, + OFDM only exists in BW 20M*/ + if (rate_section == 1) + bandwidth_temp = 0; + + /*workaround for wrong index combination to obtain tx power limit, + *HT on 80M will reference to HT on 40M + */ + if ((rate_section == 2 || rate_section == 3) && band == BAND_ON_5G && + bandwidth_temp == 2) + bandwidth_temp = 1; + + if (band == BAND_ON_2_4G) + channel_temp = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, + BAND_ON_2_4G, channel); + else if (band == BAND_ON_5G) + channel_temp = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, + BAND_ON_5G, channel); + else if (band == BAND_ON_BOTH) + ;/* BAND_ON_BOTH don't care temporarily */ + + if (band_temp == -1 || regulation == -1 || bandwidth_temp == -1 || + rate_section == -1 || channel_temp == -1) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n", + band_temp, regulation, bandwidth_temp, rf_path, + rate_section, channel_temp); + return MAX_POWER_INDEX; + } + + bd = band_temp; + regu = regulation; + bdwidth = bandwidth_temp; + sec = rate_section; + chnl = channel_temp; + + if (band == BAND_ON_2_4G) { + char limits[10] = {0}; + u8 i; + + for (i = 0; i < 4; ++i) + limits[i] = rtlphy->txpwr_limit_2_4g[i][bdwidth] + [sec][chnl][rf_path]; + + power_limit = (regulation == TXPWR_LMT_WW) ? + _rtl8812ae_phy_get_world_wide_limit(limits) : + rtlphy->txpwr_limit_2_4g[regu][bdwidth] + [sec][chnl][rf_path]; + } else if (band == BAND_ON_5G) { + char limits[10] = {0}; + u8 i; + + for (i = 0; i < MAX_REGULATION_NUM; ++i) + limits[i] = rtlphy->txpwr_limit_5g[i][bdwidth] + [sec][chnl][rf_path]; + + power_limit = (regulation == TXPWR_LMT_WW) ? 
+ _rtl8812ae_phy_get_world_wide_limit(limits) : + rtlphy->txpwr_limit_5g[regu][chnl] + [sec][chnl][rf_path]; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "No power limit table of the specified band\n"); + } + return power_limit; +} + +static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, + u8 band, u8 path, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 shift = 0, rate_section, tx_num; + char tx_pwr_diff = 0; + char limit = 0; + + rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate); + tx_num = RF_TX_NUM_NONIMPLEMENT; + + if (tx_num == RF_TX_NUM_NONIMPLEMENT) { + if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) || + (rate >= DESC_RATEVHT2SS_MCS2 && rate <= DESC_RATEVHT2SS_MCS9)) + tx_num = RF_2TX; + else + tx_num = RF_1TX; + } + + switch (rate) { + case DESC_RATE1M: + case DESC_RATE6M: + case DESC_RATE24M: + case DESC_RATEMCS0: + case DESC_RATEMCS4: + case DESC_RATEMCS8: + case DESC_RATEMCS12: + case DESC_RATEVHT1SS_MCS0: + case DESC_RATEVHT1SS_MCS4: + case DESC_RATEVHT1SS_MCS8: + case DESC_RATEVHT2SS_MCS2: + case DESC_RATEVHT2SS_MCS6: + shift = 0; + break; + case DESC_RATE2M: + case DESC_RATE9M: + case DESC_RATE36M: + case DESC_RATEMCS1: + case DESC_RATEMCS5: + case DESC_RATEMCS9: + case DESC_RATEMCS13: + case DESC_RATEVHT1SS_MCS1: + case DESC_RATEVHT1SS_MCS5: + case DESC_RATEVHT1SS_MCS9: + case DESC_RATEVHT2SS_MCS3: + case DESC_RATEVHT2SS_MCS7: + shift = 8; + break; + case DESC_RATE5_5M: + case DESC_RATE12M: + case DESC_RATE48M: + case DESC_RATEMCS2: + case DESC_RATEMCS6: + case DESC_RATEMCS10: + case DESC_RATEMCS14: + case DESC_RATEVHT1SS_MCS2: + case DESC_RATEVHT1SS_MCS6: + case DESC_RATEVHT2SS_MCS0: + case DESC_RATEVHT2SS_MCS4: + case DESC_RATEVHT2SS_MCS8: + shift = 16; + break; + case DESC_RATE11M: + case DESC_RATE18M: + case DESC_RATE54M: + case DESC_RATEMCS3: + case DESC_RATEMCS7: + case DESC_RATEMCS11: + case DESC_RATEMCS15: + case DESC_RATEVHT1SS_MCS3: + case DESC_RATEVHT1SS_MCS7: + case DESC_RATEVHT2SS_MCS1: + case DESC_RATEVHT2SS_MCS5: + case DESC_RATEVHT2SS_MCS9: + shift = 24; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + + tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][path] + [tx_num][rate_section] >> shift) & 0xff; + + /* RegEnableTxPowerLimit == 1 for 8812a & 8821a */ + if (rtlpriv->efuse.eeprom_regulatory != 2) { + limit = _rtl8812ae_phy_get_txpower_limit(hw, band, + rtlphy->current_chan_bw, path, rate, + rtlphy->current_channel); + + if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 || + rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9) { + if (limit < 0) { + if (tx_pwr_diff < (-limit)) + tx_pwr_diff = -limit; + } + } else { + if (limit < 0) + tx_pwr_diff = limit; + else + tx_pwr_diff = tx_pwr_diff > limit ? 
limit : tx_pwr_diff; + } + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Maximum power by rate %d, final power by rate %d\n", + limit, tx_pwr_diff); + } + + return tx_pwr_diff; +} + +static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, + u8 rate, u8 bandwidth, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = (channel - 1); + u8 txpower = 0; + bool in_24g = false; + char powerdiff_byrate = 0; + + if (((rtlhal->current_bandtype == BAND_ON_2_4G) && + (channel > 14 || channel < 1)) || + ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) { + index = 0; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Illegal channel!!\n"); + } + + in_24g = _rtl8821ae_phy_get_chnl_index(channel, &index); + if (in_24g) { + if (RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) + txpower = rtlefuse->txpwrlevel_cck[path][index]; + else if (DESC_RATE6M <= rate) + txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; + else + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "invalid rate\n"); + + if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && + !RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) + txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S]; + + if (bandwidth == HT_CHANNEL_WIDTH_20) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht20diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht20diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_80) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; + } + } else { + if (DESC_RATE6M <= rate) + txpower = rtlefuse->txpwr_5g_bw40base[path][index]; + else + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING, + "INVALID Rate.\n"); + + if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && + !RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) + txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S]; + + if (bandwidth == HT_CHANNEL_WIDTH_20) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && 
+ rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_80) { + u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { + 42, 58, 106, 122, 138, 155, 171 + }; + u8 i; + + for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i) + if (channel_5g_80m[i] == channel) + index = i; + + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower = rtlefuse->txpwr_5g_bw80base[path][index] + + rtlefuse->txpwr_5g_bw80diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && + rate <= DESC_RATEVHT2SS_MCS9)) + txpower = rtlefuse->txpwr_5g_bw80base[path][index] + + rtlefuse->txpwr_5g_bw80diff[path][TX_1S] + + rtlefuse->txpwr_5g_bw80diff[path][TX_2S]; + } + } + if (rtlefuse->eeprom_regulatory != 2) + powerdiff_byrate = + _rtl8821ae_phy_get_txpower_by_rate(hw, (u8)(!in_24g), + path, rate); + + if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 || + rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9) + txpower -= powerdiff_byrate; + else + txpower += powerdiff_byrate; + + if (rate > DESC_RATE11M) + txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path]; + else + txpower += rtlpriv->dm.remnant_cck_idx; + + if (txpower > MAX_POWER_INDEX) + txpower = MAX_POWER_INDEX; + + return txpower; +} + +static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw, + u8 power_index, u8 path, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (path == RF90_PATH_A) { + switch (rate) { + case DESC_RATE1M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, + MASKBYTE0, power_index); + break; + case DESC_RATE2M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, + MASKBYTE1, power_index); + break; + case DESC_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, + MASKBYTE2, power_index); + break; + case DESC_RATE11M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, + MASKBYTE3, power_index); + break; + case DESC_RATE6M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, + MASKBYTE0, power_index); + break; + case DESC_RATE9M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, + MASKBYTE1, power_index); + break; + case DESC_RATE12M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, + MASKBYTE2, power_index); + break; + case DESC_RATE18M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, + MASKBYTE3, power_index); + break; + case DESC_RATE24M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, + MASKBYTE0, power_index); + break; + case DESC_RATE36M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, + MASKBYTE1, power_index); + break; + case DESC_RATE48M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, + MASKBYTE2, power_index); + break; + case DESC_RATE54M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS1: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS7: + rtl_set_bbreg(hw, 
RTXAGC_A_MCS07_MCS04, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT1SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT1SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT1SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT2SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT2SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, + MASKBYTE3, power_index); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Rate!!\n"); + break; + } + } else if (path == RF90_PATH_B) { + switch (rate) { + case DESC_RATE1M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, + MASKBYTE0, power_index); + break; + case DESC_RATE2M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, + MASKBYTE1, power_index); + break; + case 
DESC_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, + MASKBYTE2, power_index); + break; + case DESC_RATE11M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, + MASKBYTE3, power_index); + break; + case DESC_RATE6M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, + MASKBYTE0, power_index); + break; + case DESC_RATE9M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, + MASKBYTE1, power_index); + break; + case DESC_RATE12M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, + MASKBYTE2, power_index); + break; + case DESC_RATE18M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, + MASKBYTE3, power_index); + break; + case DESC_RATE24M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, + MASKBYTE0, power_index); + break; + case DESC_RATE36M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, + MASKBYTE1, power_index); + break; + case DESC_RATE48M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, + MASKBYTE2, power_index); + break; + case DESC_RATE54M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS1: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS7: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, + MASKBYTE3, power_index); + break; + case DESC_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, + MASKBYTE0, power_index); + break; + case DESC_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, + MASKBYTE1, power_index); + break; + case DESC_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, + MASKBYTE2, power_index); + break; + case DESC_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT1SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT1SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, + MASKBYTE3, power_index); + break; + case 
DESC_RATEVHT1SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT2SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, + MASKBYTE3, power_index); + break; + case DESC_RATEVHT2SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, + MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, + MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, + MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, + MASKBYTE3, power_index); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Rate!!\n"); + break; + } + } else { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid RFPath!!\n"); + } +} + +static void _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, + u8 *array, u8 path, + u8 channel, u8 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 i; + u8 power_index; + + for (i = 0; i < size; i++) { + power_index = + _rtl8821ae_get_txpower_index(hw, path, array[i], + rtlphy->current_chan_bw, + channel); + _rtl8821ae_phy_set_txpower_index(hw, power_index, path, + array[i]); + } +} + +static void _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw, + u8 bw, u8 channel, u8 path) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + u8 i; + u32 power_level, data, offset; + + if (path >= rtlphy->num_total_rfpath) + return; + + data = 0; + if (path == RF90_PATH_A) { + power_level = + _rtl8821ae_get_txpower_index(hw, RF90_PATH_A, + DESC_RATEMCS7, bw, channel); + offset = RA_TXPWRTRAING; + } else { + power_level = + _rtl8821ae_get_txpower_index(hw, RF90_PATH_B, + DESC_RATEMCS7, bw, channel); + offset = RB_TXPWRTRAING; + } + + for (i = 0; i < 3; i++) { + if (i == 0) + power_level = power_level - 10; + else if (i == 1) + power_level = power_level - 8; + else + power_level = power_level - 6; + + data |= (((power_level > 2) ? 
(power_level) : 2) << (i * 8)); + } + rtl_set_bbreg(hw, offset, 0xffffff, data); +} + +void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, + u8 channel, u8 path) +{ + /* struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); */ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, + DESC_RATE11M}; + u8 sizes_of_cck_retes = 4; + u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, + DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, + DESC_RATE48M, DESC_RATE54M}; + u8 sizes_of_ofdm_retes = 8; + u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, + DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, + DESC_RATEMCS6, DESC_RATEMCS7}; + u8 sizes_of_ht_retes_1t = 8; + u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, + DESC_RATEMCS10, DESC_RATEMCS11, + DESC_RATEMCS12, DESC_RATEMCS13, + DESC_RATEMCS14, DESC_RATEMCS15}; + u8 sizes_of_ht_retes_2t = 8; + u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, + DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, + DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5, + DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, + DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9}; + u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, + DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, + DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5, + DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, + DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9}; + u8 sizes_of_vht_retes = 10; + + if (rtlhal->current_bandtype == BAND_ON_2_4G) + _rtl8821ae_phy_set_txpower_level_by_path(hw, cck_rates, path, channel, + sizes_of_cck_retes); + + _rtl8821ae_phy_set_txpower_level_by_path(hw, ofdm_rates, path, channel, + sizes_of_ofdm_retes); + _rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_1t, path, channel, + sizes_of_ht_retes_1t); + _rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_1t, path, channel, + sizes_of_vht_retes); + + if (rtlphy->num_total_rfpath >= 2) { + _rtl8821ae_phy_set_txpower_level_by_path(hw, ht_rates_2t, path, + channel, + sizes_of_ht_retes_2t); + _rtl8821ae_phy_set_txpower_level_by_path(hw, vht_rates_2t, path, + channel, + sizes_of_vht_retes); + } + + _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, + channel, path); +} + +/*just in case, write txpower in DW, to reduce time*/ +void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 path = 0; + + for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path) + rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path); +} + +static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} + +void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + 
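+ /* Band-0 dynamic mechanisms are paused here for the scan; they are resumed by the SCAN_OPT_RESTORE case below. */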
+ break; + case SCAN_OPT_BACKUP_BAND1: + iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Unknown Scan Backup operation.\n"); + break; + } + } +} + +static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv *rtlpriv, u8 bw) +{ + u16 reg_rf_mode_bw, tmp = 0; + + reg_rf_mode_bw = rtl_read_word(rtlpriv, REG_TRXPTCL_CTL); + switch (bw) { + case HT_CHANNEL_WIDTH_20: + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, reg_rf_mode_bw & 0xFE7F); + break; + case HT_CHANNEL_WIDTH_20_40: + tmp = reg_rf_mode_bw | BIT(7); + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFEFF); + break; + case HT_CHANNEL_WIDTH_80: + tmp = reg_rf_mode_bw | BIT(8); + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "unknown Bandwidth: 0x%x\n", bw); + break; + } +} + +static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv) +{ + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 sc_set_40 = 0, sc_set_20 = 0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_LOWER) + sc_set_40 = VHT_DATA_SC_40_LOWER_OF_80MHZ; + else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER) + sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ; + else + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "SCMapping: Not Correct Primary40MHz Setting\n"); + + if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_set_20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ; + else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; + else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; + else if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ; + else + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "SCMapping: Not Correct Primary40MHz Setting\n"); + } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) + sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; + else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) + sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; + else + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "SCMapping: Not Correct Primary40MHz Setting\n"); + } + return (sc_set_40 << 4) | sc_set_20; +} + +void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 sub_chnl = 0; + u8 l1pk_val = 0; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "Switch to %s bandwidth\n", + (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : + (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ? 
+ "40MHz" : "80MHz"))); + + _rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw); + sub_chnl = _rtl8821ae_phy_get_secondary_chnl(rtlpriv); + rtl_write_byte(rtlpriv, 0x0483, sub_chnl); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300200); + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + + if (rtlphy->rf_type == RF_2T2R) + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 7); + else + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 8); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300201); + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl); + rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl); + + if (rtlphy->reg_837 & BIT(2)) + l1pk_val = 6; + else { + if (rtlphy->rf_type == RF_2T2R) + l1pk_val = 7; + else + l1pk_val = 8; + } + /* 0x848[25:22] = 0x6 */ + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); + + if (sub_chnl == VHT_DATA_SC_20_UPPER_OF_80MHZ) + rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 1); + else + rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 0); + break; + + case HT_CHANNEL_WIDTH_80: + /* 0x8ac[21,20,9:6,1,0]=8'b11100010 */ + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300202); + /* 0x8c4[30] = 1 */ + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); + rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl); + rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl); + + if (rtlphy->reg_837 & BIT(2)) + l1pk_val = 5; + else { + if (rtlphy->rf_type == RF_2T2R) + l1pk_val = 6; + else + l1pk_val = 7; + } + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); + + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + break; + } + + rtl8812ae_fixspur(hw, rtlphy->current_chan_bw, rtlphy->current_channel); + + rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n"); +} + +void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_bw = rtlphy->current_chan_bw; + + if (rtlphy->set_bwmode_inprogress) + return; + rtlphy->set_bwmode_inprogress = true; + if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) + rtl8821ae_phy_set_bw_mode_callback(hw); + else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "FALSE driver sleep or unload\n"); + rtlphy->set_bwmode_inprogress = false; + rtlphy->current_chan_bw = tmp_bw; + } +} + +void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 channel = rtlphy->current_channel; + u8 path; + u32 data; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "switch to channel%d\n", rtlphy->current_channel); + if (is_hal_stop(rtlhal)) + return; + + if (36 <= channel && channel <= 48) + data = 0x494; + else if (50 <= channel && channel <= 64) + data = 0x453; + else if (100 <= channel && channel <= 116) + data = 0x452; + else if (118 <= channel) + data = 0x412; + else + data = 0x96a; + rtl_set_bbreg(hw, RFC_AREA, 0x1ffe0000, data); + + for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++) { + if (36 <= channel && channel <= 64) + data = 0x101; + else if (100 <= channel && channel <= 140) + data = 0x301; + else if (140 < channel) + data = 0x501; + else + data = 0x000; + 
rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW, + BIT(18)|BIT(17)|BIT(16)|BIT(9)|BIT(8), data); + + rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW, + BMASKBYTE0, channel); + + if (channel > 14) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (36 <= channel && channel <= 64) + data = 0x114E9; + else if (100 <= channel && channel <= 140) + data = 0x110E9; + else + data = 0x110E9; + rtl8821ae_phy_set_rf_reg(hw, path, RF_APK, + BRFREGOFFSETMASK, data); + } + } + } + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); +} + +u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 timeout = 1000, timecount = 0; + u8 channel = rtlphy->current_channel; + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + + if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) { + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false driver sleep or unload\n"); + return 0; + } + while (rtlphy->lck_inprogress && timecount < timeout) { + mdelay(50); + timecount += 50; + } + + if (rtlphy->current_channel > 14 && rtlhal->current_bandtype != BAND_ON_5G) + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_5G); + else if (rtlphy->current_channel <= 14 && rtlhal->current_bandtype != BAND_ON_2_4G) + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G); + + rtlphy->sw_chnl_inprogress = true; + if (channel == 0) + channel = 1; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "switch to channel%d, band type is %d\n", + rtlphy->current_channel, rtlhal->current_bandtype); + + rtl8821ae_phy_sw_chnl_callback(hw); + + rtl8821ae_dm_clear_txpower_tracking_state(hw); + rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel); + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); + rtlphy->sw_chnl_inprogress = false; + return 1; +} + +u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl) +{ + u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, + 56, 58, 60, 62, 64, 100, 102, 104, 106, 108, + 110, 112, 114, 116, 118, 120, 122, 124, 126, + 128, 130, 132, 134, 136, 138, 140, 149, 151, + 153, 155, 157, 159, 161, 163, 165}; + u8 place = chnl; + + if (chnl > 14) { + for (place = 14; place < sizeof(channel_all); place++) + if (channel_all[place] == chnl) + return place-13; + } + + return 0; +} + +#define MACBB_REG_NUM 10 +#define AFE_REG_NUM 14 +#define RF_REG_NUM 3 + +static void _rtl8821ae_iqk_backup_macbb(struct ieee80211_hw *hw, + u32 *macbb_backup, + u32 *backup_macbb_reg, u32 mac_bb_num) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*save MACBB default value*/ + for (i = 0; i < mac_bb_num; i++) + macbb_backup[i] = rtl_read_dword(rtlpriv, backup_macbb_reg[i]); + + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupMacBB Success!!!!\n"); +} + +static void _rtl8821ae_iqk_backup_afe(struct ieee80211_hw *hw, u32 *afe_backup, + u32 *backup_afe_REG, u32 afe_num) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save AFE Parameters */ + for (i = 0; i < afe_num; i++) + afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupAFE Success!!!!\n"); +} + +static void _rtl8821ae_iqk_backup_rf(struct ieee80211_hw *hw, u32 *rfa_backup, + u32 *rfb_backup, u32 *backup_rf_reg, + u32 rf_num) +{ 
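+	/* Both RF paths are snapshotted here, although only path A is
+	 * calibrated and restored later by _rtl8821ae_iqk_tx() and
+	 * _rtl8821ae_iqk_restore_rf().
+	 */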
+ struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save RF Parameters*/ + for (i = 0; i < rf_num; i++) { + rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], + BMASKDWORD); + rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], + BMASKDWORD); + } + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupRF Success!!!!\n"); +} + +static void _rtl8821ae_iqk_configure_mac( + struct ieee80211_hw *hw + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + /* ========MAC register setting========*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + rtl_write_byte(rtlpriv, 0x522, 0x3f); + rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0); + rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/ + rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/ +} + +static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw, + enum radio_path path, u32 tx_x, u32 tx_y) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + switch (path) { + case RF90_PATH_A: + /* [31] = 1 --> Page C1 */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "TX_X = %x;;TX_Y = %x =====> fill to IQC\n", + tx_x, tx_y); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", + rtl_get_bbreg(hw, 0xcd4, 0x000007ff), + rtl_get_bbreg(hw, 0xccc, 0x000007ff)); + break; + default: + break; + }; +} + +static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw, + enum radio_path path, u32 rx_x, u32 rx_y) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + switch (path) { + case RF90_PATH_A: + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "rx_x = %x;;rx_y = %x ====>fill to IQC\n", + rx_x>>1, rx_y>>1); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "0xc10 = %x ====>fill to IQC\n", + rtl_read_dword(rtlpriv, 0xc10)); + break; + default: + break; + }; +} + +#define cal_num 10 + +static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u32 tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65; + int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0; + int tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], + tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num]; + bool tx0iqkok = false, rx0iqkok = false; + bool vdf_enable = false; + int i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], + ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0; + + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "BandWidth = %d.\n", + rtlphy->current_chan_bw); + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + vdf_enable = true; + + while (cal < cal_num) { + switch (path) { + case RF90_PATH_A: + temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff); + /* Path-A LOK */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*========Path-A AFE all on========*/ + /*Port 0 DAC/ADC on*/ + rtl_write_dword(rtlpriv, 0xc60, 0x77777777); + rtl_write_dword(rtlpriv, 0xc64, 0x77777777); + rtl_write_dword(rtlpriv, 0xc68, 0x19791979); + 
rtl_write_dword(rtlpriv, 0xc6c, 0x19791979); + rtl_write_dword(rtlpriv, 0xc70, 0x19791979); + rtl_write_dword(rtlpriv, 0xc74, 0x19791979); + rtl_write_dword(rtlpriv, 0xc78, 0x19791979); + rtl_write_dword(rtlpriv, 0xc7c, 0x19791979); + rtl_write_dword(rtlpriv, 0xc80, 0x19791979); + rtl_write_dword(rtlpriv, 0xc84, 0x19791979); + + rtl_set_bbreg(hw, 0xc00, 0xf, 0x4); /*hardware 3-wire off*/ + + /* LOK Setting */ + /* ====== LOK ====== */ + /*DAC/ADC sampling rate (160 MHz)*/ + rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7); + + /* 2. LoK RF Setting (at BW = 20M) */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002); + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3); /* BW 20M */ + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y) */ + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y) */ + rtl_write_dword(rtlpriv, 0x984, 0x00462910);/* [0]:AGC_en, [15]:idac_K_Mask */ + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_write_dword(rtlpriv, 0xc88, 0x821403f4); + + if (rtlhal->current_bandtype) + rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96); + else + rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96); + + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); /* Load LOK */ + + switch (rtlphy->current_chan_bw) { + case 1: + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1); + break; + case 2: + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0); + break; + default: + break; + } + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + + /* 3. 
TX RF Setting */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + /* ODM_SetBBReg(pDM_Odm, 0xcb8, 0xf, 0xd); */ + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y) */ + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y) */ + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);/* [0]:AGC_en, [15]:idac_K_Mask */ + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_write_dword(rtlpriv, 0xc88, 0x821403f1); + if (rtlhal->current_bandtype) + rtl_write_dword(rtlpriv, 0xc8c, 0x40163e96); + else + rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96); + + if (vdf_enable == 1) { + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n"); + for (k = 0; k <= 2; k++) { + switch (k) { + case 0: + rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); + break; + case 1: + rtl_set_bbreg(hw, 0xc80, BIT(28), 0x0); + rtl_set_bbreg(hw, 0xc84, BIT(28), 0x0); + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); + break; + case 2: + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff); + tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20); + tx_dt[cal] = ((16*tx_dt[cal])*10000/15708); + tx_dt[cal] = (tx_dt[cal] >> 1)+(tx_dt[cal] & BIT(0)); + rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); + rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[cal] & 0x00003fff); + break; + default: + break; + } + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============TXIQK Check============== */ + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail) { + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } else { + rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200); + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } else { + tx0iqkok = false; + cal_retry++; + if 
(cal_retry == 10) + break; + } + } + } + if (k == 3) { + tx_x0[cal] = vdf_x[k-1]; + tx_y0[cal] = vdf_y[k-1]; + } + } else { + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============TXIQK Check============== */ + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail) { + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } else { + rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200); + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } else { + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + } + + if (tx0iqkok == false) + break; /* TXK fail, Don't do RXK */ + + if (vdf_enable == 1) { + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); /* TX VDF Disable */ + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RXVDF Start\n"); + for (k = 0; k <= 2; k++) { + /* ====== RX mode TXK (RXK Step 1) ====== */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* 1. 
TX RF Setting */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y) */ + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y) */ + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);/* [0]:AGC_en, [15]:idac_K_Mask */ + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + switch (k) { + case 0: + { + rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0); + } + break; + case 1: + { + rtl_write_dword(rtlpriv, 0xc80, 0x08008c38);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x28008c38);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0); + } + break; + case 2: + { + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "VDF_Y[1] = %x;;;VDF_Y[0] = %x\n", + vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "VDF_X[1] = %x;;;VDF_X[0] = %x\n", + vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff); + rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "Rx_dt = %d\n", rx_dt[cal]); + rx_dt[cal] = ((16*rx_dt[cal])*10000/13823); + rx_dt[cal] = (rx_dt[cal] >> 1)+(rx_dt[cal] & BIT(0)); + rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[cal] & 0x00003fff); + } + break; + default: + break; + } + rtl_write_dword(rtlpriv, 0xc88, 0x821603e0); + rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96); + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============TXIQK Check============== */ + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail) { + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } else { + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + if (tx0iqkok == false) { /* If RX mode TXK fail, then take TXK Result */ + tx_x0_rxk[cal] = tx_x0[cal]; + tx_y0_rxk[cal] = tx_y0[cal]; + tx0iqkok = true; + RT_TRACE(rtlpriv, + COMP_IQK, + DBG_LOUD, + "RXK Step 
1 fail\n"); + } + + /* ====== RX IQK ====== */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* 1. RX RF Setting */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, BIT(31), 0x1); + rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0); + rtl_set_bbreg(hw, 0xcb8, 0xF, 0xe); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0x984, 0x0046a911); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_set_bbreg(hw, 0xc80, BIT(29), 0x1); + rtl_set_bbreg(hw, 0xc84, BIT(29), 0x0); + rtl_write_dword(rtlpriv, 0xc88, 0x02140119); + + rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /* pDM_Odm->SupportInterface == 1 */ + + if (k == 2) + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x1); /* RX VDF Enable */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============RXIQK Check============== */ + rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11)); + if (rx_fail == 0) { + rtl_write_dword(rtlpriv, 0xcb8, 0x06000000); + vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x08000000); + vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rx0iqkok = true; + break; + } else { + rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1); + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + + } + } else{ + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + } + if (k == 3) { + rx_x0[cal] = vdf_x[k-1]; + rx_y0[cal] = vdf_y[k-1]; + } + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); /* TX VDF Enable */ + } + + else{ + /* ====== RX mode TXK (RXK Step 1) ====== */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* 1. 
TX RF Setting */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);/* [0]:AGC_en, [15]:idac_K_Mask */ + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_write_dword(rtlpriv, 0xc88, 0x821603e0); + /* ODM_Write4Byte(pDM_Odm, 0xc8c, 0x68163e96); */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============TXIQK Check============== */ + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail) { + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } else { + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + if (tx0iqkok == false) { /* If RX mode TXK fail, then take TXK Result */ + tx_x0_rxk[cal] = tx_x0[cal]; + tx_y0_rxk[cal] = tx_y0[cal]; + tx0iqkok = true; + RT_TRACE(rtlpriv, COMP_IQK, + DBG_LOUD, "1"); + } + + /* ====== RX IQK ====== */ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* 1. 
RX RF Setting */ + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, BIT(31), 0x1); + rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0); + /* ODM_SetBBReg(pDM_Odm, 0xcb8, 0xF, 0xe); */ + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0x984, 0x0046a911); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);/* RX_TONE_idx[9:0], RxK_Mask[29] */ + rtl_write_dword(rtlpriv, 0xc88, 0x02140119); + + rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /*pDM_Odm->SupportInterface == 1*/ + + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] \B1N SI/PI \A8Ï¥\CE\C5v\A4\C1\B5\B9 iqk_dpk module */ + + cal_retry = 0; + while (1) { + /* one shot */ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /* Delay 10ms */ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1) { + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count > 20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============RXIQK Check============== */ + rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11)); + if (rx_fail == 0) { + rtl_write_dword(rtlpriv, 0xcb8, 0x06000000); + rx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x08000000); + rx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rx0iqkok = true; + break; + } else{ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1); + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + + } + } else{ + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + } + + if (tx0iqkok) + tx_average++; + if (rx0iqkok) + rx_average++; + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + break; + default: + break; + } + cal++; + } + + /* FillIQK Result */ + switch (path) { + case RF90_PATH_A: + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "========Path_A =======\n"); + if (tx_average == 0) + break; + + for (i = 0; i < tx_average; i++) { + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i, + (tx_x0_rxk[i])>>21&0x000007ff, i, + (tx_y0_rxk[i])>>21&0x000007ff); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i, + (tx_x0[i])>>21&0x000007ff, i, + (tx_y0[i])>>21&0x000007ff); + } + for (i = 0; i < tx_average; i++) { + for (ii = i+1; ii < tx_average; ii++) { + dx = (tx_x0[i]>>21) - (tx_x0[ii]>>21); + if (dx < 3 && dx > -3) { + dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21); + if (dy < 3 && dy > -3) { + tx_x = ((tx_x0[i]>>21) + (tx_x0[ii]>>21))/2; + tx_y = ((tx_y0[i]>>21) + (tx_y0[ii]>>21))/2; + tx_finish = 1; + break; + } + } + } + if (tx_finish == 1) + break; + } + + if 
(tx_finish == 1) + _rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); /* ? */ + else + _rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0); + + if (rx_average == 0) + break; + + for (i = 0; i < rx_average; i++) + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i, + (rx_x0[i])>>21&0x000007ff, i, + (rx_y0[i])>>21&0x000007ff); + for (i = 0; i < rx_average; i++) { + for (ii = i+1; ii < rx_average; ii++) { + dx = (rx_x0[i]>>21) - (rx_x0[ii]>>21); + if (dx < 4 && dx > -4) { + dy = (rx_y0[i]>>21) - (rx_y0[ii]>>21); + if (dy < 4 && dy > -4) { + rx_x = ((rx_x0[i]>>21) + (rx_x0[ii]>>21))/2; + rx_y = ((rx_y0[i]>>21) + (rx_y0[ii]>>21))/2; + rx_finish = 1; + break; + } + } + } + if (rx_finish == 1) + break; + } + + if (rx_finish == 1) + _rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y); + else + _rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0); + break; + default: + break; + } +} + +static void _rtl8821ae_iqk_restore_rf(struct ieee80211_hw *hw, + enum radio_path path, + u32 *backup_rf_reg, + u32 *rf_backup, u32 rf_reg_num) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + for (i = 0; i < RF_REG_NUM; i++) + rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, + rf_backup[i]); + + switch (path) { + case RF90_PATH_A: + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "RestoreRF Path A Success!!!!\n"); + break; + default: + break; + } +} + +static void _rtl8821ae_iqk_restore_afe(struct ieee80211_hw *hw, + u32 *afe_backup, u32 *backup_afe_reg, + u32 afe_num) +{ + u32 i; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* Reload AFE Parameters */ + for (i = 0; i < afe_num; i++) + rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1 */ + rtl_write_dword(rtlpriv, 0xc80, 0x0); + rtl_write_dword(rtlpriv, 0xc84, 0x0); + rtl_write_dword(rtlpriv, 0xc88, 0x0); + rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000); + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xc94, 0x00000000); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_write_dword(rtlpriv, 0xcb8, 0x0); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreAFE Success!!!!\n"); +} + +static void _rtl8821ae_iqk_restore_macbb(struct ieee80211_hw *hw, + u32 *macbb_backup, + u32 *backup_macbb_reg, + u32 macbb_num) +{ + u32 i; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */ + /* Reload MacBB Parameters */ + for (i = 0; i < macbb_num; i++) + rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]); + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreMacBB Success!!!!\n"); +} + +#undef MACBB_REG_NUM +#undef AFE_REG_NUM +#undef RF_REG_NUM + +#define MACBB_REG_NUM 11 +#define AFE_REG_NUM 12 +#define RF_REG_NUM 3 + +static void _rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw) +{ + u32 macbb_backup[MACBB_REG_NUM]; + u32 afe_backup[AFE_REG_NUM]; + u32 rfa_backup[RF_REG_NUM]; + u32 rfb_backup[RF_REG_NUM]; + u32 backup_macbb_reg[MACBB_REG_NUM] = { + 0xb00, 0x520, 0x550, 0x808, 0x90c, 0xc00, 0xc50, + 0xe00, 0xe50, 0x838, 0x82c + }; + u32 backup_afe_reg[AFE_REG_NUM] = { + 0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74, + 0xc78, 0xc7c, 0xc80, 0xc84, 0xcb8 + }; + u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0}; + + _rtl8821ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, + MACBB_REG_NUM); + 
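+	/* Save the AFE and RF state as well, run the path A TX/RX IQK,
+	 * then restore RF, AFE and MAC/BB in reverse order below.
+	 */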
_rtl8821ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8821ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, + RF_REG_NUM); + + _rtl8821ae_iqk_configure_mac(hw); + _rtl8821ae_iqk_tx(hw, RF90_PATH_A); + _rtl8821ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, + RF_REG_NUM); + + _rtl8821ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8821ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, + MACBB_REG_NUM); +} + +static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + /* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */ + /* struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); + + if (main) + rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1); + else + rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x2); +} + +#undef IQK_ADDA_REG_NUM +#undef IQK_DELAY_TIME + +void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) +{ +} + +void rtl8812ae_do_iqk(struct ieee80211_hw *hw, u8 delta_thermal_index, + u8 thermal_value, u8 threshold) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + rtldm->thermalvalue_iqk = thermal_value; + rtl8812ae_phy_iq_calibrate(hw, false); +} + +void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (!rtlphy->lck_inprogress) { + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + _rtl8821ae_phy_iq_calibrate(hw); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->lck_inprogress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } +} + +void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 i; + + RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, + "rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n", + (int)(sizeof(rtlphy->iqk_matrix) / + sizeof(struct iqk_matrix_regs)), + IQK_MATRIX_SETTINGS_NUM); + + for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { + rtlphy->iqk_matrix[i].value[0][0] = 0x100; + rtlphy->iqk_matrix[i].value[0][2] = 0x100; + rtlphy->iqk_matrix[i].value[0][4] = 0x100; + rtlphy->iqk_matrix[i].value[0][6] = 0x100; + + rtlphy->iqk_matrix[i].value[0][1] = 0x0; + rtlphy->iqk_matrix[i].value[0][3] = 0x0; + rtlphy->iqk_matrix[i].value[0][5] = 0x0; + rtlphy->iqk_matrix[i].value[0][7] = 0x0; + + rtlphy->iqk_matrix[i].iqk_done = false; + } +} + +void rtl8821ae_do_iqk(struct ieee80211_hw *hw, u8 delta_thermal_index, + u8 thermal_value, u8 threshold) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + rtl8821ae_reset_iqk_result(hw); + + rtldm->thermalvalue_iqk = thermal_value; + rtl8821ae_phy_iq_calibrate(hw, false); +} + +void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw) +{ +} + +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +{ +} + +void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +{ + _rtl8821ae_phy_set_rfpath_switch(hw, bmain); +} + +bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + bool postprocessing = false; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress); + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: 
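+			/* The backed-up initial gain and CCK CCA threshold
+			 * are restored in rtl8821ae_phy_set_io() at the end
+			 * of this function.
+			 */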
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Resume DM after scan.\n"); + postprocessing = true; + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Pause DM before scan.\n"); + postprocessing = true; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + } while (false); + if (postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + rtl8821ae_phy_set_io(hw); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype); + return true; +} + +static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_phy *rtlphy = &rtlpriv->phy; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) + _rtl8821ae_resume_tx_beacon(hw); + rtl8821ae_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1); + rtl8821ae_dm_write_cck_cca_thres(hw, + rtlphy->initgain_backup.cca); + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) + _rtl8821ae_stop_tx_beacon(hw); + rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue; + rtl8821ae_dm_write_dig(hw, 0x17); + rtlphy->initgain_backup.cca = dm_digtable->cur_cck_cca_thres; + rtl8821ae_dm_write_cck_cca_thres(hw, 0x40); + break; + case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); +} + +static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} + +static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = true; + u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; + + switch (rfpwr_state) { + case ERFON: + if ((ppsc->rfpwr_state == ERFOFF) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + bool rtstatus = false; + u32 initializecount = 0; + + do { + initializecount++; + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic enable\n"); + rtstatus = rtl_ps_enable_nic(hw); + } while (!rtstatus && (initializecount < 10)); + RT_CLEAR_PS_LEVEL(ppsc, + RT_RF_OFF_LEVL_HALT_NIC); + } else { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFON sleeped:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc-> + last_sleep_jiffies)); + ppsc->last_awake_jiffies = jiffies; + rtl8821ae_phy_set_rf_on(hw); + } + if (mac->link_state == MAC80211_LINKED) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } + break; + case ERFOFF: + for (queue_id = 0, i = 0; + 
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (queue_id == BEACON_QUEUE || + skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n", + (i + 1), queue_id, + skb_queue_len(&ring->queue)); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } + } + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic disable\n"); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_POWER_OFF); + } + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + bresult = false; + break; + } + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + return bresult; +} + +bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + bool bresult = false; + + if (rfpwr_state == ppsc->rfpwr_state) + return bresult; + bresult = _rtl8821ae_phy_set_rf_power_state(hw, rfpwr_state); + return bresult; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h new file mode 100644 index 0000000000000000000000000000000000000000..c411f0a95cc49c35630dbda113b444301c30de19 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h @@ -0,0 +1,259 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_PHY_H__ +#define __RTL8821AE_PHY_H__ + +/* MAX_TX_COUNT must always be set to 4, otherwise read + * efuse table sequence will be wrong. 
+ */ +#define MAX_TX_COUNT 4 +#define TX_1S 0 +#define TX_2S 1 +#define TX_3S 2 +#define TX_4S 3 + +#define MAX_POWER_INDEX 0x3F + +#define MAX_PRECMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 + +#define MAX_DOZE_WAITING_TIMES_9x 64 + +#define RT_CANNOT_IO(hw) false +#define HIGHPOWER_RADIOA_ARRAYLEN 22 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_BB_REG_NUM 9 +#define MAX_TOLERANCE 5 +#define IQK_DELAY_TIME 10 +#define index_mapping_NUM 15 + +#define APK_BB_REG_NUM 5 +#define APK_AFE_REG_NUM 16 +#define APK_CURVE_REG_NUM 4 +#define PATH_NUM 2 + +#define LOOP_LIMIT 5 +#define MAX_STALL_TIME 50 +#define AntennaDiversityValue 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 +#define Reset_Cnt_Limit 3 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 + +#define RF6052_MAX_PATH 2 + +#define CT_OFFSET_MAC_ADDR 0X16 + +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66 +#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 +#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C + +#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F +#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 + +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F + +#define RTL8821AE_MAX_PATH_NUM 2 + +#define TARGET_CHNL_NUM_2G_5G_8812 59 + +enum swchnlcmd_id { + CMDID_END, + CMDID_SET_TXPOWEROWER_LEVEL, + CMDID_BBREGWRITE10, + CMDID_WRITEPORT_ULONG, + CMDID_WRITEPORT_USHORT, + CMDID_WRITEPORT_UCHAR, + CMDID_RF_WRITEREG, +}; + +struct swchnlcmd { + enum swchnlcmd_id cmdid; + u32 para1; + u32 para2; + u32 msdelay; +}; + +enum hw90_block_e { + HW90_BLOCK_MAC = 0, + HW90_BLOCK_PHY0 = 1, + HW90_BLOCK_PHY1 = 2, + HW90_BLOCK_RF = 3, + HW90_BLOCK_MAXIMUM = 4, +}; + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum ra_offset_area { + RA_OFFSET_LEGACY_OFDM1, + RA_OFFSET_LEGACY_OFDM2, + RA_OFFSET_HT_OFDM1, + RA_OFFSET_HT_OFDM2, + RA_OFFSET_HT_OFDM3, + RA_OFFSET_HT_OFDM4, + RA_OFFSET_HT_CCK, +}; + +enum antenna_path { + ANTENNA_NONE, + ANTENNA_D, + ANTENNA_C, + ANTENNA_CD, + ANTENNA_B, + ANTENNA_BD, + ANTENNA_BC, + ANTENNA_BCD, + ANTENNA_A, + ANTENNA_AD, + ANTENNA_AC, + ANTENNA_ACD, + ANTENNA_AB, + ANTENNA_ABD, + ANTENNA_ABC, + ANTENNA_ABCD +}; + +struct r_antenna_select_ofdm { + u32 r_tx_antenna:4; + u32 r_ant_l:4; + u32 r_ant_non_ht:4; + u32 r_ant_ht1:4; + u32 r_ant_ht2:4; + u32 r_ant_ht_s1:4; + u32 r_ant_non_ht_s1:4; + u32 ofdm_txsc:2; + u32 reserved:2; +}; + +struct r_antenna_select_cck { + u8 r_cckrx_enable_2:2; + u8 r_cckrx_enable:2; + u8 r_ccktx_enable:4; +}; + +struct efuse_contents { + u8 mac_addr[ETH_ALEN]; + u8 cck_tx_power_idx[6]; + u8 ht40_1s_tx_power_idx[6]; + u8 ht40_2s_tx_power_idx_diff[3]; + u8 ht20_tx_power_idx_diff[3]; + u8 ofdm_tx_power_idx_diff[3]; + u8 ht40_max_power_offset[3]; + u8 ht20_max_power_offset[3]; + u8 channel_plan; + u8 thermal_meter; + u8 rf_option[5]; + u8 version; + u8 oem_id; + u8 regulatory; +}; + +struct tx_power_struct { + u8 cck[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_1s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_2s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht20_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_txpowerdiff; + u8 groupht20[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 groupht40[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + 
u8 pwrgroup_cnt; + u32 mcs_original_offset[4][16]; +}; +enum _ANT_DIV_TYPE { + NO_ANTDIV = 0xFF, + CG_TRX_HW_ANTDIV = 0x01, + CGCS_RX_HW_ANTDIV = 0x02, + FIXED_HW_ANTDIV = 0x03, + CG_TRX_SMART_ANTDIV = 0x04, + CGCS_RX_SW_ANTDIV = 0x05, + +}; + +u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask); +void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data); +bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw); +bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw); +bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw); +void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, + u8 band); +void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, + u8 channel); +void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw); +u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw); +void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, + bool b_recovery); +void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, + bool b_recovery); +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); +bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); +u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl); +void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, + u8 channel, u8 path); +void rtl8812ae_do_iqk(struct ieee80211_hw *hw, u8 delta_thermal_index, + u8 thermal_value, u8 threshold); +void rtl8821ae_do_iqk(struct ieee80211_hw *hw, u8 delta_thermal_index, + u8 thermal_value, u8 threshold); +void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw); +u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, u8 rf_path); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c new file mode 100644 index 0000000000000000000000000000000000000000..9ddf78a187dd46e763e8a79192f3d6e956959100 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c @@ -0,0 +1,182 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../pwrseqcmd.h" +#include "pwrseq.h" + +/* drivers should parse below arrays and do the corresponding actions */ +/* 3 Power on Array */ +struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + +/* 3Radio off GPIO Array */ +struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_END +}; + +/* 3Card Disable Array */ +struct wlan_pwr_cfg rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_CARDDIS + RTL8812_TRANS_END +}; + +/* 3 Card Enable Array */ +struct wlan_pwr_cfg rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_CARDDIS_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + +/* 3Suspend Array */ +struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_SUS + RTL8812_TRANS_END +}; + +/* 3 Resume Array */ +struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_SUS_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + +/* 3HWPDN Array */ +struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS] = { + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_PDN + RTL8812_TRANS_END +}; + +/* 3 Enter LPS */ +struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS + + RTL8812_TRANS_END_STEPS] = { + /* FW behavior */ + RTL8812_TRANS_ACT_TO_LPS + RTL8812_TRANS_END +}; + +/* 3 Leave LPS */ +struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS + + RTL8812_TRANS_END_STEPS] = { + /* FW behavior */ + RTL8812_TRANS_LPS_TO_ACT + RTL8812_TRANS_END +}; + +/* drivers should parse below arrays and do the corresponding actions */ +/*3 Power on Array*/ +struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3Radio off GPIO Array */ +struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_END +}; + +/*3Card Disable Array*/ +struct wlan_pwr_cfg rtl8821A_card_disable_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_ACT_TO_CARDEMU + 
RTL8821A_TRANS_CARDEMU_TO_CARDDIS + RTL8821A_TRANS_END +}; + +/*3 Card Enable Array*/ +/*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/ +struct wlan_pwr_cfg rtl8821A_card_enable_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_CARDDIS_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3Suspend Array*/ +struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_SUS + RTL8821A_TRANS_END +}; + +/*3 Resume Array*/ +struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_SUS_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3HWPDN Array*/ +struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS] = { + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_PDN + RTL8821A_TRANS_END +}; + +/*3 Enter LPS */ +struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS + + RTL8821A_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8821A_TRANS_ACT_TO_LPS + RTL8821A_TRANS_END +}; + +/*3 Leave LPS */ +struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8821A_TRANS_LPS_TO_ACT + RTL8821A_TRANS_END +}; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h new file mode 100644 index 0000000000000000000000000000000000000000..bf0b0ce9519c99f8f2cc7d4a1d990149bd02ef29 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h @@ -0,0 +1,738 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_PWRSEQ_H__ +#define __RTL8821AE_PWRSEQ_H__ + +#include "../pwrseqcmd.h" +#include "../btcoexist/halbt_precomp.h" + +#define RTL8812_TRANS_CARDEMU_TO_ACT_STEPS 15 +#define RTL8812_TRANS_ACT_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_CARDEMU_TO_SUS_STEPS 15 +#define RTL8812_TRANS_SUS_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_CARDEMU_TO_PDN_STEPS 25 +#define RTL8812_TRANS_PDN_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8812_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8812_TRANS_END_STEPS 1 + +/* The following macros have the following format: + * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value + * comments }, + */ +#define RTL8812_TRANS_CARDEMU_TO_ACT \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0 \ + /* disable SW LPS 0x04[10]=0*/}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1 \ + /* wait till 0x04[17] = 1 power ready*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0 \ + /* disable HWPDN 0x04[15]=0*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, 0 \ + /* disable WL suspend*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /* polling until return 0*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0}, + +#define RTL8812_TRANS_ACT_TO_CARDEMU \ + {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x04 \ + /* 0xc00[7:0] = 4 turn off 3-wire */}, \ + {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x04 \ + /* 0xe00[7:0] = 4 turn off 3-wire */}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /* 0x2[0] = 0 RESET BB, CLOSE RF */}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US \ + /*Delay 1us*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /* Whole BB is reset*/}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x2A \ + /* 0x07[7:0] = 0x28 sps pwm mode 0x2a for BT coex*/}, \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x02, 0 \ + /*0x8[1] = 0 ANA clk =500k */}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*0x04[9] = 1 turn off MAC by HW state machine*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0 \ + /*wait till 0x04[9] = 0 polling until return 0 to disable*/}, + +#define RTL8812_TRANS_CARDEMU_TO_SUS \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \ + /* gpio11 input mode, gpio10~8 output mode */}, \ + 
{0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio 0~7 output same value as input ?? */}, \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xff \ + /* gpio0~7 output mode */}, \ + {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /* 0x47[7:0] = 00 gpio mode */}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /* suspend option all off */}, \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x80, BIT7 \ + /*0x14[7] = 1 turn on ZCD */}, \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x01, BIT0 \ + /* 0x15[0] =1 trun on ZCD */}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x10, BIT4 \ + /*0x23[4] = 1 hpon LDO sleep mode */}, \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x02, 0 \ + /*0x8[1] = 0 ANA clk =500k */}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, BIT3 \ + /*0x04[11] = 2b'11 enable WL suspend for PCIe*/}, + +#define RTL8812_TRANS_SUS_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, 0 \ + /*0x04[11] = 2b'01enable WL suspend*/}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x10, 0 \ + /*0x23[4] = 0 hpon LDO sleep mode leave */}, \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x01, 0 \ + /* 0x15[0] =0 trun off ZCD */}, \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x80, 0 \ + /*0x14[7] = 0 turn off ZCD */}, \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio0~7 input mode */}, \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio11 input mode, gpio10~8 input mode */}, + +#define RTL8812_TRANS_CARDEMU_TO_CARDDIS \ + {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0 \ + /*0x03[2] = 0, reset 8051*/}, \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x05 \ + /*0x80=05h if reload fw, fill the default value of host_CPU handshake field*/}, \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \ + /* gpio11 input mode, gpio10~8 output mode */}, \ + {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio 0~7 output same value as input ?? 
*/}, \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xff \ + /* gpio0~7 output mode */}, \ + {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /* 0x47[7:0] = 00 gpio mode */}, \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x80, BIT7 \ + /*0x14[7] = 1 turn on ZCD */}, \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x01, BIT0 \ + /* 0x15[0] =1 trun on ZCD */}, \ + {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x01, 0 \ + /*0x12[0] = 0 force PFM mode */}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x10, BIT4 \ + /*0x23[4] = 1 hpon LDO sleep mode */}, \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x02, 0 \ + /*0x8[1] = 0 ANA clk =500k */}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20 \ + /*0x07=0x20 , SOP option to disable BG/MB*/}, \ + {0x001f, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*0x01f[1]=0 , disable RFC_0 control REG_RF_CTRL_8812 */}, \ + {0x0076, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*0x076[1]=0 , disable RFC_1 control REG_OPT_CTRL_8812 +2 */}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, BIT3 \ + /*0x04[11] = 2b'01 enable WL suspend*/}, + +#define RTL8812_TRANS_CARDDIS_TO_CARDEMU \ + {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /*0x12[0] = 1 force PWM mode */}, \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x80, 0 \ + /*0x14[7] = 0 turn off ZCD */}, \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x01, 0 \ + /* 0x15[0] =0 trun off ZCD */}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x10, 0 \ + /*0x23[4] = 0 hpon LDO leave sleep mode */}, \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio0~7 input mode */}, \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /* gpio11 input mode, gpio10~8 input mode */}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0 \ + /*0x04[10] = 0, enable SW LPS PCIE only*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, 0 \ + /*0x04[11] = 2b'01enable WL suspend*/}, \ + {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2 \ + /*0x03[2] = 1, enable 8051*/}, \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*PCIe DMA start*/}, + +#define RTL8812_TRANS_CARDEMU_TO_PDN \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7 \ + /* 0x04[15] = 1*/}, + +#define RTL8812_TRANS_PDN_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, 
PWR_CMD_WRITE, BIT7, 0 \ + /* 0x04[15] = 0*/}, + +#define RTL8812_TRANS_ACT_TO_LPS \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*PCIe DMA stop*/}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F \ + /*Tx Pause*/}, \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x04 \ + /* 0xc00[7:0] = 4 turn off 3-wire */}, \ + {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x04 \ + /* 0xe00[7:0] = 4 turn off 3-wire */}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /*CCK and OFDM are disabled,and clock are gated,and RF closed*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US \ + /*Delay 1us*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /* Whole BB is reset*/}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03 \ + /*Reset MAC TRX*/}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*check if removed later*/}, \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5 \ + /*Respond TxOK to scheduler*/}, + +#define RTL8812_TRANS_LPS_TO_ACT \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*SDIO RPWM*/}, \ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*USB RPWM*/}, \ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*PCIe RPWM*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS \ + /*Delay*/}, \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ + /*. 0x08[4] = 0 switch TSF to 40M*/}, \ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0 \ + /*Polling 0x109[7]=0 TSF in 40M*/}, \ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0 \ + /*. 0x29[7:6] = 2b'00 enable BB clock*/}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*. 0x101[1] = 1*/}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*. 
0x100[7:0] = 0xFF enable WMAC TRX*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0 \ + /*. 0x02[1:0] = 2b'11 enable BB macro*/}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*. 0x522 = 0*/}, + +#define RTL8812_TRANS_END \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + 0, PWR_CMD_END, 0, 0}, + +extern struct wlan_pwr_cfg rtl8812_power_on_flow + [RTL8812_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_radio_off_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_card_disable_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_card_enable_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_suspend_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_resume_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_hwpdn_flow + [RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_enter_lps_flow + [RTL8812_TRANS_ACT_TO_LPS_STEPS + + RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_leave_lps_flow + [RTL8812_TRANS_LPS_TO_ACT_STEPS + + RTL8812_TRANS_END_STEPS]; + +/* Check document WM-20130516-JackieLau-RTL8821A_Power_Architecture-R10.vsd + * There are 6 HW Power States: + * 0: POFF--Power Off + * 1: PDN--Power Down + * 2: CARDEMU--Card Emulation + * 3: ACT--Active Mode + * 4: LPS--Low Power State + * 5: SUS--Suspend + * + * The transitions between the different states are defined below + * TRANS_CARDEMU_TO_ACT + * TRANS_ACT_TO_CARDEMU + * TRANS_CARDEMU_TO_SUS + * TRANS_SUS_TO_CARDEMU + * TRANS_CARDEMU_TO_PDN + * TRANS_ACT_TO_LPS + * TRANS_LPS_TO_ACT + * + * TRANS_END + */ +#define RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS 25 +#define RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS 15 +#define RTL8821A_TRANS_SUS_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS 15 +#define RTL8821A_TRANS_PDN_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8821A_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8821A_TRANS_END_STEPS 1 + +#define RTL8821A_TRANS_CARDEMU_TO_ACT \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/}, \ + {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ + /*0x67[0] = 0 to disable BT_GPS_SEL pins*/}, \ + {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS \ + /*Delay 1ms*/}, \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, 0 \ + /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + 
PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0 \ + /* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/}, \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , BIT0 \ + /* Disable USB suspend */}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1 \ + /* wait till 0x04[17] = 1 power ready*/}, \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , 0 \ + /* Enable USB suspend */}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /* release WLON reset 0x04[16]=1*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0 \ + /* disable HWPDN 0x04[15]=0*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3), 0 \ + /* disable WL suspend*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /* polling until return 0*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0 \ + /**/}, \ + {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /*0x4C[24] = 0x4F[0] = 1, switch DPDT_SEL_P output from WL BB */},\ + {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT5|BIT4), (BIT5|BIT4) \ + /*0x66[13] = 0x67[5] = 1, switch for PAPE_G/PAPE_A \ + from WL BB ; 0x66[12] = 0x67[4] = 1, switch LNAON from WL BB */},\ + {0x0025, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6, 0 \ + /*anapar_mac<118> , 0x25[6]=0 by wlan single function*/},\ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*Enable falling edge triggering interrupt*/},\ + {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*Enable GPIO9 interrupt mode*/},\ + {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*Enable GPIO9 input mode*/},\ + {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0 \ + /*Enable HSISR GPIO[C:0] interrupt*/},\ + {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*Enable HSISR GPIO9 interrupt*/},\ + {0x007A, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3A \ + /*0x7A = 0x3A start BT*/},\ + {0x002E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF , 0x82 \ + /* 0x2C[23:12]=0x820 ; XTAL trim */}, \ + {0x0010, PWR_CUT_A_MSK , PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6 , BIT6 \ + /* 0x10[6]=1 */}, + +#define RTL8821A_TRANS_ACT_TO_CARDEMU \ + {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*0x1F[7:0] = 0 turn off RF*/}, \ + {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /*0x4C[24] = 0x4F[0] = 0, switch DPDT_SEL_P output from \ + register 0x65[2] */},\ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 
BIT1, 0 \ + /*Enable rising edge triggering interrupt*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*0x04[9] = 1 turn off MAC by HW state machine*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0 \ + /*wait till 0x04[9] = 0 polling until return 0 to disable*/}, \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5 \ + /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/}, \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/}, + +#define RTL8821A_TRANS_CARDEMU_TO_SUS \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3) \ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3 \ + /*0x04[12:11] = 2b'01 enable WL suspend*/}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4 \ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20 \ + /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4 \ + /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0 \ + /*Set SDIO suspend local register*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0 \ + /*wait power state to suspend*/}, + +#define RTL8821A_TRANS_SUS_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0 \ + /*clear suspend enable and power down enable*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0 \ + /*Set SDIO suspend local register*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1 \ + /*wait power state to suspend*/},\ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ + /*0x23[4] = 1b'0 12H LDO enter normal mode*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0 \ + /*0x04[12:11] = 2b'01enable WL suspend*/}, + +#define RTL8821A_TRANS_CARDEMU_TO_CARDDIS \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20 \ + /*0x07=0x20 , SOP option to disable BG/MB*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3 \ + /*0x04[12:11] = 2b'01 enable WL suspend*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2 \ + /*0x04[10] = 1, enable SW LPS*/}, \ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1 \ + /*0x48[16] = 1 to enable GPIO9 as EXT 
WAKEUP*/}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4 \ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0 \ + /*Set SDIO suspend local register*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0 \ + /*wait power state to suspend*/}, + +#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0 \ + /*clear suspend enable and power down enable*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0 \ + /*Set SDIO suspend local register*/}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1 \ + /*wait power state to suspend*/},\ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0 \ + /*0x04[12:11] = 2b'01enable WL suspend*/},\ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ + /*0x23[4] = 1b'0 12H LDO enter normal mode*/}, \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*PCIe DMA start*/}, + +#define RTL8821A_TRANS_CARDEMU_TO_PDN \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4 \ + /*0x23[4] = 1b'1 12H LDO enter sleep mode*/}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20 \ + /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /* 0x04[16] = 0*/},\ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7 \ + /* 0x04[15] = 1*/}, + +#define RTL8821A_TRANS_PDN_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0 \ + /* 0x04[15] = 0*/}, + +#define RTL8821A_TRANS_ACT_TO_LPS \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*PCIe DMA stop*/}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*Tx Pause*/}, \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0 \ + /*Should be zero if no packet is transmitting*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0 \ + /*CCK and OFDM 
are disabled,and clock are gated*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US \ + /*Delay 1us*/}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*Whole BB is reset*/}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03 \ + /*Reset MAC TRX*/}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0 \ + /*check if removed later*/}, \ + {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00 \ + /*When driver enter Sus/ Disable, enable LOP for BT*/}, \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5 \ + /*Respond TxOK to scheduler*/}, + +#define RTL8821A_TRANS_LPS_TO_ACT \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,\ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*SDIO RPWM*/},\ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*USB RPWM*/},\ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84 \ + /*PCIe RPWM*/},\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS \ + /*Delay*/},\ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0 \ + /*. 0x08[4] = 0 switch TSF to 40M*/},\ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0 \ + /*Polling 0x109[7]=0 TSF in 40M*/},\ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0 \ + /*. 0x29[7:6] = 2b'00 enable BB clock*/},\ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1 \ + /*. 0x101[1] = 1*/},\ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF \ + /*. 0x100[7:0] = 0xFF enable WMAC TRX*/},\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0 \ + /*. 0x02[1:0] = 2b'11 enable BB macro*/},\ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0 \ + /*. 
0x522 = 0*/}, + +#define RTL8821A_TRANS_END \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + 0, PWR_CMD_END, 0, 0}, + +extern struct wlan_pwr_cfg rtl8821A_power_on_flow + [RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_radio_off_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_card_disable_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_card_enable_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_suspend_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_resume_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_hwpdn_flow + [RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_enter_lps_flow + [RTL8821A_TRANS_ACT_TO_LPS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_leave_lps_flow + [RTL8821A_TRANS_LPS_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS]; + +/*RTL8812 Power Configuration CMDs for PCIe interface*/ +#define RTL8812_NIC_PWR_ON_FLOW rtl8812_power_on_flow +#define RTL8812_NIC_RF_OFF_FLOW rtl8812_radio_off_flow +#define RTL8812_NIC_DISABLE_FLOW rtl8812_card_disable_flow +#define RTL8812_NIC_ENABLE_FLOW rtl8812_card_enable_flow +#define RTL8812_NIC_SUSPEND_FLOW rtl8812_suspend_flow +#define RTL8812_NIC_RESUME_FLOW rtl8812_resume_flow +#define RTL8812_NIC_PDN_FLOW rtl8812_hwpdn_flow +#define RTL8812_NIC_LPS_ENTER_FLOW rtl8812_enter_lps_flow +#define RTL8812_NIC_LPS_LEAVE_FLOW rtl8812_leave_lps_flow + +/* RTL8821 Power Configuration CMDs for PCIe interface */ +#define RTL8821A_NIC_PWR_ON_FLOW rtl8821A_power_on_flow +#define RTL8821A_NIC_RF_OFF_FLOW rtl8821A_radio_off_flow +#define RTL8821A_NIC_DISABLE_FLOW rtl8821A_card_disable_flow +#define RTL8821A_NIC_ENABLE_FLOW rtl8821A_card_enable_flow +#define RTL8821A_NIC_SUSPEND_FLOW rtl8821A_suspend_flow +#define RTL8821A_NIC_RESUME_FLOW rtl8821A_resume_flow +#define RTL8821A_NIC_PDN_FLOW rtl8821A_hwpdn_flow +#define RTL8821A_NIC_LPS_ENTER_FLOW rtl8821A_enter_lps_flow +#define RTL8821A_NIC_LPS_LEAVE_FLOW rtl8821A_leave_lps_flow + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h new file mode 100644 index 0000000000000000000000000000000000000000..53668fc8f23e211dab03085359ce013fc65b81c2 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h @@ -0,0 +1,2464 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_REG_H__ +#define __RTL8821AE_REG_H__ + +#define TXPKT_BUF_SELECT 0x69 +#define RXPKT_BUF_SELECT 0xA5 +#define DISABLE_TRXPKT_BUF_ACCESS 0x0 + +#define REG_SYS_ISO_CTRL 0x0000 +#define REG_SYS_FUNC_EN 0x0002 +#define REG_APS_FSMCO 0x0004 +#define REG_SYS_CLKR 0x0008 +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C +#define REG_AFE_MISC 0x0010 +#define REG_SPS0_CTRL 0x0011 +#define REG_SPS_OCP_CFG 0x0018 +#define REG_RSV_CTRL 0x001C +#define REG_RF_CTRL 0x001F +#define REG_LDOA15_CTRL 0x0020 +#define REG_LDOV12D_CTRL 0x0021 +#define REG_LDOHCI12_CTRL 0x0022 +#define REG_LPLDO_CTRL 0x0023 +#define REG_AFE_XTAL_CTRL 0x0024 + /* 1.5v for 8188EE test chip, 1.4v for MP chip */ +#define REG_AFE_LDO_CTRL 0x0027 +#define REG_AFE_PLL_CTRL 0x0028 +#define REG_MAC_PHY_CTRL 0x002c +#define REG_EFUSE_CTRL 0x0030 +#define REG_EFUSE_TEST 0x0034 +#define REG_PWR_DATA 0x0038 +#define REG_CAL_TIMER 0x003C +#define REG_ACLK_MON 0x003E +#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_IO_SEL 0x0042 +#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_GPIO_PIN_CTRL 0x0044 +#define REG_GPIO_INTM 0x0048 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +#define REG_GPIO_PIN_CTRL_2 0x0060 +#define REG_GPIO_IO_SEL_2 0x0062 +#define REG_MULTI_FUNC_CTRL 0x0068 +#define REG_GPIO_OUTPUT 0x006c +#define REG_OPT_CTRL 0x0074 +#define REG_AFE_XTAL_CTRL_EXT 0x0078 +#define REG_XCK_OUT_CTRL 0x007c +#define REG_MCUFWDL 0x0080 +#define REG_WOL_EVENT 0x0081 +#define REG_MCUTSTCFG 0x0084 + +#define REG_HIMR 0x00B0 +#define REG_HISR 0x00B4 +#define REG_HIMRE 0x00B8 +#define REG_HISRE 0x00BC + +#define REG_PMC_DBG_CTRL2 0x00CC + +#define REG_EFUSE_ACCESS 0x00CF + +#define REG_BIST_SCAN 0x00D0 +#define REG_BIST_RPT 0x00D4 +#define REG_BIST_ROM_RPT 0x00D8 +#define REG_USB_SIE_INTF 0x00E0 +#define REG_PCIE_MIO_INTF 0x00E4 +#define REG_PCIE_MIO_INTD 0x00E8 +#define REG_HPON_FSM 0x00EC +#define REG_SYS_CFG 0x00F0 +#define REG_GPIO_OUTSTS 0x00F4 +#define REG_MAC_PHY_CTRL_NORMAL 0x00F8 +#define REG_SYS_CFG1 0x00FC +#define REG_ROM_VERSION 0x00FD + +#define REG_CR 0x0100 +#define REG_PBP 0x0104 +#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 +#define REG_TRXDMA_CTRL 0x010C +#define REG_TRXFF_BNDY 0x0114 +#define REG_TRXFF_STATUS 0x0118 +#define REG_RXFF_PTR 0x011C + +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 +#define REG_FTISR 0x013C +#define REG_PKTBUF_DBG_CTRL 0x0140 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 +#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2) + +#define REG_TC0_CTRL 0x0150 +#define REG_TC1_CTRL 0x0154 +#define REG_TC2_CTRL 0x0158 +#define REG_TC3_CTRL 0x015C +#define REG_TC4_CTRL 0x0160 +#define REG_TCUNIT_BASE 0x0164 +#define REG_MBIST_START 0x0174 +#define REG_MBIST_DONE 0x0178 +#define REG_MBIST_FAIL 0x017C +#define REG_32K_CTRL 0x0194 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_C2HEVT_CLEAR 0x01AF +#define REG_C2HEVT_MSG_TEST 0x01B8 +#define REG_MCUTST_1 0x01c0 +#define REG_MCUTST_WOWLAN 0x01C7 +#define REG_FMETHR 
0x01C8 +#define REG_HMETFR 0x01CC +#define REG_HMEBOX_0 0x01D0 +#define REG_HMEBOX_1 0x01D4 +#define REG_HMEBOX_2 0x01D8 +#define REG_HMEBOX_3 0x01DC + +#define REG_LLT_INIT 0x01E0 +#define REG_BB_ACCEESS_CTRL 0x01E8 +#define REG_BB_ACCESS_DATA 0x01EC + +#define REG_HMEBOX_EXT_0 0x01F0 +#define REG_HMEBOX_EXT_1 0x01F4 +#define REG_HMEBOX_EXT_2 0x01F8 +#define REG_HMEBOX_EXT_3 0x01FC + +#define REG_RQPN 0x0200 +#define REG_FIFOPAGE 0x0204 +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TXDMA_STATUS 0x0210 +#define REG_RQPN_NPQ 0x0214 + +#define REG_RXDMA_AGG_PG_TH 0x0280 + /* FW shall update this register before FW write RXPKT_RELEASE_POLL to 1 */ +#define REG_FW_UPD_RDPTR 0x0284 + /* Control the RX DMA.*/ +#define REG_RXDMA_CONTROL 0x0286 +/* The number of packets in RXPKTBUF. */ +#define REG_RXPKT_NUM 0x0287 + +#define REG_PCIE_CTRL_REG 0x0300 +#define REG_INT_MIG 0x0304 +#define REG_BCNQ_DESA 0x0308 +#define REG_HQ_DESA 0x0310 +#define REG_MGQ_DESA 0x0318 +#define REG_VOQ_DESA 0x0320 +#define REG_VIQ_DESA 0x0328 +#define REG_BEQ_DESA 0x0330 +#define REG_BKQ_DESA 0x0338 +#define REG_RX_DESA 0x0340 + +#define REG_DBI_WDATA 0x0348 +#define REG_DBI_RDATA 0x034C +#define REG_DBI_CTRL 0x0350 +#define REG_DBI_ADDR 0x0350 +#define REG_DBI_FLAG 0x0352 +#define REG_MDIO_WDATA 0x0354 +#define REG_MDIO_RDATA 0x0356 +#define REG_MDIO_CTL 0x0358 +#define REG_DBG_SEL 0x0360 +#define REG_PCIE_HRPWM 0x0361 +#define REG_PCIE_HCPWM 0x0363 +#define REG_UART_CTRL 0x0364 +#define REG_WATCH_DOG 0x0368 +#define REG_UART_TX_DESA 0x0370 +#define REG_UART_RX_DESA 0x0378 + +#define REG_HDAQ_DESA_NODEF 0x0000 +#define REG_CMDQ_DESA_NODEF 0x0000 + +#define REG_VOQ_INFORMATION 0x0400 +#define REG_VIQ_INFORMATION 0x0404 +#define REG_BEQ_INFORMATION 0x0408 +#define REG_BKQ_INFORMATION 0x040C +#define REG_MGQ_INFORMATION 0x0410 +#define REG_HGQ_INFORMATION 0x0414 +#define REG_BCNQ_INFORMATION 0x0418 +#define REG_TXPKT_EMPTY 0x041A + +#define REG_CPU_MGQ_INFORMATION 0x041C +#define REG_FWHW_TXQ_CTRL 0x0420 +#define REG_HWSEQ_CTRL 0x0423 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_MULTI_BCNQ_EN 0x0426 +#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_SPEC_SIFS 0x0428 +#define REG_RL 0x042A +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x044C +#define REG_CCK_CHECK 0x0454 +#define REG_AMPDU_MAX_TIME 0x0456 +#define REG_AGGLEN_LMT 0x0458 +#define REG_AMPDU_MIN_SPACE 0x045C +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_FAST_EDCA_CTRL 0x0460 +#define REG_RD_RESP_PKT_TH 0x0463 +#define REG_INIRTS_RATE_SEL 0x0480 +#define REG_INIDATA_RATE_SEL 0x0484 +#define REG_ARFR2 0x048C +#define REG_ARFR3 0x0494 +#define REG_POWER_STATUS 0x04A4 +#define REG_POWER_STAGE1 0x04B4 +#define REG_POWER_STAGE2 0x04B8 +#define REG_PKT_LIFE_TIME 0x04C0 +#define REG_STBC_SETTING 0x04C4 +#define REG_HT_SINGLE_AMPDU 0x04C7 +#define REG_PROT_MODE_CTRL 0x04C8 +#define REG_MAX_AGGR_NUM 0x04CA +#define REG_BAR_MODE_CTRL 0x04CC +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_EARLY_MODE_CONTROL 0x04D0 +#define REG_NQOS_SEQ 0x04DC +#define REG_QOS_SEQ 0x04DE +#define REG_NEED_CPU_HANDLE 0x04E0 +#define REG_PKT_LOSE_RPT 0x04E1 +#define REG_PTCL_ERR_STATUS 0x04E2 +#define REG_TX_RPT_CTRL 0x04EC +#define REG_TX_RPT_TIME 0x04F0 +#define REG_DUMMY 0x04FC + +#define REG_EDCA_VO_PARAM 0x0500 +#define REG_EDCA_VI_PARAM 0x0504 +#define REG_EDCA_BE_PARAM 0x0508 +#define REG_EDCA_BK_PARAM 0x050C 
+#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 +#define REG_RDG_PIFS 0x0513 +#define REG_SIFS_CTX 0x0514 +#define REG_SIFS_TRX 0x0516 +#define REG_AGGR_BREAK_TIME 0x051A +#define REG_SLOT 0x051B +#define REG_TX_PTCL_CTRL 0x0520 +#define REG_TXPAUSE 0x0522 +#define REG_DIS_TXREQ_CLR 0x0523 +#define REG_RD_CTRL 0x0524 +#define REG_TBTT_PROHIBIT 0x0540 +#define REG_RD_NAV_NXT 0x0544 +#define REG_NAV_PROT_LEN 0x0546 +#define REG_BCN_CTRL 0x0550 +#define REG_USTIME_TSF 0x0551 +#define REG_MBID_NUM 0x0552 +#define REG_DUAL_TSF_RST 0x0553 +#define REG_BCN_INTERVAL 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 +#define REG_DRVERLYINT 0x0558 +#define REG_BCNDMATIM 0x0559 +#define REG_ATIMWND 0x055A +#define REG_BCN_MAX_ERR 0x055D +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 +#define REG_INIT_TSFTR 0x0564 +#define REG_SECONDARY_CCA_CTRL 0x0577 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 +#define REG_ACMHWCTRL 0x05C0 +#define REG_ACMRSTCTRL 0x05C1 +#define REG_ACMAVG 0x05C2 +#define REG_VO_ADMTIME 0x05C4 +#define REG_VI_ADMTIME 0x05C6 +#define REG_BE_ADMTIME 0x05C8 +#define REG_EDCA_RANDOM_GEN 0x05CC +#define REG_NOA_DESC_SEL 0x05CF +#define REG_NOA_DESC_DURATION 0x05E0 +#define REG_NOA_DESC_INTERVAL 0x05E4 +#define REG_NOA_DESC_START 0x05E8 +#define REG_NOA_DESC_COUNT 0x05EC +#define REG_SCH_TXCMD 0x05F8 + +#define REG_APSD_CTRL 0x0600 +#define REG_BWOPMODE 0x0603 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 +#define REG_RX_PKT_LIMIT 0x060C +#define REG_RX_DLK_TIME 0x060D +#define REG_RX_DRVINFO_SZ 0x060F + +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 +#define REG_MBIDCAMCFG 0x0628 + +#define REG_USTIME_EDCA 0x0638 +#define REG_MAC_SPEC_SIFS 0x063A +#define REG_RESP_SIFS_CCK 0x063C +#define REG_RESP_SIFS_OFDM 0x063E +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 + +#define REG_NAV_CTRL 0x0650 +#define REG_NAV_UPPER 0x0652 +#define REG_BACAMCMD 0x0654 +#define REG_BACAMCONTENT 0x0658 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 +#define REG_RXERR_RPT 0x0664 +#define REG_TRXPTCL_CTL 0x0668 + +#define REG_CAMCMD 0x0670 +#define REG_CAMWRITE 0x0674 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 + +#define REG_WOW_CTRL 0x0690 +#define REG_PSSTATUS 0x0691 +#define REG_PS_RX_INFO 0x0692 +#define REG_UAPSD_TID 0x0693 +#define REG_LPNAV_CTRL 0x0694 +#define REG_WKFMCAM_NUM 0x0698 +#define REG_WKFMCAM_RWD 0x069C +#define REG_RXFLTMAP0 0x06A0 +#define REG_RXFLTMAP1 0x06A2 +#define REG_RXFLTMAP2 0x06A4 +#define REG_BCN_PSR_RPT 0x06A8 +#define REG_CALB32K_CTRL 0x06AC +#define REG_PKT_MON_CTRL 0x06B4 +#define REG_BT_COEX_TABLE 0x06C0 +#define REG_WMAC_RESP_TXINFO 0x06D8 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_TEST_USB_TXQS 0xFE48 +#define REG_TEST_SIE_VID 0xFE60 +#define REG_TEST_SIE_PID 0xFE62 +#define REG_TEST_SIE_OPTIONAL 0xFE64 +#define REG_TEST_SIE_CHIRP_K 0xFE65 +#define REG_TEST_SIE_PHY 0xFE66 +#define REG_TEST_SIE_MAC_ADDR 0xFE70 +#define REG_TEST_SIE_STRING 0xFE80 + +#define REG_NORMAL_SIE_VID 0xFE60 +#define REG_NORMAL_SIE_PID 0xFE62 +#define REG_NORMAL_SIE_OPTIONAL 0xFE64 +#define REG_NORMAL_SIE_EP 0xFE65 +#define REG_NORMAL_SIE_PHY 0xFE68 +#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 +#define REG_NORMAL_SIE_STRING 0xFE80 + +#define CR9346 REG_9346CR +#define MSR 
(REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR + +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) + +#define PBP REG_PBP + +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 + +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER + +#define INVALID_BBRF_VALUE 0x12345678 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN + +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) + +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) + +/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */ +#define HSIMR_GPIO12_0_INT_EN BIT(0) +#define HSIMR_SPS_OCP_INT_EN BIT(5) +#define HSIMR_RON_INT_EN BIT(6) +#define HSIMR_PDN_INT_EN BIT(7) +#define HSIMR_GPIO9_INT_EN BIT(25) + +/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */ +#define HSISR_GPIO12_0_INT BIT(0) +#define HSISR_SPS_OCP_INT BIT(5) +#define HSISR_RON_INT_EN BIT(6) +#define HSISR_PDNINT BIT(7) +#define HSISR_GPIO9_INT BIT(25) + +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 + +#define RRSR_RSC_OFFSET 21 +#define RRSR_SHORT_OFFSET 23 +#define RRSR_RSC_BW_40M 0x600000 +#define RRSR_RSC_UPSUBCHNL 0x400000 +#define RRSR_RSC_LOWSUBCHNL 0x200000 +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) +#define BRSR_ACKSHORTPMB BIT(23) + +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) 
+#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) + +#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) +#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ + RATR_24M | RATR_36M | RATR_48M | RATR_54M) +#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\ + RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\ + RATR_MCS6 | RATR_MCS7) +#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\ + RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\ + RATR_MCS14 | RATR_MCS15) + +#define BW_OPMODE_20MHZ BIT(2) +#define BW_OPMODE_5G BIT(1) +#define BW_OPMODE_11J BIT(0) + +#define CAM_VALID BIT(15) +#define CAM_NOTVALID 0x0000 +#define CAM_USEDK BIT(5) + +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 + +#define TOTAL_CAM_ENTRY 32 +#define HALF_CAM_ENTRY 16 + +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 +#define CAM_POLLINIG BIT(31) + +#define SCR_USEDK 0x01 +#define SCR_TXSEC_ENABLE 0x02 +#define SCR_RXSEC_ENABLE 0x04 + +#define WOW_PMEN BIT(0) +#define WOW_WOMEN BIT(1) +#define WOW_MAGIC BIT(2) +#define WOW_UWF BIT(3) + +/********************************************* +* 8188 IMR/ISR bits +**********************************************/ +#define IMR_DISABLED 0x0 +/* IMR DW0(0x0060-0063) Bit 0-31 */ +/* TXRPT interrupt when CCX bit of the packet is set */ +#define IMR_TXCCK BIT(30) +/* Power Save Time Out Interrupt */ +#define IMR_PSTIMEOUT BIT(29) +/* When GTIMER4 expires, this bit is set to 1 */ +#define IMR_GTINT4 BIT(28) +/* When GTIMER3 expires, this bit is set to 1 */ +#define IMR_GTINT3 BIT(27) +/* Transmit Beacon0 Error */ +#define IMR_TBDER BIT(26) +/* Transmit Beacon0 OK */ +#define IMR_TBDOK BIT(25) +/* TSF Timer BIT32 toggle indication interrupt */ +#define IMR_TSF_BIT32_TOGGLE BIT(24) +/* Beacon DMA Interrupt 0 */ +#define IMR_BCNDMAINT0 BIT(20) +/* Beacon Queue DMA OK0 */ +#define IMR_BCNDOK0 BIT(16) +/* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */ +#define IMR_HSISR_IND_ON_INT BIT(15) +/* Beacon DMA Interrupt Extension for Win7 */ +#define IMR_BCNDMAINT_E BIT(14) +/* CTWidnow End or ATIM Window End */ +#define IMR_ATIMEND BIT(12) +/* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1)*/ +#define IMR_HISR1_IND_INT BIT(11) +/* CPU to Host Command INT Status, Write 1 clear */ +#define IMR_C2HCMD BIT(10) +/* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_CPWM2 BIT(9) +/* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_CPWM BIT(8) +/* High Queue DMA OK */ +#define IMR_HIGHDOK BIT(7) +/* Management Queue DMA OK */ +#define IMR_MGNTDOK BIT(6) +/* AC_BK DMA OK */ +#define IMR_BKDOK BIT(5) +/* AC_BE DMA OK */ +#define IMR_BEDOK BIT(4) +/* AC_VI DMA OK */ +#define IMR_VIDOK BIT(3) +/* AC_VO DMA OK */ +#define IMR_VODOK BIT(2) +/* Rx Descriptor Unavailable */ +#define IMR_RDU BIT(1) +#define IMR_ROK BIT(0) /* Receive DMA OK */ + +/* IMR DW1(0x00B4-00B7) Bit 0-31 */ +/* Beacon DMA Interrupt 7 */ +#define IMR_BCNDMAINT7 BIT(27) +/* Beacon DMA Interrupt 6 */ +#define IMR_BCNDMAINT6 BIT(26) +/* Beacon DMA Interrupt 5 */ +#define IMR_BCNDMAINT5 BIT(25) +/* Beacon DMA Interrupt 4 */ +#define IMR_BCNDMAINT4 BIT(24) +/* Beacon DMA Interrupt 3 */ +#define IMR_BCNDMAINT3 BIT(23) +/* Beacon DMA Interrupt 2 */ +#define IMR_BCNDMAINT2 BIT(22) +/* Beacon DMA Interrupt 1 */ 
+#define IMR_BCNDMAINT1 BIT(21) +/* Beacon Queue DMA OK Interrupt 7 */ +#define IMR_BCNDOK7 BIT(20) +/* Beacon Queue DMA OK Interrupt 6 */ +#define IMR_BCNDOK6 BIT(19) +/* Beacon Queue DMA OK Interrupt 5 */ +#define IMR_BCNDOK5 BIT(18) +/* Beacon Queue DMA OK Interrupt 4 */ +#define IMR_BCNDOK4 BIT(17) +/* Beacon Queue DMA OK Interrupt 3 */ +#define IMR_BCNDOK3 BIT(16) +/* Beacon Queue DMA OK Interrupt 2 */ +#define IMR_BCNDOK2 BIT(15) +/* Beacon Queue DMA OK Interrupt 1 */ +#define IMR_BCNDOK1 BIT(14) +/* ATIM Window End Extension for Win7 */ +#define IMR_ATIMEND_E BIT(13) +/* Tx Error Flag Interrupt Status, write 1 clear. */ +#define IMR_TXERR BIT(11) +/* Rx Error Flag INT Status, Write 1 clear */ +#define IMR_RXERR BIT(10) +/* Transmit FIFO Overflow */ +#define IMR_TXFOVW BIT(9) +/* Receive FIFO Overflow */ +#define IMR_RXFOVW BIT(8) + +#define HWSET_MAX_SIZE 512 +#define EFUSE_MAX_SECTION 64 +#define EFUSE_REAL_CONTENT_LEN 256 +/* PG data exclude header, dummy 7 bytes from CP test and reserved 1 byte.*/ +#define EFUSE_OOB_PROTECT_BYTES 18 + +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 + +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_THERMALMETER 0x18 +#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 +#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 + +#define RF_OPTION1 0x79 +#define RF_OPTION2 0x7A +#define RF_OPTION3 0x7B +#define RF_OPTION4 0xC3 + +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 +#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA +#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 + +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE + +#define RTL_EEPROM_ID 0x8129 + +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define EEPROM_TESTR 0x08 + +#define EEPROM_TXPOWERCCK 0x10 +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT20DIFF 0x1B +#define EEPROM_TXPOWER_OFDMDIFF 0x1B + +#define EEPROM_TX_PWR_INX 0x10 + +#define EEPROM_CHANNELPLAN 0xB8 +#define EEPROM_XTAL_8821AE 0xB9 +#define EEPROM_THERMAL_METER 0xBA +#define EEPROM_IQK_LCK_88E 0xBB + +#define EEPROM_RF_BOARD_OPTION 0xC1 +#define EEPROM_RF_FEATURE_OPTION_88E 0xC2 +#define EEPROM_RF_BT_SETTING 0xC3 +#define EEPROM_VERSION 0xC4 +#define EEPROM_CUSTOMER_ID 0xC5 +#define EEPROM_RF_ANTENNA_OPT_88E 0xC9 +#define EEPROM_RFE_OPTION 0xCA + +#define EEPROM_MAC_ADDR 0xD0 +#define EEPROM_VID 0xD6 +#define EEPROM_DID 0xD8 +#define EEPROM_SVID 0xDA +#define EEPROM_SMID 0xDC + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) 
+#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) +#define RCR_APP_PHYST_RXFF BIT(28) +#define RCR_APP_BA_SSN BIT(27) +#define RCR_NONQOS_VHT BIT(26) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) +#define RCR_HTC_LOC_CTRL BIT(14) +#define RCR_AMF BIT(13) +#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) +#define RCR_CBSSID_BCN BIT(7) +#define RCR_CBSSID_DATA BIT(6) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) +#define RCR_MXDMA_OFFSET 8 +#define RCR_FIFO_OFFSET 13 + +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 +#define REG_USB_OPTIONAL 0xFE64 +#define REG_USB_CHIRP_K 0xFE65 +#define REG_USB_PHY 0xFE66 +#define REG_USB_MAC_ADDR 0xFE70 +#define REG_USB_HRPWM 0xFE58 +#define REG_USB_HCPWM 0xFE57 + +#define SW18_FPWM BIT(3) + +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) +#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) + +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) + +#define FEN_BBRSTB BIT(0) +#define FEN_BB_GLB_RSTN BIT(1) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) +#define FEN_DIO_PCIE BIT(5) +#define FEN_PCIEA BIT(6) +#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define ENPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) +#define LOADER_CLK_EN BIT(5) +#define _80M_SSC_DIS BIT(7) +#define _80M_SSC_EN_HO BIT(8) +#define PHY_SSC_RSTB BIT(9) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) + +#define BOOT_FROM_EEPROM BIT(4) +#define EEPROM_EN BIT(5) + +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) + +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) +#define R_DIS_PRST_0 BIT(5) +#define R_DIS_PRST_1 BIT(6) +#define LOCK_ALL_EN BIT(7) + +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) + +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) 
+#define LDA15_OBUF BIT(2) +#define LDA15_REG_VOS BIT(3) +#define _LDA15_VOADJ(x) (((x) & 0x7) << 4) + +#define LDV12_EN BIT(0) +#define LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) +#define LPLDO_LSM_DIS BIT(3) +#define _LDV12_VADJ(x) (((x) & 0xF) << 4) + +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) +#define _XTAL_BOSC(x) (((x) & 0x3) << 2) +#define _XTAL_CADJ(x) (((x) & 0xF) << 4) +#define XTAL_GATE_USB BIT(8) +#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9) +#define XTAL_GATE_AFE BIT(11) +#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12) +#define XTAL_RF_GATE BIT(14) +#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15) +#define XTAL_GATE_DIG BIT(17) +#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18) +#define XTAL_BT_GATE BIT(20) +#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) +#define _XTAL_GPIO(x) (((x) & 0x7) << 23) + +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) + +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) +#define APLL_FREF_SEL BIT(2) +#define APLL_EDGE_SEL BIT(3) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) + +#define APLL_REF_CLK_13MHZ 0x1 +#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_20MHZ 0x3 +#define APLL_REF_CLK_25MHZ 0x4 +#define APLL_REF_CLK_26MHZ 0x5 +#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_40MHZ 0x7 + +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define TIMER_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define ENSIC BIT(12) +#define SIC_23 BIT(13) +#define ENHDP BIT(14) +#define SIC_LBK BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define CHIP_VER_RTL_MASK 0xF000 +#define CHIP_VER_RTL_SHIFT 12 + +#define REG_LBMODE (REG_CR + 3) + +#define HCI_TXDMA_EN BIT(0) +#define HCI_RXDMA_EN BIT(1) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) +#define MASK_NETTYPE 0x30000 +#define NT_NO_LINK 0x0 +#define NT_LINK_AD_HOC 0x1 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 + +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 +#define LOOPBACK_NORMAL 0x0 +#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_MAC_DELAY 0x3 +#define LOOPBACK_PHY 0x1 +#define LOOPBACK_DMA 0x7 + +#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) +#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 
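Aside: helpers such as _NETTYPE()/MASK_NETTYPE and GET_RX_PAGE_SIZE()/GET_TX_PAGE_SIZE() above follow the usual shift-and-mask convention: the underscore-prefixed macro positions a value inside the register word, the MASK_* constant clears the old field, and the GET_* macro extracts a field again. Below is a small self-contained sketch of that read-modify-write pattern, with the macros copied locally and the register modelled as a plain variable rather than a real MMIO access; the helper list continues afterwards.

#include <stdint.h>
#include <stdio.h>

#define _NETTYPE(x)          (((x) & 0x3) << 16)   /* copied from above */
#define MASK_NETTYPE         0x30000
#define NT_LINK_AP           0x2
#define GET_RX_PAGE_SIZE(v)  ((v) & 0xF)
#define GET_TX_PAGE_SIZE(v)  (((v) & 0xF0) >> 4)

int main(void)
{
	uint32_t cr = 0x000A0000;   /* pretend current control-register value */
	uint8_t pbp = 0x24;         /* pretend page-size register value */

	/* read-modify-write: clear the NETTYPE field, then select "link to AP" */
	cr = (cr & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);

	printf("CR = 0x%08x, rx page code = %u, tx page code = %u\n",
	       (unsigned int)cr,
	       (unsigned int)GET_RX_PAGE_SIZE(pbp),
	       (unsigned int)GET_TX_PAGE_SIZE(pbp));
	return 0;
}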
+#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) + +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 +#define PBP_1024 0x4 + +#define RXDMA_ARBBW_EN BIT(0) +#define RXSHFT_EN BIT(1) +#define RXDMA_AGG_EN BIT(2) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) +#define QS_MANAGER_QUEUE BIT(12) +#define QS_HIGH_QUEUE BIT(13) + +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) + +#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) +#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) +#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10) +#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8) +#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6) +#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4) + +#define QUEUE_LOW 1 +#define QUEUE_NORMAL 2 +#define QUEUE_HIGH 3 + +#define _LLT_NO_ACTIVE 0x0 +#define _LLT_WRITE_ACCESS 0x1 +#define _LLT_READ_ACCESS 0x2 + +#define _LLT_INIT_DATA(x) ((x) & 0xFF) +#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) +#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) + +#define BB_WRITE_READ_MASK (BIT(31) | BIT(30)) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) + +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) + +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) + +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 + +#define BLK_DESC_NUM_SHIFT 4 +#define BLK_DESC_NUM_MASK 0xF + +#define DROP_DATA_EN BIT(9) + +#define EN_AMPDU_RTY_NEW BIT(7) + +#define _INIRTSMCS_SEL(x) ((x) & 0x3F) + +#define _SPEC_SIFS_CCK(x) ((x) & 0xFF) +#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8) + +#define RATE_REG_BITMAP_ALL 0xFFFFF + +#define _RRSC_BITMAP(x) ((x) & 0xFFFFF) + +#define _RRSR_RSC(x) (((x) & 0x3) << 21) +#define RRSR_RSC_RESERVED 0x0 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 + +#define USE_SHORT_G1 BIT(20) + +#define _AGGLMT_MCS0(x) ((x) & 0xF) +#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4) +#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8) +#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12) +#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16) +#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20) +#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) +#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) + +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 + +#define _DARF_RC1(x) ((x) & 0x1F) +#define _DARF_RC2(x) (((x) & 0x1F) << 8) +#define _DARF_RC3(x) (((x) & 0x1F) << 16) +#define _DARF_RC4(x) (((x) & 0x1F) << 24) +#define _DARF_RC5(x) ((x) & 0x1F) +#define _DARF_RC6(x) (((x) & 0x1F) << 8) +#define _DARF_RC7(x) (((x) & 0x1F) << 16) +#define _DARF_RC8(x) (((x) & 0x1F) << 24) + +#define _RARF_RC1(x) ((x) & 0x1F) +#define _RARF_RC2(x) (((x) & 0x1F) << 8) +#define _RARF_RC3(x) (((x) & 0x1F) << 16) +#define _RARF_RC4(x) (((x) & 0x1F) << 24) +#define _RARF_RC5(x) ((x) & 0x1F) +#define _RARF_RC6(x) (((x) & 0x1F) << 8) +#define _RARF_RC7(x) (((x) & 0x1F) << 16) +#define _RARF_RC8(x) (((x) & 0x1F) << 24) + +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 + +#define _AIFS(x) (x) +#define _ECW_MAX_MIN(x) ((x) << 8) +#define _TXOP_LIMIT(x) ((x) << 16) + +#define _BCNIFS(x) ((x) & 0xFF) +#define 
_BCNECW(x) ((((x) & 0xF)) << 8) + +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) + +#define _SIFS_CCK_CTX(x) ((x) & 0xFF) +#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8) + +#define _SIFS_OFDM_CTX(x) ((x) & 0xFF) +#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8) + +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) + +#define DIS_EDCA_CNT_DWN BIT(11) + +#define EN_MBSSID BIT(1) +#define EN_TXBCN_RPT BIT(2) +#define EN_BCN_FUNCTION BIT(3) + +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) + +#define STOP_BCNQ BIT(6) + +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) + +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) + +#define APSDOFF BIT(6) +#define APSDOFF_STATUS BIT(7) + +#define BW_20MHZ BIT(2) + +#define RATE_BITMAP_ALL 0xFFFFF + +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 + +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) +#define PWRBIT_OW_EN BIT(7) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) +#define HTC_LOC_CTRL BIT(14) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define ENMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) + +#define _MIN_SPACE(x) ((x) & 0x7) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) + +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_CCK_PPDU 4 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_HT_PPDU 8 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 + +#define RXERR_COUNTER_MASK 0xFFFFF +#define RXERR_RPT_RST BIT(27) +#define _RXERR_RPT_SEL(type) ((type) << 28) + +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) +#define SCR_TXBCUSEDK BIT(6) +#define SCR_RXBCUSEDK BIT(7) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define USB_IS_HIGH_SPEED 0 +#define USB_IS_FULL_SPEED 1 +#define USB_SPEED_MASK BIT(5) + +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 + +#define USB_TEST_EP_MASK 0x30 +#define USB_TEST_EP_SHIFT 4 + +#define USB_AGG_EN BIT(3) + +#define MAC_ADDR_LEN 6 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 255 + +#define 
POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 3000 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) +#define EPROM_CMD_CONFIG 0x3 +#define EPROM_CMD_LOAD 1 + +#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE + +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) + +#define RA_LSSIWRITE_8821A 0xc90 +#define RB_LSSIWRITE_8821A 0xe90 + +#define RA_PIREAD_8821A 0xd04 +#define RB_PIREAD_8821A 0xd44 +#define RA_SIREAD_8821A 0xd08 +#define RB_SIREAD_8821A 0xd48 + +#define RPMAC_RESET 0x100 +#define RPMAC_TXSTART 0x104 +#define RPMAC_TXLEGACYSIG 0x108 +#define RPMAC_TXHTSIG1 0x10c +#define RPMAC_TXHTSIG2 0x110 +#define RPMAC_PHYDEBUG 0x114 +#define RPMAC_TXPACKETNUM 0x118 +#define RPMAC_TXIDLE 0x11c +#define RPMAC_TXMACHEADER0 0x120 +#define RPMAC_TXMACHEADER1 0x124 +#define RPMAC_TXMACHEADER2 0x128 +#define RPMAC_TXMACHEADER3 0x12c +#define RPMAC_TXMACHEADER4 0x130 +#define RPMAC_TXMACHEADER5 0x134 +#define RPMAC_TXDADATYPE 0x138 +#define RPMAC_TXRANDOMSEED 0x13c +#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPHEADER 0x144 +#define RPMAC_CCKCRC16 0x148 +#define RPMAC_OFDMRXCRC32OK 0x170 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC8ER 0x17c +#define RPMAC_CCKCRXRC16ER 0x180 +#define RPMAC_CCKCRXRC32ER 0x184 +#define RPMAC_CCKCRXRC32OK 0x188 +#define RPMAC_TXSTATUS 0x18c + +#define RFPGA0_RFMOD 0x800 + +#define RFPGA0_TXINFO 0x804 +#define RFPGA0_PSDFUNCTION 0x808 + +#define RFPGA0_TXGAINSTAGE 0x80c + +#define RFPGA0_RFTIMING1 0x810 +#define RFPGA0_RFTIMING2 0x814 + +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c +#define RCCAONSEC 0x838 + +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 +#define RL1PEAKTH 0x848 + +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 + +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c + +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFC_AREA 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 + +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 + +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c + +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c + +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +#define RRFMOD 0x8ac +#define RHSSIREAD_8821AE 0x8b0 + +#define RFPGA0_PSDREPORT 0x8b4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +#define RADC_BUF_CLK 0x8c4 +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define RFPGA0_XCD_RFINTERFACERB 0x8e4 + +#define RFPGA1_RFMOD 0x900 + +#define RFPGA1_TXBLOCK 0x904 +#define RFPGA1_DEBUGSELECT 0x908 +#define RFPGA1_TXINFO 0x90c + +#define RCCK_SYSTEM 0xa00 +#define BCCK_SYSTEM 0x10 + +#define RCCK0_AFESETTING 0xa04 +#define RCCK0_CCA 0xa08 + +#define RCCK0_RXAGC1 0xa0c +#define RCCK0_RXAGC2 0xa10 + +#define RCCK0_RXHP 0xa14 + +#define RCCK0_DSPPARAMETER1 0xa18 +#define RCCK0_DSPPARAMETER2 0xa1c + +#define RCCK0_TXFILTER1 0xa20 +#define RCCK0_TXFILTER2 0xa24 +#define RCCK0_DEBUGPORT 0xa28 +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 
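Aside: the RFPGA0_*/RCCK0_* baseband addresses in this stretch are not written directly; rf.c later in this patch goes through rtl_set_bbreg(hw, reg, bitmask, data), which shifts data up to the lowest set bit of the bitmask before merging it into the register. The helper below is a hypothetical user-space stand-in that models only that mask-and-shift behaviour, not the rtlwifi implementation; the register list resumes immediately afterwards.

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for rtl_set_bbreg(): place "data" into the field selected by
 * "mask" within the current register value; mask must be non-zero */
static uint32_t set_bb_field(uint32_t reg_val, uint32_t mask, uint32_t data)
{
	unsigned int shift = 0;

	while (((mask >> shift) & 1) == 0)  /* find the mask's lowest set bit */
		shift++;
	return (reg_val & ~mask) | ((data << shift) & mask);
}

int main(void)
{
	uint32_t val = 0x12345678;

	/* write 0x5 into bits [11:8], i.e. bitmask 0x00000f00 */
	val = set_bb_field(val, 0x00000f00, 0x5);
	printf("new register value = 0x%08x\n", (unsigned int)val);
	return 0;
}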
+#define RCCK0_CCA_CNT 0xa60 + +/* PageB(0xB00) */ +#define RPDP_ANTA 0xb00 +#define RPDP_ANTA_4 0xb04 +#define RPDP_ANTA_8 0xb08 +#define RPDP_ANTA_C 0xb0c +#define RPDP_ANTA_10 0xb10 +#define RPDP_ANTA_14 0xb14 +#define RPDP_ANTA_18 0xb18 +#define RPDP_ANTA_1C 0xb1c +#define RPDP_ANTA_20 0xb20 +#define RPDP_ANTA_24 0xb24 + +#define RCONFIG_PMPD_ANTA 0xb28 +#define RCONFIG_RAM64x16 0xb2c + +#define RBNDA 0xb30 +#define RHSSIPAR 0xb34 + +#define RCONFIG_ANTA 0xb68 +#define RCONFIG_ANTB 0xb6c + +#define RPDP_ANTB 0xb70 +#define RPDP_ANTB_4 0xb74 +#define RPDP_ANTB_8 0xb78 +#define RPDP_ANTB_C 0xb7c +#define RPDP_ANTB_10 0xb80 +#define RPDP_ANTB_14 0xb84 +#define RPDP_ANTB_18 0xb88 +#define RPDP_ANTB_1C 0xb8c +#define RPDP_ANTB_20 0xb90 +#define RPDP_ANTB_24 0xb94 + +#define RCONFIG_PMPD_ANTB 0xb98 + +#define RBNDB 0xba0 + +#define RAPK 0xbd8 +#define RPM_RX0_ANTA 0xbdc +#define RPM_RX1_ANTA 0xbe0 +#define RPM_RX2_ANTA 0xbe4 +#define RPM_RX3_ANTA 0xbe8 +#define RPM_RX0_ANTB 0xbec +#define RPM_RX1_ANTB 0xbf0 +#define RPM_RX2_ANTB 0xbf4 +#define RPM_RX3_ANTB 0xbf8 + +/*RSSI Dump*/ +#define RA_RSSI_DUMP 0xBF0 +#define RB_RSSI_DUMP 0xBF1 +#define RS1_RX_EVM_DUMP 0xBF4 +#define RS2_RX_EVM_DUMP 0xBF5 +#define RA_RX_SNR_DUMP 0xBF6 +#define RB_RX_SNR_DUMP 0xBF7 +#define RA_CFO_SHORT_DUMP 0xBF8 +#define RB_CFO_SHORT_DUMP 0xBFA +#define RA_CFO_LONG_DUMP 0xBEC +#define RB_CFO_LONG_DUMP 0xBEE + +/*Page C*/ +#define ROFDM0_LSTF 0xc00 + +#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRMUXPAR 0xc08 +#define ROFDM0_TRSWISOLATION 0xc0c + +#define ROFDM0_XARXAFE 0xc10 +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c + +#define ROFDM0_RXDETECTOR1 0xc30 +#define ROFDM0_RXDETECTOR2 0xc34 +#define ROFDM0_RXDETECTOR3 0xc38 +#define ROFDM0_RXDETECTOR4 0xc3c + +#define ROFDM0_RXDSP 0xc40 +#define ROFDM0_CFOANDDAGC 0xc44 +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c + +#define ROFDM0_XAAGCCORE1 0xc50 +#define ROFDM0_XAAGCCORE2 0xc54 +#define ROFDM0_XBAGCCORE1 0xc58 +#define ROFDM0_XBAGCCORE2 0xc5c +#define ROFDM0_XCAGCCORE1 0xc60 +#define ROFDM0_XCAGCCORE2 0xc64 +#define ROFDM0_XDAGCCORE1 0xc68 +#define ROFDM0_XDAGCCORE2 0xc6c + +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCRSSITABLE 0xc78 +#define ROFDM0_HTSTFAGC 0xc7c + +#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXAFE 0xc84 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXAFE 0xc8c +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XDTXAFE 0xc9c + +#define ROFDM0_RXIQEXTANTA 0xca0 +#define ROFDM0_TXCOEFF1 0xca4 +#define ROFDM0_TXCOEFF2 0xca8 +#define ROFDM0_TXCOEFF3 0xcac +#define ROFDM0_TXCOEFF4 0xcb0 +#define ROFDM0_TXCOEFF5 0xcb4 +#define ROFDM0_TXCOEFF6 0xcb8 + +/*Path_A RFE cotrol */ +#define RA_RFE_CTRL_8812 0xcb8 +/*Path_B RFE control*/ +#define RB_RFE_CTRL_8812 0xeb8 + +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_FRAMESYNC 0xcf0 +#define ROFDM0_DFSREPORT 0xcf4 + +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 + +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CFOTRACKING 0xd2c +#define ROFDM1_TRXMESAURE1 0xd34 +#define ROFDM1_INTFDET 0xd3c +#define 
ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 + +#define ROFDM_PHYCOUNTER1 0xda0 +#define ROFDM_PHYCOUNTER2 0xda4 +#define ROFDM_PHYCOUNTER3 0xda8 + +#define ROFDM_SHORTCFOAB 0xdac +#define ROFDM_SHORTCFOCD 0xdb0 +#define ROFDM_LONGCFOAB 0xdb4 +#define ROFDM_LONGCFOCD 0xdb8 +#define ROFDM_TAILCF0AB 0xdbc +#define ROFDM_TAILCF0CD 0xdc0 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_BWREPORT 0xdcc +#define ROFDM_AGCREPORT 0xdd0 +#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXEVMCSI 0xdd8 +#define ROFDM_SIGREPORT 0xddc + +#define RTXAGC_A_CCK11_CCK1 0xc20 +#define RTXAGC_A_OFDM18_OFDM6 0xc24 +#define RTXAGC_A_OFDM54_OFDM24 0xc28 +#define RTXAGC_A_MCS03_MCS00 0xc2c +#define RTXAGC_A_MCS07_MCS04 0xc30 +#define RTXAGC_A_MCS11_MCS08 0xc34 +#define RTXAGC_A_MCS15_MCS12 0xc38 +#define RTXAGC_A_NSS1INDEX3_NSS1INDEX0 0xc3c +#define RTXAGC_A_NSS1INDEX7_NSS1INDEX4 0xc40 +#define RTXAGC_A_NSS2INDEX1_NSS1INDEX8 0xc44 +#define RTXAGC_A_NSS2INDEX5_NSS2INDEX2 0xc48 +#define RTXAGC_A_NSS2INDEX9_NSS2INDEX6 0xc4c +#define RTXAGC_B_CCK11_CCK1 0xe20 +#define RTXAGC_B_OFDM18_OFDM6 0xe24 +#define RTXAGC_B_OFDM54_OFDM24 0xe28 +#define RTXAGC_B_MCS03_MCS00 0xe2c +#define RTXAGC_B_MCS07_MCS04 0xe30 +#define RTXAGC_B_MCS11_MCS08 0xe34 +#define RTXAGC_B_MCS15_MCS12 0xe38 +#define RTXAGC_B_NSS1INDEX3_NSS1INDEX0 0xe3c +#define RTXAGC_B_NSS1INDEX7_NSS1INDEX4 0xe40 +#define RTXAGC_B_NSS2INDEX1_NSS1INDEX8 0xe44 +#define RTXAGC_B_NSS2INDEX5_NSS2INDEX2 0xe48 +#define RTXAGC_B_NSS2INDEX9_NSS2INDEX6 0xe4c + +#define RA_TXPWRTRAING 0xc54 +#define RB_TXPWRTRAING 0xe54 + +#define RFPGA0_IQK 0xe28 +#define RTX_IQK_TONE_A 0xe30 +#define RRX_IQK_TONE_A 0xe34 +#define RTX_IQK_PI_A 0xe38 +#define RRX_IQK_PI_A 0xe3c + +#define RTX_IQK 0xe40 +#define RRX_IQK 0xe44 +#define RIQK_AGC_PTS 0xe48 +#define RIQK_AGC_RSP 0xe4c +#define RTX_IQK_TONE_B 0xe50 +#define RRX_IQK_TONE_B 0xe54 +#define RTX_IQK_PI_B 0xe58 +#define RRX_IQK_PI_B 0xe5c +#define RIQK_AGC_CONT 0xe60 + +#define RBLUE_TOOTH 0xe6c +#define RRX_WAIT_CCA 0xe70 +#define RTX_CCK_RFON 0xe74 +#define RTX_CCK_BBON 0xe78 +#define RTX_OFDM_RFON 0xe7c +#define RTX_OFDM_BBON 0xe80 +#define RTX_TO_RX 0xe84 +#define RTX_TO_TX 0xe88 +#define RRX_CCK 0xe8c + +#define RTX_POWER_BEFORE_IQK_A 0xe94 +#define RTX_POWER_AFTER_IQK_A 0xe9c + +#define RRX_POWER_BEFORE_IQK_A 0xea0 +#define RRX_POWER_BEFORE_IQK_A_2 0xea4 +#define RRX_POWER_AFTER_IQK_A 0xea8 +#define RRX_POWER_AFTER_IQK_A_2 0xeac + +#define RTX_POWER_BEFORE_IQK_B 0xeb4 +#define RTX_POWER_AFTER_IQK_B 0xebc + +#define RRX_POER_BEFORE_IQK_B 0xec0 +#define RRX_POER_BEFORE_IQK_B_2 0xec4 +#define RRX_POWER_AFTER_IQK_B 0xec8 +#define RRX_POWER_AFTER_IQK_B_2 0xecc + +#define RRX_OFDM 0xed0 +#define RRX_WAIT_RIFS 0xed4 +#define RRX_TO_RX 0xed8 +#define RSTANDBY 0xedc +#define RSLEEP 0xee0 +#define RPMPD_ANAEN 0xeec + +#define RZEBRA1_HSSIENABLE 0x0 +#define RZEBRA1_TRXENABLE1 0x1 +#define RZEBRA1_TRXENABLE2 0x2 +#define RZEBRA1_AGC 0x4 +#define RZEBRA1_CHARGEPUMP 0x5 +#define RZEBRA1_CHANNEL 0x7 + +#define RZEBRA1_TXGAIN 0x8 +#define RZEBRA1_TXLPF 0x9 +#define RZEBRA1_RXLPF 0xb +#define RZEBRA1_RXHPFCORNER 0xc + +#define RGLOBALCTRL 0 +#define RRTL8256_TXLPF 19 +#define RRTL8256_RXLPF 11 +#define RRTL8258_TXLPF 0x11 +#define RRTL8258_RXLPF 0x13 +#define RRTL8258_RSSILPF 0xa + +#define RF_AC 0x00 + +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 + +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 + +#define RF_TXM_IDAC 0x08 
+#define RF_BS_IQGEN 0x0F + +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 + +#define RF_RX_AGC_HP 0x12 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 +#define RF_POW_ABILITY 0x17 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x24 +#define RF_T_METER_88E 0x42 +#define RF_T_METER_8812A 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_APK 0x63 + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 +#define BGLOBALRESETB 0x200 +#define BOFDMTXSTART 0x4 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 +#define BPMACLOOPBACK 0x10 +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf +#define BOFDMTXRESERVED 0x10 +#define BOFDMTXLENGTH 0x1ffe0 +#define BOFDMTXPARITY 0x20000 +#define BTXHTSIG1 0xffffff +#define BTXHTMCSRATE 0x7f +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff +#define BTXHTSMOOTHING 0x1 +#define BTXHTSOUNDING 0x2 +#define BTXHTRESERVED 0x4 +#define BTXHTAGGREATION 0x8 +#define BTXHTSTBC 0x30 +#define BTXHTADVANCECODING 0x40 +#define BTXHTSHORTGI 0x80 +#define BTXHTNUMBERHT_LTF 0x300 +#define BTXHTCRC8 0x3fc00 +#define BCOUNTERRESET 0x10000 +#define BNUMOFOFDMTX 0xffff +#define BNUMOFCCKTX 0xffff0000 +#define BTXIDLEINTERVAL 0xffff +#define BOFDMSERVICE 0xffff0000 +#define BTXMACHEADER 0xffffffff +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 +#define BTXRANDOMSEED 0xffffffff +#define BCCKTXPREAMBLE 0x1 +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff +#define BCCKTXSERVICE 0xff00 +#define BCCKLENGTHEXT 0x8000 +#define BCCKTXLENGHT 0xffff0000 +#define BCCKTXCRC16 0xffff +#define BCCKTXSTATUS 0x1 +#define BOFDMTXSTATUS 0x2 +#define IS_BB_REG_OFFSET_92S(__offset) \ + ((__offset >= 0x800) && (__offset <= 0xfff)) + +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +/* Block & Path enable*/ +#define ROFDMCCKEN 0x808 +#define BCCKEN 0x10000000 +#define BOFDMEN 0x20000000 +/* Rx antenna*/ +#define RRXPATH 0x808 +#define BRXPATH 0xff +/* Tx antenna*/ +#define RTXPATH 0x80c +#define BTXPATH 0x0fffffff +/* for cck rx path selection*/ +#define RCCK_RX 0xa04 +#define BCCK_RX 0x0c000000 +/* Use LSIG for VHT length*/ +#define RVHTLEN_USE_LSIG 0x8c3 + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define 
BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 +#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 + +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define 
BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 +#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define BCCKRXRFSETTLE 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define 
BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define 
BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#define REG_UN_used_register 0x01bf
+
+/* Path_A RFE control pinmux*/
+#define RA_RFE_PINMUX 0xcb0
+/* Path_B RFE control pinmux*/
+#define RB_RFE_PINMUX 0xeb0
+
+#define RA_RFE_INV 0xcb4
+#define RB_RFE_INV 0xeb4
+
+/* RXIQC */
+/*RxIQ imbalance matrix coeff. A & B*/
+#define RA_RXIQC_AB 0xc10
+/*RxIQ imbalance matrix coeff. C & D*/
+#define RA_RXIQC_CD 0xc14
+/* Path_A TX scaling factor*/
+#define RA_TXSCALE 0xc1c
+/* Path_B TX scaling factor*/
+#define RB_TXSCALE 0xe1c
+/*RxIQ imbalance matrix coeff. A & B*/
+#define RB_RXIQC_AB 0xe10
+/*RxIQ imbalance matrix coeff. C & D*/
+#define RB_RXIQC_CD 0xe14
+/*bit mask for IQC matrix element A & C*/
+#define RXIQC_AC 0x02ff
+/*bit mask for IQC matrix element B & D*/
+#define RXIQC_BD 0x02ff0000
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+
+/*REG_MULTI_FUNC_CTRL(For RTL8723 Only)*/
+/* Enable GPIO[9] as WiFi HW PDn source*/
+#define WL_HWPDN_EN BIT(0)
+/* WiFi HW PDn polarity control*/
+#define WL_HWPDN_SL BIT(1)
+/* WiFi function enable */
+#define WL_FUNC_EN BIT(2)
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT(3)
+/* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT(16)
+/* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT(17)
+/* BT function enable */
+#define BT_FUNC_EN BIT(18)
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT(19)
+/* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT(20)
+/* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT(21)
+/* GPS function enable */
+#define GPS_FUNC_EN BIT(22)
+
+#define BMASKBYTE0 0xff
+#define BMASKBYTE1 0xff00
+#define BMASKBYTE2 0xff0000
+#define BMASKBYTE3 0xff000000
+#define BMASKHWORD 0xffff0000
+#define BMASKLWORD 0x0000ffff
+#define BMASKDWORD 0xffffffff
+#define BMASK12BITS 0xfff
+#define BMASKH4BITS 0xf0000000
+#define BMASKOFDM_D 0xffc00000
+#define BMASKCCK 0x3f3f3f3f
+
+#define BRFREGOFFSETMASK 0xfffff
+
+#define ODM_REG_CCK_RPT_FORMAT_11AC 0x804
+#define ODM_REG_BB_RX_PATH_11AC 0x808
+/*PAGE 9*/
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/*PAGE A*/
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/*PAGE C*/
+#define ODM_REG_IGI_A_11AC 0xC50
+/*PAGE E*/
+#define ODM_REG_IGI_B_11AC 0xE50
+/*PAGE F*/
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+/* 2 MAC REG LIST */
+
+/* DIG Related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+#define ODM_BIT_CCK_RPT_FORMAT_11AC BIT16
+#define ODM_BIT_BB_RX_PATH_11AC 0xF
+
+enum AGGRE_SIZE {
+	HT_AGG_SIZE_8K = 0,
+	HT_AGG_SIZE_16K = 1,
+	HT_AGG_SIZE_32K = 2,
+	HT_AGG_SIZE_64K = 3,
+	VHT_AGG_SIZE_128K = 4,
+	VHT_AGG_SIZE_256K = 5,
+	VHT_AGG_SIZE_512K = 6,
+	VHT_AGG_SIZE_1024K = 7,
+};
+
+#define REG_AMPDU_MAX_LENGTH_8812 0x0458
+
+#endif diff
--git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c new file mode 100644 index 0000000000000000000000000000000000000000..2922538160e5c568efbd01b1de801cf5525961bd --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c @@ -0,0 +1,465 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" + +static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 3); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 1); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 1); + break; + case HT_CHANNEL_WIDTH_80: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 0); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", bandwidth); + break; + } +} + +void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 tx_agc[2] = {0, 0}, tmpval; + bool turbo_scanoff = false; + u8 idx1, idx2; + u8 *ptr; + u8 direction; + u32 pwrtrac_value; + + if (rtlefuse->eeprom_regulatory != 0) + turbo_scanoff = true; + + if (mac->act_scanning) { + tx_agc[RF90_PATH_A] = 0x3f3f3f3f; + tx_agc[RF90_PATH_B] = 0x3f3f3f3f; + + if (turbo_scanoff) { + for (idx1 = RF90_PATH_A; + idx1 <= RF90_PATH_B; + idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + } + } else { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + + if (rtlefuse->eeprom_regulatory == 0) { + tmpval = + (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << + 8); + tx_agc[RF90_PATH_A] += tmpval; + + tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << + 24); + tx_agc[RF90_PATH_B] += tmpval; + } + } + + for (idx1 = RF90_PATH_A; idx1 
<= RF90_PATH_B; idx1++) { + ptr = (u8 *)(&tx_agc[idx1]); + for (idx2 = 0; idx2 < 4; idx2++) { + if (*ptr > RF6052_MAX_TX_PWR) + *ptr = RF6052_MAX_TX_PWR; + ptr++; + } + } + rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); + if (direction == 1) { + tx_agc[0] += pwrtrac_value; + tx_agc[1] += pwrtrac_value; + } else if (direction == 2) { + tx_agc[0] -= pwrtrac_value; + tx_agc[1] -= pwrtrac_value; + } + tmpval = tx_agc[RF90_PATH_A]; + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKDWORD, tmpval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 1~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_A_CCK11_CCK1); + + tmpval = tx_agc[RF90_PATH_B]; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKDWORD, tmpval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_CCK1); +} + +static void rtl8821ae_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, u8 channel, + u32 *ofdmbase, u32 *mcsbase) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 powerbase0, powerbase1; + u8 i, powerlevel[2]; + + for (i = 0; i < 2; i++) { + powerbase0 = ppowerlevel_ofdm[i]; + + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + " [OFDM power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)); + } + + for (i = 0; i < 2; i++) { + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) + powerlevel[i] = ppowerlevel_bw20[i]; + else + powerlevel[i] = ppowerlevel_bw40[i]; + + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | + (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; + + *(mcsbase + i) = powerbase1; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + " [MCS power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); + } +} + +static void get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerbase0, + u32 *powerbase1, + u32 *p_outwriteval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 i, chnlgroup = 0, pwr_diff_limit[4], pwr_diff = 0, customer_pwr_diff; + u32 writeval, customer_limit, rf; + + for (rf = 0; rf < 2; rf++) { + switch (rtlefuse->eeprom_regulatory) { + case 0: + chnlgroup = 0; + + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index + + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + break; + case 1: + if (rtlphy->pwrgroup_cnt == 1) { + chnlgroup = 0; + } else { + if (channel < 3) + chnlgroup = 0; + else if (channel < 6) + chnlgroup = 1; + else if (channel < 9) + chnlgroup = 2; + else if (channel < 12) + chnlgroup = 3; + else if (channel < 14) + chnlgroup = 4; + else if (channel == 14) + chnlgroup = 5; + } + + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + ((index < 2) ? + powerbase0[rf] : + powerbase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + + break; + case 2: + writeval = + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Better regulatory, writeval(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); + break; + case 3: + chnlgroup = 0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 40MHz rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40[rf][channel - + 1]); + } else { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 20MHz rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht20[rf][channel - + 1]); + } + + if (index < 2) + pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][channel-1]; + else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) + pwr_diff = + rtlefuse->txpwr_ht20diff[rf][channel-1]; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + customer_pwr_diff = + rtlefuse->pwrgroup_ht40[rf][channel-1]; + else + customer_pwr_diff = + rtlefuse->pwrgroup_ht20[rf][channel-1]; + + if (pwr_diff > customer_pwr_diff) + pwr_diff = 0; + else + pwr_diff = customer_pwr_diff - pwr_diff; + + for (i = 0; i < 4; i++) { + pwr_diff_limit[i] = + (u8)((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] & + (0x7f << (i * 8))) >> (i * 8)); + + if (pwr_diff_limit[i] > pwr_diff) + pwr_diff_limit[i] = pwr_diff; + } + + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Customer's limit rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), customer_limit); + + writeval = customer_limit + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Customer, writeval rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + break; + default: + chnlgroup = 0; + writeval = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance, writeval rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); + break; + } + + if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) + writeval = writeval - 0x06060606; + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_BT2) + writeval = writeval - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeval; + } +} + +static void _rtl8821ae_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *pvalue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 regoffset_a[6] = { + RTXAGC_A_OFDM18_OFDM6, RTXAGC_A_OFDM54_OFDM24, + RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, + RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 + }; + u16 regoffset_b[6] = { + RTXAGC_B_OFDM18_OFDM6, RTXAGC_B_OFDM54_OFDM24, + RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, + RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 + }; + u8 i, rf, pwr_val[4]; + u32 writeval; + u16 regoffset; + + for (rf = 0; rf < 2; rf++) { + writeval = pvalue[rf]; + for (i = 0; i < 4; i++) { + pwr_val[i] = (u8)((writeval & (0x7f << + (i * 8))) >> (i * 8)); + + if (pwr_val[i] > RF6052_MAX_TX_PWR) + pwr_val[i] = RF6052_MAX_TX_PWR; + } + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; + + if (rf == 0) + regoffset = regoffset_a[index]; + else + regoffset = regoffset_b[index]; + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Set 0x%x = %08x\n", regoffset, writeval); + } +} + +void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, + u8 channel) +{ + u32 writeval[2], powerbase0[2], powerbase1[2]; + u8 index; + u8 direction; + u32 pwrtrac_value; + + rtl8821ae_phy_get_power_base(hw, ppowerlevel_ofdm, + ppowerlevel_bw20, + ppowerlevel_bw40, + channel, + &powerbase0[0], + &powerbase1[0]); + + rtl8821ae_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); + + for (index = 0; index < 6; index++) { + get_txpower_writeval_by_regulatory(hw, channel, index, + &powerbase0[0], + &powerbase1[0], + &writeval[0]); + if (direction == 1) { + writeval[0] += pwrtrac_value; + writeval[1] += pwrtrac_value; + } else if (direction == 2) { + writeval[0] -= pwrtrac_value; + writeval[1] -= pwrtrac_value; + } + _rtl8821ae_write_ofdm_power_reg(hw, index, &writeval[0]); + } +} + +bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl8821ae_phy_rf6052_config_parafile(hw); +} + +static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 rfpath; + bool rtstatus = true; + + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + switch (rfpath) { + case RF90_PATH_A: { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = + rtl8812ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + else + rtstatus = + rtl8821ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + } + case RF90_PATH_B: + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = + rtl8812ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + else + rtstatus = + rtl8821ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + case RF90_PATH_C: + break; + case RF90_PATH_D: + break; + } + + if (!rtstatus) { + RT_TRACE(rtlpriv, 
COMP_INIT, DBG_TRACE, + "Radio[%d] Fail!!", rfpath); + return false; + } + } + + /*put arrays in dm.c*/ + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); + return rtstatus; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h new file mode 100644 index 0000000000000000000000000000000000000000..d9582ee1c335462de324f9d347cdb96be73a971c --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h @@ -0,0 +1,43 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_RF_H__ +#define __RTL8821AE_RF_H__ + +#define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F + +void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, + u8 channel); +bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c new file mode 100644 index 0000000000000000000000000000000000000000..fc92dd6a0d078bf107f461a660b2ca5d3cf03e71 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c @@ -0,0 +1,484 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../core.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "hw.h" +#include "fw.h" +#include "sw.h" +#include "trx.h" +#include "led.h" +#include "table.h" +#include "../btcoexist/rtl_btc.h" + +#include +#include + +static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + /*close ASPM for AMD defaultly */ + rtlpci->const_amdpci_aspm = 0; + + /** + * ASPM PS mode. + * 0 - Disable ASPM, + * 1 - Enable ASPM without Clock Req, + * 2 - Enable ASPM with Clock Req, + * 3 - Alwyas Enable ASPM with Clock Req, + * 4 - Always Enable ASPM without Clock Req. + * set defult to RTL8192CE:3 RTL8192E:2 + */ + rtlpci->const_pci_aspm = 3; + + /*Setting for PCI-E device */ + rtlpci->const_devicepci_aspm_setting = 0x03; + + /*Setting for PCI-E bridge */ + rtlpci->const_hostpci_aspm_setting = 0x02; + + /** + * In Hw/Sw Radio Off situation. + * 0 - Default, + * 1 - From ASPM setting without low Mac Pwr, + * 2 - From ASPM setting with low Mac Pwr, + * 3 - Bus D3 + * set default to RTL8192CE:0 RTL8192SE:2 + */ + rtlpci->const_hwsw_rfoff_d3 = 0; + + /** + * This setting works for those device with + * backdoor ASPM setting such as EPHY setting. + * 0 - Not support ASPM, + * 1 - Support ASPM, + * 2 - According to chipset. + */ + rtlpci->const_support_pciaspm = 1; +} + +static void load_wowlan_fw(struct rtl_priv *rtlpriv) +{ + /* callback routine to load wowlan firmware after main fw has + * been loaded + */ + const struct firmware *wowlan_firmware; + char *fw_name = NULL; + int err; + + /* for wowlan firmware buf */ + rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); + if (!rtlpriv->rtlhal.wowlan_firmware) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't alloc buffer for wowlan fw.\n"); + return; + } + + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE) + fw_name = "rtlwifi/rtl8821aefw_wowlan.bin"; + else + fw_name = "rtlwifi/rtl8812aefw_wowlan.bin"; + err = request_firmware(&wowlan_firmware, fw_name, rtlpriv->io.dev); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to request wowlan firmware!\n"); + goto error; + } + + if (wowlan_firmware->size > 0x8000) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Wowlan Firmware is too big!\n"); + goto error; + } + + memcpy(rtlpriv->rtlhal.wowlan_firmware, wowlan_firmware->data, + wowlan_firmware->size); + rtlpriv->rtlhal.wowlan_fwsize = wowlan_firmware->size; + release_firmware(wowlan_firmware); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "WOWLAN FirmwareDownload OK\n"); + return; +error: + release_firmware(wowlan_firmware); + vfree(rtlpriv->rtlhal.wowlan_firmware); +} + +/*InitializeVariables8812E*/ +int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) +{ + int err = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + rtl8821ae_bt_reg_init(hw); + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); + + rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_flag = 0; + rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.thermalvalue = 0; + rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); + + mac->ht_enable = true; + mac->ht_cur_stbc = 0; + mac->ht_stbc_cap = 0; + 
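+ /* The VHT counterparts below start cleared just like the HT fields
+  * above; the adapter then defaults to the 2.4 GHz band while both
+  * bands (BAND_ON_BOTH) get registered with mac80211. */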
mac->vht_cur_ldpc = 0; + mac->vht_ldpc_cap = 0; + mac->vht_cur_stbc = 0; + mac->vht_stbc_cap = 0; + + rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; + /*following 2 is for register 5G band, refer to _rtl_init_mac80211()*/ + rtlpriv->rtlhal.bandset = BAND_ON_BOTH; + rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; + + rtlpci->receive_config = (RCR_APPFCS | + RCR_APP_MIC | + RCR_APP_ICV | + RCR_APP_PHYST_RXFF | + RCR_NONQOS_VHT | + RCR_HTC_LOC_CTRL | + RCR_AMF | + RCR_ACF | + /*This bit controls the PS-Poll packet filter.*/ + RCR_ADF | + RCR_AICV | + RCR_ACRC32 | + RCR_AB | + RCR_AM | + RCR_APM | + 0); + + rtlpci->irq_mask[0] = + (u32)(IMR_PSTIMEOUT | + IMR_GTINT3 | + IMR_HSISR_IND_ON_INT | + IMR_C2HCMD | + IMR_HIGHDOK | + IMR_MGNTDOK | + IMR_BKDOK | + IMR_BEDOK | + IMR_VIDOK | + IMR_VODOK | + IMR_RDU | + IMR_ROK | + 0); + + rtlpci->irq_mask[1] = + (u32)(IMR_RXFOVW | + IMR_TXFOVW | + 0); + rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | + HSIMR_RON_INT_EN | + 0); + /* for WOWLAN */ + rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET | + WAKE_ON_PATTERN_MATCH; + + /* for debug level */ + rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; + /* for LPS & IPS */ + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + if (rtlpriv->cfg->mod_params->disable_watchdog) + pr_info("watchdog disabled\n"); + rtlpriv->psc.reg_fwctrl_lps = 3; + rtlpriv->psc.reg_max_lps_awakeintvl = 5; + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + + /* for ASPM, you can close aspm through + * set const_support_pciaspm = 0 + */ + rtl8821ae_init_aspm_vars(hw); + + if (rtlpriv->psc.reg_fwctrl_lps == 1) + rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; + else if (rtlpriv->psc.reg_fwctrl_lps == 2) + rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; + else if (rtlpriv->psc.reg_fwctrl_lps == 3) + rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; + + rtlpriv->rtl_fw_second_cb = load_wowlan_fw; + /* for firmware buf */ + rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); + if (!rtlpriv->rtlhal.pfirmware) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't alloc buffer for fw.\n"); + return 1; + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; + else + rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; + + rtlpriv->max_fw_size = 0x8000; + pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to request firmware!\n"); + return 1; + } + return 0; +} + +void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->rtlhal.pfirmware) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } +#if (USE_SPECIFIC_FW_TO_SUPPORT_WOWLAN == 1) + if (rtlpriv->rtlhal.wowlan_firmware) { + vfree(rtlpriv->rtlhal.wowlan_firmware); + rtlpriv->rtlhal.wowlan_firmware = NULL; + } +#endif +} + +/* get bt coexist status */ +bool rtl8821ae_get_btc_status(void) +{ + return true; +} + +static struct rtl_hal_ops rtl8821ae_hal_ops = { + .init_sw_vars = rtl8821ae_init_sw_vars, + .deinit_sw_vars = rtl8821ae_deinit_sw_vars, + .read_eeprom_info = rtl8821ae_read_eeprom_info, + .interrupt_recognized = rtl8821ae_interrupt_recognized, + 
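+ /* The remaining callbacks route the common rtlwifi PCI core into the
+  * 8821AE/8812AE HAL: hardware bring-up and power control, interrupt
+  * masking, TX/RX descriptor handling, channel/bandwidth switching,
+  * BB/RF register access, LED, security and BT-coexist/WoWLAN hooks. */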
.hw_init = rtl8821ae_hw_init, + .hw_disable = rtl8821ae_card_disable, + .hw_suspend = rtl8821ae_suspend, + .hw_resume = rtl8821ae_resume, + .enable_interrupt = rtl8821ae_enable_interrupt, + .disable_interrupt = rtl8821ae_disable_interrupt, + .set_network_type = rtl8821ae_set_network_type, + .set_chk_bssid = rtl8821ae_set_check_bssid, + .set_qos = rtl8821ae_set_qos, + .set_bcn_reg = rtl8821ae_set_beacon_related_registers, + .set_bcn_intv = rtl8821ae_set_beacon_interval, + .update_interrupt_mask = rtl8821ae_update_interrupt_mask, + .get_hw_reg = rtl8821ae_get_hw_reg, + .set_hw_reg = rtl8821ae_set_hw_reg, + .update_rate_tbl = rtl8821ae_update_hal_rate_tbl, + .fill_tx_desc = rtl8821ae_tx_fill_desc, + .fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc, + .query_rx_desc = rtl8821ae_rx_query_desc, + .set_channel_access = rtl8821ae_update_channel_access_setting, + .radio_onoff_checking = rtl8821ae_gpio_radio_on_off_checking, + .set_bw_mode = rtl8821ae_phy_set_bw_mode, + .switch_channel = rtl8821ae_phy_sw_chnl, + .dm_watchdog = rtl8821ae_dm_watchdog, + .scan_operation_backup = rtl8821ae_phy_scan_operation_backup, + .set_rf_power_state = rtl8821ae_phy_set_rf_power_state, + .led_control = rtl8821ae_led_control, + .set_desc = rtl8821ae_set_desc, + .get_desc = rtl8821ae_get_desc, + .is_tx_desc_closed = rtl8821ae_is_tx_desc_closed, + .tx_polling = rtl8821ae_tx_polling, + .enable_hw_sec = rtl8821ae_enable_hw_security_config, + .set_key = rtl8821ae_set_key, + .init_sw_leds = rtl8821ae_init_sw_leds, + .get_bbreg = rtl8821ae_phy_query_bb_reg, + .set_bbreg = rtl8821ae_phy_set_bb_reg, + .get_rfreg = rtl8821ae_phy_query_rf_reg, + .set_rfreg = rtl8821ae_phy_set_rf_reg, + .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, + .get_btc_status = rtl8821ae_get_btc_status, + .rx_command_packet = rtl8821ae_rx_command_packet, + .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, +}; + +static struct rtl_mod_params rtl8821ae_mod_params = { + .sw_crypto = false, + .inactiveps = true, + .swctrl_lps = false, + .fwctrl_lps = true, + .msi_support = true, + .debug = DBG_EMERG, + .disable_watchdog = 0, +}; + +static struct rtl_hal_cfg rtl8821ae_hal_cfg = { + .bar_id = 2, + .write_readback = true, + .name = "rtl8821ae_pci", + .fw_name = "rtlwifi/rtl8821aefw.bin", + .ops = &rtl8821ae_hal_ops, + .mod_params = &rtl8821ae_mod_params, + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = AM, + .maps[MAC_RCR_AB] = AB, + .maps[MAC_RCR_ACRC32] = ACRC32, + .maps[MAC_RCR_ACF] = ACF, + .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, + + .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, + .maps[EFUSE_ANA8M] = ANA8M, + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + .maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = 
IMR_BCNDMAINT6, + .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, +/* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/ + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, +/* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/ +/* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/ + + .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, + + /*VHT hightest rate*/ + .maps[RTL_RC_VHT_RATE_1SS_MCS7] = DESC_RATEVHT1SS_MCS7, + .maps[RTL_RC_VHT_RATE_1SS_MCS8] = DESC_RATEVHT1SS_MCS8, + .maps[RTL_RC_VHT_RATE_1SS_MCS9] = DESC_RATEVHT1SS_MCS9, + .maps[RTL_RC_VHT_RATE_2SS_MCS7] = DESC_RATEVHT2SS_MCS7, + .maps[RTL_RC_VHT_RATE_2SS_MCS8] = DESC_RATEVHT2SS_MCS8, + .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9, +}; + +static struct pci_device_id rtl8821ae_pci_ids[] = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, + {}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids); + +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); + +module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); +module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444); +module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444); +module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444); +module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); +module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, + bool, 0444); +MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); +MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); +MODULE_PARM_DESC(fwlps, 
"Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); +MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); + +static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); + +static struct pci_driver rtl8821ae_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8821ae_pci_ids, + .probe = rtl_pci_probe, + .remove = rtl_pci_disconnect, + .driver.pm = &rtlwifi_pm_ops, +}; + +module_pci_driver(rtl8821ae_driver); diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h new file mode 100644 index 0000000000000000000000000000000000000000..d001e7ce305212f24c5da93e31a7b97856111893 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h @@ -0,0 +1,34 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_SW_H__ +#define __RTL8821AE_SW_H__ + +int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw); +void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw); +void rtl8821ae_init_var_map(struct ieee80211_hw *hw); +bool rtl8821ae_get_btc_status(void); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/rtlwifi/rtl8821ae/table.c new file mode 100644 index 0000000000000000000000000000000000000000..62a0fb76f080d6c756d73a1302eeabc891330c60 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/table.c @@ -0,0 +1,4572 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#include "table.h" +u32 RTL8812AE_PHY_REG_ARRAY[] = { + 0x800, 0x8020D010, + 0x804, 0x080112E0, + 0x808, 0x0E028233, + 0x80C, 0x12131113, + 0x810, 0x20101263, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002083DD, + 0x830, 0x2AAA6C86, + 0x834, 0x0037A706, + 0x838, 0x06C89B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x6210FF8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x338C2878, + 0x870, 0x03333333, + 0x874, 0x31602C2E, + 0x878, 0x00003152, + 0x87C, 0x000FC000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA202033E, + 0x8AC, 0x0FF0FA0A, + 0x8B0, 0x00000600, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C0057FF, + 0x8BC, 0x4CA520A3, + 0x8C0, 0x27F00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00013169, + 0x8CC, 0x08248492, + 0x8D0, 0x0000B800, + 0x8DC, 0x00000000, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0xFF0F07D8, 0xABCD, + 0x900, 0x00000701, + 0xFF0F07D0, 0xCDEF, + 0x900, 0x00000701, + 0xCDCDCDCD, 0xCDCD, + 0x900, 0x00000700, + 0xFF0F07D8, 0xDEAD, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00080080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x00000000, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E4, 0x00000002, + 0x9E8, 0x000002D5, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x218075B2, + 0xA84, 0x001F8C80, + 0xB00, 0x03100000, + 0xB04, 0x0000B000, + 0xB08, 0xAE0201EB, + 0xB0C, 0x01003207, + 0xB10, 0x00009807, + 0xB14, 0x01000000, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x0000001F, + 0xB24, 0x03020100, + 0xB28, 0x07060504, + 0xB2C, 0x0B0A0908, + 0xB30, 0x0F0E0D0C, + 0xB34, 0x13121110, + 0xB38, 0x17161514, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x00000000, + 0xB48, 0x13000032, + 0xB4C, 0x48080000, + 0xB50, 0x00000000, + 0xB54, 0x00000000, + 0xB58, 0x00000000, + 0xB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x12121212, + 0xC24, 0x12121212, + 0xC28, 0x12121212, + 0xC2C, 0x12121212, + 0xC30, 0x12121212, + 0xC34, 0x12121212, + 0xC38, 0x12121212, + 0xC3C, 0x12121212, + 0xC40, 0x12121212, + 0xC44, 0x12121212, + 0xC48, 0x12121212, + 0xC4C, 0x12121212, + 0xC50, 0x00000020, + 0xC54, 0x0008121C, + 0xC58, 0x30000C1C, + 0xC5C, 
0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0xC68, 0x59791979, + 0xC6C, 0x59795979, + 0xC70, 0x19795979, + 0xC74, 0x19795979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xFF0F0740, 0xABCD, + 0xCB0, 0x77547717, + 0xFF0F01C0, 0xCDEF, + 0xCB0, 0x77547717, + 0xFF0F02C0, 0xCDEF, + 0xCB0, 0x77547717, + 0xFF0F07D8, 0xCDEF, + 0xCB0, 0x54547710, + 0xFF0F07D0, 0xCDEF, + 0xCB0, 0x54547710, + 0xCDCDCDCD, 0xCDCD, + 0xCB0, 0x77547777, + 0xFF0F0740, 0xDEAD, + 0xCB4, 0x00000077, + 0xCB8, 0x00508242, + 0xE00, 0x00000007, + 0xE04, 0x00042020, + 0xE08, 0x80410231, + 0xE0C, 0x00000000, + 0xE10, 0x00000100, + 0xE14, 0x01000000, + 0xE1C, 0x40000003, + 0xE20, 0x12121212, + 0xE24, 0x12121212, + 0xE28, 0x12121212, + 0xE2C, 0x12121212, + 0xE30, 0x12121212, + 0xE34, 0x12121212, + 0xE38, 0x12121212, + 0xE3C, 0x12121212, + 0xE40, 0x12121212, + 0xE44, 0x12121212, + 0xE48, 0x12121212, + 0xE4C, 0x12121212, + 0xE50, 0x00000020, + 0xE54, 0x0008121C, + 0xE58, 0x30000C1C, + 0xE5C, 0x00000058, + 0xE60, 0x34344443, + 0xE64, 0x07003333, + 0xE68, 0x59791979, + 0xE6C, 0x59795979, + 0xE70, 0x19795979, + 0xE74, 0x19795979, + 0xE78, 0x19791979, + 0xE7C, 0x19791979, + 0xE80, 0x19791979, + 0xE84, 0x19791979, + 0xE94, 0x0100005C, + 0xE98, 0x00000000, + 0xE9C, 0x00000000, + 0xEA0, 0x00000029, + 0xEA4, 0x08040201, + 0xEA8, 0x80402010, + 0xFF0F0740, 0xABCD, + 0xEB0, 0x77547717, + 0xFF0F01C0, 0xCDEF, + 0xEB0, 0x77547717, + 0xFF0F02C0, 0xCDEF, + 0xEB0, 0x77547717, + 0xFF0F07D8, 0xCDEF, + 0xEB0, 0x54547710, + 0xFF0F07D0, 0xCDEF, + 0xEB0, 0x54547710, + 0xCDCDCDCD, 0xCDCD, + 0xEB0, 0x77547777, + 0xFF0F0740, 0xDEAD, + 0xEB4, 0x00000077, + 0xEB8, 0x00508242, +}; + +u32 RTL8821AE_PHY_REG_ARRAY[] = { + 0x800, 0x0020D090, + 0x804, 0x080112E0, + 0x808, 0x0E028211, + 0x80C, 0x92131111, + 0x810, 0x20101261, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002081DD, + 0x830, 0x2AAA8E24, + 0x834, 0x0037A706, + 0x838, 0x06489B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x62103F8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x888C2878, + 0x870, 0x08888888, + 0x874, 0x31612C2E, + 0x878, 0x00000152, + 0x87C, 0x000FD000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA2000338, + 0x8AC, 0x0FF0FA0A, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C10D7FF, + 0x8BC, 0x0CA52090, + 0x8C0, 0x1BF00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00013169, + 0x8CC, 0x08248492, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0x900, 0x00000700, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x974, 0x000003FF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00480080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x01081008, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 
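+ /* Both PHY_REG tables are flat { baseband register offset, 32-bit
+  * value } pairs that the PHY initialisation code writes in order. */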
0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E0, 0x00005D00, + 0x9E4, 0x00000002, + 0x9E8, 0x00000001, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF000C, + 0xA08, 0x8C8A8300, + 0xA0C, 0x2E68000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x21805490, + 0xA84, 0x001F0000, + 0xB00, 0x03100040, + 0xB04, 0x0000B000, + 0xB08, 0xAE0201EB, + 0xB0C, 0x01003207, + 0xB10, 0x00009807, + 0xB14, 0x01000000, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x0000001F, + 0xB24, 0x03020100, + 0xB28, 0x07060504, + 0xB2C, 0x0B0A0908, + 0xB30, 0x0F0E0D0C, + 0xB34, 0x13121110, + 0xB38, 0x17161514, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x00000000, + 0xB48, 0x13000032, + 0xB4C, 0x48080000, + 0xB50, 0x00000000, + 0xB54, 0x00000000, + 0xB58, 0x00000000, + 0xB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x2C2C2C2C, + 0xC24, 0x30303030, + 0xC28, 0x30303030, + 0xC2C, 0x2C2C2C2C, + 0xC30, 0x2C2C2C2C, + 0xC34, 0x2C2C2C2C, + 0xC38, 0x2C2C2C2C, + 0xC3C, 0x2A2A2A2A, + 0xC40, 0x2A2A2A2A, + 0xC44, 0x2A2A2A2A, + 0xC48, 0x2A2A2A2A, + 0xC4C, 0x2A2A2A2A, + 0xC50, 0x00000020, + 0xC54, 0x001C1208, + 0xC58, 0x30000C1C, + 0xC5C, 0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0xC68, 0x19791979, + 0xC6C, 0x19791979, + 0xC70, 0x19791979, + 0xC74, 0x19791979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCB0, 0x77775747, + 0xCB4, 0x10000077, + 0xCB8, 0x00508240, +}; + +u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840, + 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, + 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638, + 0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, + 0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, + 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, + 0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, + 0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, + 0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, + 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, + 0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840, + 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, + 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638, + 0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, + 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, + 0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, + 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, + 0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, + 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, + 0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, + 0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, + 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, + 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, + 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640, + 1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, + 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, + 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, + 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, + 1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, + 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, + 1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, + 1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, + 
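+ /* The _PG ("power grading") rows are six columns wide:
+  * { band, RF path, tx num, register, bitmask, value }; they are parsed
+  * into the power-by-rate offsets used when TX power is programmed. */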
1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, + 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, + 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640, + 1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, + 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, + 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, + 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, + 1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, + 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, + 1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, + 1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, + 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628 +}; + +u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, + 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838, + 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, + 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838, + 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, + 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, + 0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, + 0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022, + 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636, + 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, + 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636, + 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, + 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, + 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, + 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022 +}; + +u32 RTL8812AE_RADIOA_ARRAY[] = { + 0x000, 0x00010000, + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x01E, 0x00080000, + 0x089, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x086, 0x00014B38, + 0xFF0F02C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F01C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F07D8, 0xCDEF, + 0x086, 0x00014B3A, + 0xFF0F07D0, 0xCDEF, + 0x086, 0x00014B3A, + 0xCDCDCDCD, 0xCDCD, + 0x086, 0x00014B38, + 0xFF0F0740, 0xDEAD, + 0x0B1, 0x0001FC1A, + 0x0B3, 0x000F0810, + 0x0B4, 0x0001A78D, + 0x0BA, 0x00086180, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0xFF0F07D8, 0xABCD, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xFF0F07D0, 0xCDEF, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000100, + 0xFF0F07D8, 0xABCD, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146B, + 0x034, 0x0000006D, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 
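+ /* Pseudo-entries such as 0xFF0Fxxxx/0xABCD, 0xFF0Fxxxx/0xCDEF,
+  * 0xCDCDCDCD/0xCDCD and 0xFF0Fxxxx/0xDEAD are not register writes:
+  * they mark if / else-if / default / end-if branches that the table
+  * loader evaluates against the detected board configuration, so only
+  * the matching variant of each conditional block is programmed. */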
0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0xFF0F0740, 0xABCD, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DC9, + 0x034, 0x00044CE8, + 0x034, 0x000438CA, + 0x034, 0x00042889, + 0x034, 0x0004184A, + 0x034, 0x0004044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 
0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DC9, + 0x034, 0x00024CE8, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x0002184A, + 0x034, 0x0002044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000AFF7, + 0x034, 0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DCD, + 0x034, 0x00004CEB, + 0x034, 0x000038CC, + 0x034, 0x0000288B, + 0x034, 0x0000184C, + 0x034, 0x0000044C, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 
0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000188, + 0x035, 0x00008147, + 0x035, 0x00010147, + 0x035, 0x000201D7, + 0x035, 0x000281D7, + 0x035, 0x000301D7, + 0x035, 0x000401D8, + 0x035, 0x000481D8, + 0x035, 0x000501D8, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00084EB4, + 0x036, 0x0008CC35, + 0x036, 0x00094C35, + 0x036, 0x0009CC35, + 0x036, 0x000A4935, + 0x036, 0x000ACC35, + 0x036, 0x000B4C35, + 0x036, 0x000BCC35, + 0x036, 0x000C4EB4, + 0x036, 0x000CCEB5, + 0x036, 0x000D4EB5, + 0x036, 0x000DCEB5, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0740, 0xABCD, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F01C0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F07D8, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F07D0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x000002A8, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0x01F, 0x00040064, + 0xFF0F0740, 0xABCD, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F02C0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F01C0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F07D8, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 
0x000931D1, + 0xFF0F07D0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000314EB, + 0x064, 0x000196AC, + 0x065, 0x000911D7, + 0xFF0F0740, 0xDEAD, + 0x008, 0x00008400, + 0x01C, 0x000739D2, + 0x0B4, 0x0001E78D, + 0x018, 0x0001F12A, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B4, 0x0001A78D, + 0x018, 0x0001712A, + +}; + +u32 RTL8812AE_RADIOB_ARRAY[] = { + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x089, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x086, 0x00014B38, + 0xFF0F01C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F02C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F07D8, 0xCDEF, + 0x086, 0x00014B3A, + 0xFF0F07D0, 0xCDEF, + 0x086, 0x00014B3A, + 0xCDCDCDCD, 0xCDCD, + 0x086, 0x00014B38, + 0xFF0F0740, 0xDEAD, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0xFF0F07D8, 0xABCD, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xFF0F07D0, 0xCDEF, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000100, + 0xFF0F07D8, 0xABCD, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146B, + 0x034, 0x0000006D, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00063070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 
0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0xFF0F0740, 0xABCD, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DC9, + 0x034, 0x00044CE8, + 0x034, 0x000438CA, + 0x034, 0x00042889, + 0x034, 0x0004184A, + 0x034, 0x0004044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DC9, + 0x034, 0x00024CE8, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x0002184A, + 0x034, 0x0002044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, 
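+ /* RF path B programming largely mirrors the path A table above, using
+  * the same conditional-block markers to pick per-board values. */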
+ 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000AFF7, + 0x034, 0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DCD, + 0x034, 0x00004CEB, + 0x034, 0x000038CC, + 0x034, 0x0000288B, + 0x034, 0x0000184C, + 0x034, 0x0000044C, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000186, + 0x035, 0x00008186, + 0x035, 0x00010185, + 0x035, 0x000201D5, + 0x035, 0x000281D5, + 0x035, 0x000301D5, + 0x035, 0x000401D5, + 0x035, 0x000481D5, + 0x035, 0x000501D5, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 
0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00084EB4, + 0x036, 0x0008C9B4, + 0x036, 0x000949B4, + 0x036, 0x0009C9B4, + 0x036, 0x000A4935, + 0x036, 0x000AC935, + 0x036, 0x000B4935, + 0x036, 0x000BC935, + 0x036, 0x000C4EB4, + 0x036, 0x000CCEB4, + 0x036, 0x000D4EB4, + 0x036, 0x000DCEB4, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0740, 0xABCD, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F01C0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F07D8, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F07D0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x000002AA, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F01C0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F02C0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F07D8, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F07D0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000314EB, + 0x064, 0x000196AC, + 0x065, 0x000931D7, + 0xFF0F0740, 0xDEAD, + 0x008, 0x00008400, + +}; + +u32 RTL8821AE_RADIOA_ARRAY[] = { + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x000, 0x00010000, + 0x01E, 0x00080000, + 0x082, 0x00000830, + 0x083, 0x00021800, + 0x084, 0x00028000, + 0x085, 0x00048000, + 0x086, 0x00094838, + 0x087, 0x00044980, + 0x088, 0x00048000, + 0x089, 0x0000D480, + 0x08A, 0x00042240, + 0x08B, 0x000F0380, + 0x08C, 0x00090000, + 0x08D, 0x00022852, + 0x08E, 0x00065540, + 0x08F, 0x00088001, + 0x0EF, 0x00020000, + 0x03E, 0x00000380, + 0x03F, 0x00090018, + 0x03E, 0x00020380, + 0x03F, 0x000A0018, + 0x03E, 0x00040308, + 0x03F, 
0x000A0018, + 0x03E, 0x00060018, + 0x03F, 0x000A0018, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x089, 0x00000080, + 0x08B, 0x00080180, + 0x0EF, 0x00001000, + 0x03A, 0x00000244, + 0x03B, 0x00038027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00030113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00028027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00027027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0001F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00017F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00008027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x00078027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00070113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00068027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00067027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0005F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00057F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00048027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x000B8027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x000B0113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x000A8027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x000A7027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0009F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00097F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00088027, + 0x03C, 0x000CA000, + 0x0EF, 0x00000000, + 0x0EF, 0x00001100, + 0xFF0F0104, 0xABCD, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0204, 0xCDEF, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0404, 0xCDEF, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0200, 0xCDEF, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF7, + 0x034, 0x00049DF3, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045CCB, + 0x034, 0x0004488D, + 0x034, 0x0004348D, + 0x034, 0x0004248A, + 0x034, 0x0004108D, + 0x034, 0x0004008A, + 0xFF0F0104, 0xDEAD, + 0xFF0F0200, 0xABCD, + 0x034, 0x0002ADF4, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0F3, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF7, + 0xFF0F0200, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00029DF4, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00029DF4, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00029DF4, + 0xFF0F0200, 0xCDEF, + 0x034, 0x00029DF1, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000290F0, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00029DF2, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 
0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00028DEE, + 0x034, 0x00027DEB, + 0x034, 0x00026CCD, + 0x034, 0x00025CCA, + 0x034, 0x0002488C, + 0x034, 0x0002384C, + 0x034, 0x00022849, + 0x034, 0x00021449, + 0x034, 0x0002004D, + 0xFF0F0104, 0xDEAD, + 0xFF0F02C0, 0xABCD, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF7, + 0x034, 0x00009DF4, + 0x034, 0x00008DF1, + 0x034, 0x00007DEE, + 0xFF0F02C0, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00006DCD, + 0x034, 0x00005CCD, + 0x034, 0x00004CCA, + 0x034, 0x0000388C, + 0x034, 0x00002888, + 0x034, 0x00001488, + 0x034, 0x00000486, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0xFF0F0104, 0xABCD, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xFF0F0204, 0xCDEF, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xFF0F0404, 0xCDEF, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xCDCDCDCD, 0xCDCD, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0xFF0F0104, 0xABCD, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xFF0F0204, 0xCDEF, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 
0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xFF0F0404, 0xCDEF, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xCDCDCDCD, 0xCDCD, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0104, 0xABCD, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xFF0F0204, 0xCDEF, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xFF0F0404, 0xCDEF, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x03C, 0x00000800, + 0xFF0F0204, 0xCDEF, + 0x03C, 0x00000800, + 0xFF0F0404, 0xCDEF, + 0x03C, 0x00000800, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x00000820, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x00000900, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0xFF0F0104, 0xABCD, + 0x008, 0x0004E400, + 0xFF0F0204, 0xCDEF, + 0x008, 0x0004E400, + 0xFF0F0404, 0xCDEF, + 0x008, 0x0004E400, + 0xCDCDCDCD, 0xCDCD, + 0x008, 0x00002000, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x0DF, 0x000000C0, + 0x01F, 0x00040064, + 0xFF0F0104, 0xABCD, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xFF0F0204, 0xCDEF, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xFF0F0404, 0xCDEF, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xCDCDCDCD, 0xCDCD, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xFF0F0204, 0xCDEF, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xFF0F0404, 0xCDEF, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x063, 0x000110E9, + 0xFF0F0204, 0xCDEF, + 0x063, 0x000110E9, + 0xFF0F0404, 0xCDEF, + 0x063, 0x000110E9, + 0xFF0F0200, 0xCDEF, + 0x063, 0x000710E9, + 0xFF0F02C0, 0xCDEF, + 0x063, 0x000110E9, + 0xCDCDCDCD, 0xCDCD, + 0x063, 0x000714E9, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x064, 0x0001C27C, + 0xFF0F0204, 0xCDEF, + 0x064, 0x0001C27C, + 0xFF0F0404, 0xCDEF, + 0x064, 0x0001C27C, + 0xCDCDCDCD, 0xCDCD, + 0x064, 0x0001C67C, + 0xFF0F0104, 0xDEAD, + 0xFF0F0200, 0xABCD, + 0x065, 0x00093016, + 0xFF0F02C0, 0xCDEF, + 0x065, 0x00093015, + 0xCDCDCDCD, 0xCDCD, + 0x065, 0x00091016, + 0xFF0F0200, 0xDEAD, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0x03B, 0x0003824B, + 0x03B, 0x0003024B, + 0x03B, 0x0002844B, + 0x03B, 0x00020F4B, + 0x03B, 0x00018F4B, + 0x03B, 0x000104B2, + 0x03B, 0x00008049, + 0x03B, 0x00000148, + 0x03B, 0x0007824B, + 0x03B, 0x0007024B, + 0x03B, 0x0006824B, + 0x03B, 0x00060F4B, + 0x03B, 0x00058F4B, + 0x03B, 0x000504B2, + 0x03B, 0x00048049, + 0x03B, 0x00040148, + 0x0EF, 0x00000000, + 0x0EF, 0x00000100, + 0x034, 0x0000ADF3, + 0x034, 0x00009DEF, + 0x034, 0x00008DEC, + 0x034, 0x00007DE9, + 0x034, 0x00006CED, + 0x034, 0x00005CE9, + 0x034, 0x000044E9, + 0x034, 0x000034E6, + 0x034, 0x0000246A, + 0x034, 0x00001467, + 0x034, 0x00000068, + 0x0EF, 0x00000000, + 0x0ED, 0x00000010, + 0x044, 0x0000ADF2, + 0x044, 0x00009DEF, + 0x044, 0x00008DEC, + 0x044, 0x00007DE9, + 0x044, 0x00006CEC, + 0x044, 0x00005CE9, + 0x044, 0x000044EC, + 0x044, 0x000034E9, + 0x044, 
0x0000246C, + 0x044, 0x00001469, + 0x044, 0x0000006C, + 0x0ED, 0x00000000, + 0x0ED, 0x00000001, + 0x040, 0x00038DA7, + 0x040, 0x000300C2, + 0x040, 0x000288E2, + 0x040, 0x000200B8, + 0x040, 0x000188A5, + 0x040, 0x00010FBC, + 0x040, 0x00008F71, + 0x040, 0x00000240, + 0x0ED, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000120, + 0x035, 0x00008120, + 0x035, 0x00010120, + 0x036, 0x00000085, + 0x036, 0x00008085, + 0x036, 0x00010085, + 0x036, 0x00018085, + 0x0EF, 0x00000000, + 0x051, 0x00000C31, + 0x052, 0x00000622, + 0x053, 0x000FC70B, + 0x054, 0x0000017E, + 0x056, 0x00051DF3, + 0x051, 0x00000C01, + 0x052, 0x000006D6, + 0x053, 0x000FC649, + 0x070, 0x00049661, + 0x071, 0x0007843E, + 0x072, 0x00000382, + 0x074, 0x00051400, + 0x035, 0x00000160, + 0x035, 0x00008160, + 0x035, 0x00010160, + 0x036, 0x00000124, + 0x036, 0x00008124, + 0x036, 0x00010124, + 0x036, 0x00018124, + 0x0ED, 0x0000000C, + 0x045, 0x00000140, + 0x045, 0x00008140, + 0x045, 0x00010140, + 0x046, 0x00000124, + 0x046, 0x00008124, + 0x046, 0x00010124, + 0x046, 0x00018124, + 0x0DF, 0x00000088, + 0x0B3, 0x000F0E18, + 0x0B4, 0x0001214C, + 0x0B7, 0x0003000C, + 0x01C, 0x000539D2, + 0x018, 0x0001F12A, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x018, 0x0001712A, +}; + +u32 RTL8812AE_MAC_REG_ARRAY[] = { + 0x010, 0x0000000C, + 0xFF0F0180, 0xABCD, + 0x025, 0x0000000F, + 0xFF0F01C0, 0xCDEF, + 0x025, 0x0000000F, + 0xCDCDCDCD, 0xCDCD, + 0x025, 0x0000006F, + 0xFF0F0180, 0xDEAD, + 0x072, 0x00000000, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x45B, 0x00000080, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x604, 0x00000001, + 0x605, 0x00000030, + 0x607, 0x00000003, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000080, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, + +}; + 
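+/*
+ * Note on the tables in this file (informal reading of the data, not an
+ * authoritative description): each u32 array is a flat list of
+ * (register offset, value) pairs written out in order by the MAC/PHY/RF
+ * configuration code. Runs of pairs appear to be wrapped in conditional
+ * blocks delimited by sentinel pairs -- a 0xFF0Fxxxx mask followed by
+ * 0xABCD seems to open a block, 0xCDEF marks an alternative branch,
+ * 0xCDCDCDCD/0xCDCD the default branch, and 0xDEAD the terminator -- but
+ * the authoritative semantics are defined by the parser that consumes
+ * these arrays. The *_TXPWR_LMT string tables further below carry seven
+ * strings per entry: regulatory domain, band, bandwidth, rate section,
+ * TX path count, channel, and a power-limit value.
+ */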
+u32 RTL8821AE_MAC_REG_ARRAY[] = { + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x0000003F, + 0x4C9, 0x000000FF, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x607, 0x00000007, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +u32 RTL8812AE_AGC_TAB_ARRAY[] = { + 0xFF0F07D8, 0xABCD, + 0x81C, 0xFC000001, + 0x81C, 0xFB020001, + 0x81C, 0xFA040001, + 0x81C, 0xF9060001, + 0x81C, 0xF8080001, + 0x81C, 0xF70A0001, + 0x81C, 0xF60C0001, + 0x81C, 0xF50E0001, + 0x81C, 0xF4100001, + 0x81C, 0xF3120001, + 0x81C, 0xF2140001, + 0x81C, 0xF1160001, + 0x81C, 0xF0180001, + 0x81C, 0xEF1A0001, + 0x81C, 0xEE1C0001, + 0x81C, 0xED1E0001, + 0x81C, 0xEC200001, + 0x81C, 0xEB220001, + 0x81C, 0xEA240001, + 0x81C, 0xCD260001, + 0x81C, 0xCC280001, + 0x81C, 0xCB2A0001, + 0x81C, 0xCA2C0001, + 0x81C, 0xC92E0001, + 0x81C, 0xC8300001, + 0x81C, 0xA6320001, + 0x81C, 0xA5340001, + 0x81C, 0xA4360001, + 0x81C, 0xA3380001, + 0x81C, 0xA23A0001, + 0x81C, 0x883C0001, + 0x81C, 0x873E0001, + 0x81C, 0x86400001, + 0x81C, 0x85420001, + 0x81C, 0x84440001, + 0x81C, 0x83460001, + 0x81C, 0x82480001, + 0x81C, 0x814A0001, + 0x81C, 0x484C0001, + 0x81C, 0x474E0001, + 0x81C, 0x46500001, + 0x81C, 0x45520001, + 0x81C, 0x44540001, + 0x81C, 0x43560001, + 0x81C, 0x42580001, + 0x81C, 0x415A0001, + 0x81C, 0x255C0001, + 0x81C, 0x245E0001, + 0x81C, 0x23600001, + 0x81C, 0x22620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x216C0001, + 0x81C, 0x216E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xFF0F07D0, 0xCDEF, + 0x81C, 0xF9000001, + 0x81C, 0xF8020001, + 0x81C, 
0xF7040001, + 0x81C, 0xF6060001, + 0x81C, 0xF5080001, + 0x81C, 0xF40A0001, + 0x81C, 0xF30C0001, + 0x81C, 0xF20E0001, + 0x81C, 0xF1100001, + 0x81C, 0xF0120001, + 0x81C, 0xEF140001, + 0x81C, 0xEE160001, + 0x81C, 0xED180001, + 0x81C, 0xEC1A0001, + 0x81C, 0xEB1C0001, + 0x81C, 0xEA1E0001, + 0x81C, 0xCD200001, + 0x81C, 0xCC220001, + 0x81C, 0xCB240001, + 0x81C, 0xCA260001, + 0x81C, 0xC9280001, + 0x81C, 0xC82A0001, + 0x81C, 0xC72C0001, + 0x81C, 0xC62E0001, + 0x81C, 0xA5300001, + 0x81C, 0xA4320001, + 0x81C, 0xA3340001, + 0x81C, 0xA2360001, + 0x81C, 0x88380001, + 0x81C, 0x873A0001, + 0x81C, 0x863C0001, + 0x81C, 0x853E0001, + 0x81C, 0x84400001, + 0x81C, 0x83420001, + 0x81C, 0x82440001, + 0x81C, 0x81460001, + 0x81C, 0x48480001, + 0x81C, 0x474A0001, + 0x81C, 0x464C0001, + 0x81C, 0x454E0001, + 0x81C, 0x44500001, + 0x81C, 0x43520001, + 0x81C, 0x42540001, + 0x81C, 0x41560001, + 0x81C, 0x25580001, + 0x81C, 0x245A0001, + 0x81C, 0x235C0001, + 0x81C, 0x225E0001, + 0x81C, 0x21600001, + 0x81C, 0x21620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x236C0001, + 0x81C, 0x226E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF000001, + 0x81C, 0xFF020001, + 0x81C, 0xFF040001, + 0x81C, 0xFF060001, + 0x81C, 0xFF080001, + 0x81C, 0xFE0A0001, + 0x81C, 0xFD0C0001, + 0x81C, 0xFC0E0001, + 0x81C, 0xFB100001, + 0x81C, 0xFA120001, + 0x81C, 0xF9140001, + 0x81C, 0xF8160001, + 0x81C, 0xF7180001, + 0x81C, 0xF61A0001, + 0x81C, 0xF51C0001, + 0x81C, 0xF41E0001, + 0x81C, 0xF3200001, + 0x81C, 0xF2220001, + 0x81C, 0xF1240001, + 0x81C, 0xF0260001, + 0x81C, 0xEF280001, + 0x81C, 0xEE2A0001, + 0x81C, 0xED2C0001, + 0x81C, 0xEC2E0001, + 0x81C, 0xEB300001, + 0x81C, 0xEA320001, + 0x81C, 0xE9340001, + 0x81C, 0xE8360001, + 0x81C, 0xE7380001, + 0x81C, 0xE63A0001, + 0x81C, 0xE53C0001, + 0x81C, 0xC73E0001, + 0x81C, 0xC6400001, + 0x81C, 0xC5420001, + 0x81C, 0xC4440001, + 0x81C, 0xC3460001, + 0x81C, 0xC2480001, + 0x81C, 0xC14A0001, + 0x81C, 0xA74C0001, + 0x81C, 0xA64E0001, + 0x81C, 0xA5500001, + 0x81C, 0xA4520001, + 0x81C, 0xA3540001, + 0x81C, 0xA2560001, + 0x81C, 0xA1580001, + 0x81C, 0x675A0001, + 0x81C, 0x665C0001, + 0x81C, 0x655E0001, + 0x81C, 0x64600001, + 0x81C, 0x63620001, + 0x81C, 0x48640001, + 0x81C, 0x47660001, + 0x81C, 0x46680001, + 0x81C, 0x456A0001, + 0x81C, 0x446C0001, + 0x81C, 0x436E0001, + 0x81C, 0x42700001, + 0x81C, 0x41720001, + 0x81C, 0x41740001, + 0x81C, 0x41760001, + 0x81C, 0x41780001, + 0x81C, 0x417A0001, + 0x81C, 0x417C0001, + 0x81C, 0x417E0001, + 0xFF0F07D8, 0xDEAD, + 0xFF0F0180, 0xABCD, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 
0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F0280, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F01C0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F02C0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 
0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F07D8, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F07D0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 
0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF800001, + 0x81C, 0xFF820001, + 0x81C, 0xFF840001, + 0x81C, 0xFE860001, + 0x81C, 0xFD880001, + 0x81C, 0xFC8A0001, + 0x81C, 0xFB8C0001, + 0x81C, 0xFA8E0001, + 0x81C, 0xF9900001, + 0x81C, 0xF8920001, + 0x81C, 0xF7940001, + 0x81C, 0xF6960001, + 0x81C, 0xF5980001, + 0x81C, 0xF49A0001, + 0x81C, 0xF39C0001, + 0x81C, 0xF29E0001, + 0x81C, 0xF1A00001, + 0x81C, 0xF0A20001, + 0x81C, 0xEFA40001, + 0x81C, 0xEEA60001, + 0x81C, 0xEDA80001, + 0x81C, 0xECAA0001, + 0x81C, 0xEBAC0001, + 0x81C, 0xEAAE0001, + 0x81C, 0xE9B00001, + 0x81C, 0xE8B20001, + 0x81C, 0xE7B40001, + 0x81C, 0xE6B60001, + 0x81C, 0xE5B80001, + 0x81C, 0xE4BA0001, + 0x81C, 0xE3BC0001, + 0x81C, 0xA8BE0001, + 0x81C, 0xA7C00001, + 0x81C, 0xA6C20001, + 0x81C, 0xA5C40001, + 0x81C, 0xA4C60001, + 0x81C, 0xA3C80001, + 0x81C, 0xA2CA0001, + 0x81C, 0xA1CC0001, + 0x81C, 0x68CE0001, + 0x81C, 0x67D00001, + 0x81C, 0x66D20001, + 0x81C, 0x65D40001, + 0x81C, 0x64D60001, + 0x81C, 0x47D80001, + 0x81C, 0x46DA0001, + 0x81C, 0x45DC0001, + 0x81C, 0x44DE0001, + 0x81C, 0x43E00001, + 0x81C, 0x42E20001, + 0x81C, 0x08E40001, + 0x81C, 0x07E60001, + 0x81C, 0x06E80001, + 0x81C, 0x05EA0001, + 0x81C, 0x04EC0001, + 0x81C, 0x03EE0001, + 0x81C, 0x02F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F0180, 0xDEAD, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0xE50, 0x00000022, + 0xE50, 0x00000020, + +}; + +u32 RTL8821AE_AGC_TAB_ARRAY[] = { + 0x81C, 0xBF000001, + 0x81C, 0xBF020001, + 0x81C, 0xBF040001, + 0x81C, 0xBF060001, + 0x81C, 0xBE080001, + 0x81C, 0xBD0A0001, + 0x81C, 0xBC0C0001, + 0x81C, 0xBA0E0001, + 0x81C, 0xB9100001, + 0x81C, 0xB8120001, + 0x81C, 0xB7140001, + 0x81C, 0xB6160001, + 0x81C, 0xB5180001, + 0x81C, 0xB41A0001, + 0x81C, 0xB31C0001, + 0x81C, 0xB21E0001, + 0x81C, 0xB1200001, + 0x81C, 0xB0220001, + 0x81C, 0xAF240001, + 0x81C, 0xAE260001, + 0x81C, 0xAD280001, + 0x81C, 0xAC2A0001, + 0x81C, 0xAB2C0001, + 0x81C, 0xAA2E0001, + 0x81C, 0xA9300001, + 0x81C, 0xA8320001, + 0x81C, 0xA7340001, + 0x81C, 0xA6360001, + 0x81C, 0xA5380001, + 0x81C, 0xA43A0001, + 0x81C, 0xA33C0001, + 0x81C, 0x673E0001, + 0x81C, 0x66400001, + 0x81C, 0x65420001, + 0x81C, 0x64440001, + 0x81C, 0x63460001, + 0x81C, 0x62480001, + 0x81C, 0x614A0001, + 0x81C, 0x474C0001, + 0x81C, 0x464E0001, + 0x81C, 0x45500001, + 0x81C, 0x44520001, + 0x81C, 0x43540001, + 0x81C, 0x42560001, + 0x81C, 0x41580001, + 0x81C, 0x285A0001, + 0x81C, 0x275C0001, + 0x81C, 0x265E0001, + 0x81C, 0x25600001, + 0x81C, 0x24620001, + 0x81C, 0x0A640001, + 0x81C, 0x09660001, + 0x81C, 0x08680001, + 0x81C, 0x076A0001, + 0x81C, 0x066C0001, + 0x81C, 0x056E0001, + 0x81C, 0x04700001, + 0x81C, 0x03720001, + 0x81C, 0x02740001, + 0x81C, 0x01760001, + 0x81C, 0x01780001, + 0x81C, 0x017A0001, + 0x81C, 0x017C0001, + 0x81C, 0x017E0001, + 0xFF0F02C0, 0xABCD, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 
0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF000101, + 0x81C, 0xFF020101, + 0x81C, 0xFE040101, + 0x81C, 0xFD060101, + 0x81C, 0xFC080101, + 0x81C, 0xFD0A0101, + 0x81C, 0xFC0C0101, + 0x81C, 0xFB0E0101, + 0x81C, 0xFA100101, + 0x81C, 0xF9120101, + 0x81C, 0xF8140101, + 0x81C, 0xF7160101, + 0x81C, 0xF6180101, + 0x81C, 0xF51A0101, + 0x81C, 0xF41C0101, + 0x81C, 0xF31E0101, + 0x81C, 0xF2200101, + 0x81C, 0xF1220101, + 0x81C, 0xF0240101, + 0x81C, 0xEF260101, + 0x81C, 0xEE280101, + 0x81C, 0xED2A0101, + 0x81C, 0xEC2C0101, + 0x81C, 0xEB2E0101, + 0x81C, 0xEA300101, + 0x81C, 0xE9320101, + 0x81C, 0xE8340101, + 0x81C, 0xE7360101, + 0x81C, 0xE6380101, + 0x81C, 0xE53A0101, + 0x81C, 0xE43C0101, + 0x81C, 0xE33E0101, + 0x81C, 0xA5400101, + 0x81C, 0xA4420101, + 0x81C, 0xA3440101, + 0x81C, 0x87460101, + 0x81C, 0x86480101, + 0x81C, 0x854A0101, + 0x81C, 0x844C0101, + 0x81C, 0x694E0101, + 0x81C, 0x68500101, + 0x81C, 0x67520101, + 0x81C, 0x66540101, + 0x81C, 0x49560101, + 0x81C, 0x48580101, + 0x81C, 0x475A0101, + 0x81C, 0x2A5C0101, + 0x81C, 0x295E0101, + 0x81C, 0x28600101, + 0x81C, 0x27620101, + 0x81C, 0x26640101, + 0x81C, 0x25660101, + 0x81C, 0x24680101, + 0x81C, 0x236A0101, + 0x81C, 0x056C0101, + 0x81C, 0x046E0101, + 0x81C, 0x03700101, + 0x81C, 0x02720101, + 0xFF0F02C0, 0xDEAD, + 0x81C, 0x01740101, + 0x81C, 0x01760101, + 0x81C, 0x01780101, + 0x81C, 0x017A0101, + 0x81C, 0x017C0101, + 0x81C, 0x017E0101, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + +}; + +/****************************************************************************** +* TXPWR_LMT.TXT +******************************************************************************/ + +u8 *RTL8812AE_TXPWR_LMT[] = { + "FCC", "2.4G", "20M", "CCK", "1T", "01", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "02", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "02", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "03", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "03", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "04", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "04", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "05", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "05", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "06", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "06", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "07", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "07", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "08", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32", + 
"MKK", "2.4G", "20M", "CCK", "1T", "08", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "09", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "09", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "10", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "10", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "11", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "11", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "12", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "12", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "13", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "13", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "14", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63", + "MKK", "2.4G", "20M", "CCK", "1T", "14", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "01", "34", + "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "02", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "03", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "04", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "05", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "06", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "07", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "08", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "09", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "10", "36", + "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "11", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63", + "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "1T", "01", "34", + "ETSI", "2.4G", "20M", "HT", "1T", "01", "32", + "MKK", "2.4G", "20M", "HT", "1T", "01", "32", + "FCC", "2.4G", "20M", "HT", "1T", "02", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "02", "32", + "MKK", "2.4G", "20M", "HT", "1T", "02", "32", + "FCC", "2.4G", "20M", "HT", "1T", "03", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "03", "32", + "MKK", "2.4G", "20M", "HT", "1T", "03", "32", + "FCC", "2.4G", "20M", "HT", "1T", "04", "36", + "ETSI", "2.4G", 
"20M", "HT", "1T", "04", "32", + "MKK", "2.4G", "20M", "HT", "1T", "04", "32", + "FCC", "2.4G", "20M", "HT", "1T", "05", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "05", "32", + "MKK", "2.4G", "20M", "HT", "1T", "05", "32", + "FCC", "2.4G", "20M", "HT", "1T", "06", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "06", "32", + "MKK", "2.4G", "20M", "HT", "1T", "06", "32", + "FCC", "2.4G", "20M", "HT", "1T", "07", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "07", "32", + "MKK", "2.4G", "20M", "HT", "1T", "07", "32", + "FCC", "2.4G", "20M", "HT", "1T", "08", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "08", "32", + "MKK", "2.4G", "20M", "HT", "1T", "08", "32", + "FCC", "2.4G", "20M", "HT", "1T", "09", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "09", "32", + "MKK", "2.4G", "20M", "HT", "1T", "09", "32", + "FCC", "2.4G", "20M", "HT", "1T", "10", "36", + "ETSI", "2.4G", "20M", "HT", "1T", "10", "32", + "MKK", "2.4G", "20M", "HT", "1T", "10", "32", + "FCC", "2.4G", "20M", "HT", "1T", "11", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "11", "32", + "MKK", "2.4G", "20M", "HT", "1T", "11", "32", + "FCC", "2.4G", "20M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "12", "32", + "MKK", "2.4G", "20M", "HT", "1T", "12", "32", + "FCC", "2.4G", "20M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "13", "32", + "MKK", "2.4G", "20M", "HT", "1T", "13", "32", + "FCC", "2.4G", "20M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "14", "63", + "MKK", "2.4G", "20M", "HT", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "2T", "01", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "01", "32", + "MKK", "2.4G", "20M", "HT", "2T", "01", "32", + "FCC", "2.4G", "20M", "HT", "2T", "02", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "02", "32", + "MKK", "2.4G", "20M", "HT", "2T", "02", "32", + "FCC", "2.4G", "20M", "HT", "2T", "03", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "03", "32", + "MKK", "2.4G", "20M", "HT", "2T", "03", "32", + "FCC", "2.4G", "20M", "HT", "2T", "04", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "04", "32", + "MKK", "2.4G", "20M", "HT", "2T", "04", "32", + "FCC", "2.4G", "20M", "HT", "2T", "05", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "05", "32", + "MKK", "2.4G", "20M", "HT", "2T", "05", "32", + "FCC", "2.4G", "20M", "HT", "2T", "06", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "06", "32", + "MKK", "2.4G", "20M", "HT", "2T", "06", "32", + "FCC", "2.4G", "20M", "HT", "2T", "07", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "07", "32", + "MKK", "2.4G", "20M", "HT", "2T", "07", "32", + "FCC", "2.4G", "20M", "HT", "2T", "08", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "08", "32", + "MKK", "2.4G", "20M", "HT", "2T", "08", "32", + "FCC", "2.4G", "20M", "HT", "2T", "09", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "09", "32", + "MKK", "2.4G", "20M", "HT", "2T", "09", "32", + "FCC", "2.4G", "20M", "HT", "2T", "10", "34", + "ETSI", "2.4G", "20M", "HT", "2T", "10", "32", + "MKK", "2.4G", "20M", "HT", "2T", "10", "32", + "FCC", "2.4G", "20M", "HT", "2T", "11", "30", + "ETSI", "2.4G", "20M", "HT", "2T", "11", "32", + "MKK", "2.4G", "20M", "HT", "2T", "11", "32", + "FCC", "2.4G", "20M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "12", "32", + "MKK", "2.4G", "20M", "HT", "2T", "12", "32", + "FCC", "2.4G", "20M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "13", "32", + "MKK", "2.4G", "20M", "HT", "2T", "13", "32", + "FCC", "2.4G", "20M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "14", "63", + "MKK", "2.4G", "20M", "HT", "2T", "14", 
"63", + "FCC", "2.4G", "40M", "HT", "1T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "01", "63", + "MKK", "2.4G", "40M", "HT", "1T", "01", "63", + "FCC", "2.4G", "40M", "HT", "1T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "02", "63", + "MKK", "2.4G", "40M", "HT", "1T", "02", "63", + "FCC", "2.4G", "40M", "HT", "1T", "03", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "03", "32", + "MKK", "2.4G", "40M", "HT", "1T", "03", "32", + "FCC", "2.4G", "40M", "HT", "1T", "04", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "04", "32", + "MKK", "2.4G", "40M", "HT", "1T", "04", "32", + "FCC", "2.4G", "40M", "HT", "1T", "05", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "05", "32", + "MKK", "2.4G", "40M", "HT", "1T", "05", "32", + "FCC", "2.4G", "40M", "HT", "1T", "06", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "06", "32", + "MKK", "2.4G", "40M", "HT", "1T", "06", "32", + "FCC", "2.4G", "40M", "HT", "1T", "07", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "07", "32", + "MKK", "2.4G", "40M", "HT", "1T", "07", "32", + "FCC", "2.4G", "40M", "HT", "1T", "08", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "08", "32", + "MKK", "2.4G", "40M", "HT", "1T", "08", "32", + "FCC", "2.4G", "40M", "HT", "1T", "09", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "09", "32", + "MKK", "2.4G", "40M", "HT", "1T", "09", "32", + "FCC", "2.4G", "40M", "HT", "1T", "10", "36", + "ETSI", "2.4G", "40M", "HT", "1T", "10", "32", + "MKK", "2.4G", "40M", "HT", "1T", "10", "32", + "FCC", "2.4G", "40M", "HT", "1T", "11", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "11", "32", + "MKK", "2.4G", "40M", "HT", "1T", "11", "32", + "FCC", "2.4G", "40M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "12", "32", + "MKK", "2.4G", "40M", "HT", "1T", "12", "32", + "FCC", "2.4G", "40M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "13", "32", + "MKK", "2.4G", "40M", "HT", "1T", "13", "32", + "FCC", "2.4G", "40M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "14", "63", + "MKK", "2.4G", "40M", "HT", "1T", "14", "63", + "FCC", "2.4G", "40M", "HT", "2T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "01", "63", + "MKK", "2.4G", "40M", "HT", "2T", "01", "63", + "FCC", "2.4G", "40M", "HT", "2T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "02", "63", + "MKK", "2.4G", "40M", "HT", "2T", "02", "63", + "FCC", "2.4G", "40M", "HT", "2T", "03", "30", + "ETSI", "2.4G", "40M", "HT", "2T", "03", "30", + "MKK", "2.4G", "40M", "HT", "2T", "03", "30", + "FCC", "2.4G", "40M", "HT", "2T", "04", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "04", "30", + "MKK", "2.4G", "40M", "HT", "2T", "04", "30", + "FCC", "2.4G", "40M", "HT", "2T", "05", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "05", "30", + "MKK", "2.4G", "40M", "HT", "2T", "05", "30", + "FCC", "2.4G", "40M", "HT", "2T", "06", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "06", "30", + "MKK", "2.4G", "40M", "HT", "2T", "06", "30", + "FCC", "2.4G", "40M", "HT", "2T", "07", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "07", "30", + "MKK", "2.4G", "40M", "HT", "2T", "07", "30", + "FCC", "2.4G", "40M", "HT", "2T", "08", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "08", "30", + "MKK", "2.4G", "40M", "HT", "2T", "08", "30", + "FCC", "2.4G", "40M", "HT", "2T", "09", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "09", "30", + "MKK", "2.4G", "40M", "HT", "2T", "09", "30", + "FCC", "2.4G", "40M", "HT", "2T", "10", "34", + "ETSI", "2.4G", "40M", "HT", "2T", "10", "30", + "MKK", "2.4G", "40M", "HT", "2T", "10", "30", + "FCC", "2.4G", "40M", "HT", "2T", "11", "30", + "ETSI", "2.4G", 
"40M", "HT", "2T", "11", "30", + "MKK", "2.4G", "40M", "HT", "2T", "11", "30", + "FCC", "2.4G", "40M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "12", "32", + "MKK", "2.4G", "40M", "HT", "2T", "12", "32", + "FCC", "2.4G", "40M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "13", "32", + "MKK", "2.4G", "40M", "HT", "2T", "13", "32", + "FCC", "2.4G", "40M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "14", "63", + "MKK", "2.4G", "40M", "HT", "2T", "14", "63", + "FCC", "5G", "20M", "OFDM", "1T", "36", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "36", "32", + "MKK", "5G", "20M", "OFDM", "1T", "36", "32", + "FCC", "5G", "20M", "OFDM", "1T", "40", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "40", "32", + "MKK", "5G", "20M", "OFDM", "1T", "40", "32", + "FCC", "5G", "20M", "OFDM", "1T", "44", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "44", "32", + "MKK", "5G", "20M", "OFDM", "1T", "44", "32", + "FCC", "5G", "20M", "OFDM", "1T", "48", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "48", "32", + "MKK", "5G", "20M", "OFDM", "1T", "48", "32", + "FCC", "5G", "20M", "OFDM", "1T", "52", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "52", "32", + "MKK", "5G", "20M", "OFDM", "1T", "52", "32", + "FCC", "5G", "20M", "OFDM", "1T", "56", "34", + "ETSI", "5G", "20M", "OFDM", "1T", "56", "32", + "MKK", "5G", "20M", "OFDM", "1T", "56", "32", + "FCC", "5G", "20M", "OFDM", "1T", "60", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "60", "32", + "MKK", "5G", "20M", "OFDM", "1T", "60", "32", + "FCC", "5G", "20M", "OFDM", "1T", "64", "28", + "ETSI", "5G", "20M", "OFDM", "1T", "64", "32", + "MKK", "5G", "20M", "OFDM", "1T", "64", "32", + "FCC", "5G", "20M", "OFDM", "1T", "100", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "100", "32", + "MKK", "5G", "20M", "OFDM", "1T", "100", "32", + "FCC", "5G", "20M", "OFDM", "1T", "114", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "114", "32", + "MKK", "5G", "20M", "OFDM", "1T", "114", "32", + "FCC", "5G", "20M", "OFDM", "1T", "108", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "108", "32", + "MKK", "5G", "20M", "OFDM", "1T", "108", "32", + "FCC", "5G", "20M", "OFDM", "1T", "112", "34", + "ETSI", "5G", "20M", "OFDM", "1T", "112", "32", + "MKK", "5G", "20M", "OFDM", "1T", "112", "32", + "FCC", "5G", "20M", "OFDM", "1T", "116", "34", + "ETSI", "5G", "20M", "OFDM", "1T", "116", "32", + "MKK", "5G", "20M", "OFDM", "1T", "116", "32", + "FCC", "5G", "20M", "OFDM", "1T", "120", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "120", "32", + "MKK", "5G", "20M", "OFDM", "1T", "120", "32", + "FCC", "5G", "20M", "OFDM", "1T", "124", "34", + "ETSI", "5G", "20M", "OFDM", "1T", "124", "32", + "MKK", "5G", "20M", "OFDM", "1T", "124", "32", + "FCC", "5G", "20M", "OFDM", "1T", "128", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "128", "32", + "MKK", "5G", "20M", "OFDM", "1T", "128", "32", + "FCC", "5G", "20M", "OFDM", "1T", "132", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "132", "32", + "MKK", "5G", "20M", "OFDM", "1T", "132", "32", + "FCC", "5G", "20M", "OFDM", "1T", "136", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "136", "32", + "MKK", "5G", "20M", "OFDM", "1T", "136", "32", + "FCC", "5G", "20M", "OFDM", "1T", "140", "28", + "ETSI", "5G", "20M", "OFDM", "1T", "140", "32", + "MKK", "5G", "20M", "OFDM", "1T", "140", "32", + "FCC", "5G", "20M", "OFDM", "1T", "149", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "149", "32", + "MKK", "5G", "20M", "OFDM", "1T", "149", "63", + "FCC", "5G", "20M", "OFDM", "1T", "153", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "153", "32", + 
"MKK", "5G", "20M", "OFDM", "1T", "153", "63", + "FCC", "5G", "20M", "OFDM", "1T", "157", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "157", "32", + "MKK", "5G", "20M", "OFDM", "1T", "157", "63", + "FCC", "5G", "20M", "OFDM", "1T", "161", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "161", "32", + "MKK", "5G", "20M", "OFDM", "1T", "161", "63", + "FCC", "5G", "20M", "OFDM", "1T", "165", "36", + "ETSI", "5G", "20M", "OFDM", "1T", "165", "32", + "MKK", "5G", "20M", "OFDM", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "1T", "36", "30", + "ETSI", "5G", "20M", "HT", "1T", "36", "32", + "MKK", "5G", "20M", "HT", "1T", "36", "32", + "FCC", "5G", "20M", "HT", "1T", "40", "30", + "ETSI", "5G", "20M", "HT", "1T", "40", "32", + "MKK", "5G", "20M", "HT", "1T", "40", "32", + "FCC", "5G", "20M", "HT", "1T", "44", "30", + "ETSI", "5G", "20M", "HT", "1T", "44", "32", + "MKK", "5G", "20M", "HT", "1T", "44", "32", + "FCC", "5G", "20M", "HT", "1T", "48", "30", + "ETSI", "5G", "20M", "HT", "1T", "48", "32", + "MKK", "5G", "20M", "HT", "1T", "48", "32", + "FCC", "5G", "20M", "HT", "1T", "52", "36", + "ETSI", "5G", "20M", "HT", "1T", "52", "32", + "MKK", "5G", "20M", "HT", "1T", "52", "32", + "FCC", "5G", "20M", "HT", "1T", "56", "34", + "ETSI", "5G", "20M", "HT", "1T", "56", "32", + "MKK", "5G", "20M", "HT", "1T", "56", "32", + "FCC", "5G", "20M", "HT", "1T", "60", "32", + "ETSI", "5G", "20M", "HT", "1T", "60", "32", + "MKK", "5G", "20M", "HT", "1T", "60", "32", + "FCC", "5G", "20M", "HT", "1T", "64", "28", + "ETSI", "5G", "20M", "HT", "1T", "64", "32", + "MKK", "5G", "20M", "HT", "1T", "64", "32", + "FCC", "5G", "20M", "HT", "1T", "100", "30", + "ETSI", "5G", "20M", "HT", "1T", "100", "32", + "MKK", "5G", "20M", "HT", "1T", "100", "32", + "FCC", "5G", "20M", "HT", "1T", "114", "30", + "ETSI", "5G", "20M", "HT", "1T", "114", "32", + "MKK", "5G", "20M", "HT", "1T", "114", "32", + "FCC", "5G", "20M", "HT", "1T", "108", "32", + "ETSI", "5G", "20M", "HT", "1T", "108", "32", + "MKK", "5G", "20M", "HT", "1T", "108", "32", + "FCC", "5G", "20M", "HT", "1T", "112", "34", + "ETSI", "5G", "20M", "HT", "1T", "112", "32", + "MKK", "5G", "20M", "HT", "1T", "112", "32", + "FCC", "5G", "20M", "HT", "1T", "116", "34", + "ETSI", "5G", "20M", "HT", "1T", "116", "32", + "MKK", "5G", "20M", "HT", "1T", "116", "32", + "FCC", "5G", "20M", "HT", "1T", "120", "36", + "ETSI", "5G", "20M", "HT", "1T", "120", "32", + "MKK", "5G", "20M", "HT", "1T", "120", "32", + "FCC", "5G", "20M", "HT", "1T", "124", "34", + "ETSI", "5G", "20M", "HT", "1T", "124", "32", + "MKK", "5G", "20M", "HT", "1T", "124", "32", + "FCC", "5G", "20M", "HT", "1T", "128", "32", + "ETSI", "5G", "20M", "HT", "1T", "128", "32", + "MKK", "5G", "20M", "HT", "1T", "128", "32", + "FCC", "5G", "20M", "HT", "1T", "132", "30", + "ETSI", "5G", "20M", "HT", "1T", "132", "32", + "MKK", "5G", "20M", "HT", "1T", "132", "32", + "FCC", "5G", "20M", "HT", "1T", "136", "30", + "ETSI", "5G", "20M", "HT", "1T", "136", "32", + "MKK", "5G", "20M", "HT", "1T", "136", "32", + "FCC", "5G", "20M", "HT", "1T", "140", "28", + "ETSI", "5G", "20M", "HT", "1T", "140", "32", + "MKK", "5G", "20M", "HT", "1T", "140", "32", + "FCC", "5G", "20M", "HT", "1T", "149", "36", + "ETSI", "5G", "20M", "HT", "1T", "149", "32", + "MKK", "5G", "20M", "HT", "1T", "149", "63", + "FCC", "5G", "20M", "HT", "1T", "153", "36", + "ETSI", "5G", "20M", "HT", "1T", "153", "32", + "MKK", "5G", "20M", "HT", "1T", "153", "63", + "FCC", "5G", "20M", "HT", "1T", "157", "36", + "ETSI", "5G", "20M", "HT", "1T", "157", "32", + "MKK", 
"5G", "20M", "HT", "1T", "157", "63", + "FCC", "5G", "20M", "HT", "1T", "161", "36", + "ETSI", "5G", "20M", "HT", "1T", "161", "32", + "MKK", "5G", "20M", "HT", "1T", "161", "63", + "FCC", "5G", "20M", "HT", "1T", "165", "36", + "ETSI", "5G", "20M", "HT", "1T", "165", "32", + "MKK", "5G", "20M", "HT", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "2T", "36", "28", + "ETSI", "5G", "20M", "HT", "2T", "36", "30", + "MKK", "5G", "20M", "HT", "2T", "36", "30", + "FCC", "5G", "20M", "HT", "2T", "40", "28", + "ETSI", "5G", "20M", "HT", "2T", "40", "30", + "MKK", "5G", "20M", "HT", "2T", "40", "30", + "FCC", "5G", "20M", "HT", "2T", "44", "28", + "ETSI", "5G", "20M", "HT", "2T", "44", "30", + "MKK", "5G", "20M", "HT", "2T", "44", "30", + "FCC", "5G", "20M", "HT", "2T", "48", "28", + "ETSI", "5G", "20M", "HT", "2T", "48", "30", + "MKK", "5G", "20M", "HT", "2T", "48", "30", + "FCC", "5G", "20M", "HT", "2T", "52", "34", + "ETSI", "5G", "20M", "HT", "2T", "52", "30", + "MKK", "5G", "20M", "HT", "2T", "52", "30", + "FCC", "5G", "20M", "HT", "2T", "56", "32", + "ETSI", "5G", "20M", "HT", "2T", "56", "30", + "MKK", "5G", "20M", "HT", "2T", "56", "30", + "FCC", "5G", "20M", "HT", "2T", "60", "30", + "ETSI", "5G", "20M", "HT", "2T", "60", "30", + "MKK", "5G", "20M", "HT", "2T", "60", "30", + "FCC", "5G", "20M", "HT", "2T", "64", "26", + "ETSI", "5G", "20M", "HT", "2T", "64", "30", + "MKK", "5G", "20M", "HT", "2T", "64", "30", + "FCC", "5G", "20M", "HT", "2T", "100", "28", + "ETSI", "5G", "20M", "HT", "2T", "100", "30", + "MKK", "5G", "20M", "HT", "2T", "100", "30", + "FCC", "5G", "20M", "HT", "2T", "114", "28", + "ETSI", "5G", "20M", "HT", "2T", "114", "30", + "MKK", "5G", "20M", "HT", "2T", "114", "30", + "FCC", "5G", "20M", "HT", "2T", "108", "30", + "ETSI", "5G", "20M", "HT", "2T", "108", "30", + "MKK", "5G", "20M", "HT", "2T", "108", "30", + "FCC", "5G", "20M", "HT", "2T", "112", "32", + "ETSI", "5G", "20M", "HT", "2T", "112", "30", + "MKK", "5G", "20M", "HT", "2T", "112", "30", + "FCC", "5G", "20M", "HT", "2T", "116", "32", + "ETSI", "5G", "20M", "HT", "2T", "116", "30", + "MKK", "5G", "20M", "HT", "2T", "116", "30", + "FCC", "5G", "20M", "HT", "2T", "120", "34", + "ETSI", "5G", "20M", "HT", "2T", "120", "30", + "MKK", "5G", "20M", "HT", "2T", "120", "30", + "FCC", "5G", "20M", "HT", "2T", "124", "32", + "ETSI", "5G", "20M", "HT", "2T", "124", "30", + "MKK", "5G", "20M", "HT", "2T", "124", "30", + "FCC", "5G", "20M", "HT", "2T", "128", "30", + "ETSI", "5G", "20M", "HT", "2T", "128", "30", + "MKK", "5G", "20M", "HT", "2T", "128", "30", + "FCC", "5G", "20M", "HT", "2T", "132", "28", + "ETSI", "5G", "20M", "HT", "2T", "132", "30", + "MKK", "5G", "20M", "HT", "2T", "132", "30", + "FCC", "5G", "20M", "HT", "2T", "136", "28", + "ETSI", "5G", "20M", "HT", "2T", "136", "30", + "MKK", "5G", "20M", "HT", "2T", "136", "30", + "FCC", "5G", "20M", "HT", "2T", "140", "26", + "ETSI", "5G", "20M", "HT", "2T", "140", "30", + "MKK", "5G", "20M", "HT", "2T", "140", "30", + "FCC", "5G", "20M", "HT", "2T", "149", "34", + "ETSI", "5G", "20M", "HT", "2T", "149", "30", + "MKK", "5G", "20M", "HT", "2T", "149", "63", + "FCC", "5G", "20M", "HT", "2T", "153", "34", + "ETSI", "5G", "20M", "HT", "2T", "153", "30", + "MKK", "5G", "20M", "HT", "2T", "153", "63", + "FCC", "5G", "20M", "HT", "2T", "157", "34", + "ETSI", "5G", "20M", "HT", "2T", "157", "30", + "MKK", "5G", "20M", "HT", "2T", "157", "63", + "FCC", "5G", "20M", "HT", "2T", "161", "34", + "ETSI", "5G", "20M", "HT", "2T", "161", "30", + "MKK", "5G", "20M", "HT", "2T", 
"161", "63", + "FCC", "5G", "20M", "HT", "2T", "165", "34", + "ETSI", "5G", "20M", "HT", "2T", "165", "30", + "MKK", "5G", "20M", "HT", "2T", "165", "63", + "FCC", "5G", "40M", "HT", "1T", "38", "30", + "ETSI", "5G", "40M", "HT", "1T", "38", "32", + "MKK", "5G", "40M", "HT", "1T", "38", "32", + "FCC", "5G", "40M", "HT", "1T", "46", "30", + "ETSI", "5G", "40M", "HT", "1T", "46", "32", + "MKK", "5G", "40M", "HT", "1T", "46", "32", + "FCC", "5G", "40M", "HT", "1T", "54", "32", + "ETSI", "5G", "40M", "HT", "1T", "54", "32", + "MKK", "5G", "40M", "HT", "1T", "54", "32", + "FCC", "5G", "40M", "HT", "1T", "62", "32", + "ETSI", "5G", "40M", "HT", "1T", "62", "32", + "MKK", "5G", "40M", "HT", "1T", "62", "32", + "FCC", "5G", "40M", "HT", "1T", "102", "28", + "ETSI", "5G", "40M", "HT", "1T", "102", "32", + "MKK", "5G", "40M", "HT", "1T", "102", "32", + "FCC", "5G", "40M", "HT", "1T", "110", "32", + "ETSI", "5G", "40M", "HT", "1T", "110", "32", + "MKK", "5G", "40M", "HT", "1T", "110", "32", + "FCC", "5G", "40M", "HT", "1T", "118", "36", + "ETSI", "5G", "40M", "HT", "1T", "118", "32", + "MKK", "5G", "40M", "HT", "1T", "118", "32", + "FCC", "5G", "40M", "HT", "1T", "126", "34", + "ETSI", "5G", "40M", "HT", "1T", "126", "32", + "MKK", "5G", "40M", "HT", "1T", "126", "32", + "FCC", "5G", "40M", "HT", "1T", "134", "32", + "ETSI", "5G", "40M", "HT", "1T", "134", "32", + "MKK", "5G", "40M", "HT", "1T", "134", "32", + "FCC", "5G", "40M", "HT", "1T", "151", "36", + "ETSI", "5G", "40M", "HT", "1T", "151", "32", + "MKK", "5G", "40M", "HT", "1T", "151", "63", + "FCC", "5G", "40M", "HT", "1T", "159", "36", + "ETSI", "5G", "40M", "HT", "1T", "159", "32", + "MKK", "5G", "40M", "HT", "1T", "159", "63", + "FCC", "5G", "40M", "HT", "2T", "38", "28", + "ETSI", "5G", "40M", "HT", "2T", "38", "30", + "MKK", "5G", "40M", "HT", "2T", "38", "30", + "FCC", "5G", "40M", "HT", "2T", "46", "28", + "ETSI", "5G", "40M", "HT", "2T", "46", "30", + "MKK", "5G", "40M", "HT", "2T", "46", "30", + "FCC", "5G", "40M", "HT", "2T", "54", "30", + "ETSI", "5G", "40M", "HT", "2T", "54", "30", + "MKK", "5G", "40M", "HT", "2T", "54", "30", + "FCC", "5G", "40M", "HT", "2T", "62", "30", + "ETSI", "5G", "40M", "HT", "2T", "62", "30", + "MKK", "5G", "40M", "HT", "2T", "62", "30", + "FCC", "5G", "40M", "HT", "2T", "102", "26", + "ETSI", "5G", "40M", "HT", "2T", "102", "30", + "MKK", "5G", "40M", "HT", "2T", "102", "30", + "FCC", "5G", "40M", "HT", "2T", "110", "30", + "ETSI", "5G", "40M", "HT", "2T", "110", "30", + "MKK", "5G", "40M", "HT", "2T", "110", "30", + "FCC", "5G", "40M", "HT", "2T", "118", "34", + "ETSI", "5G", "40M", "HT", "2T", "118", "30", + "MKK", "5G", "40M", "HT", "2T", "118", "30", + "FCC", "5G", "40M", "HT", "2T", "126", "32", + "ETSI", "5G", "40M", "HT", "2T", "126", "30", + "MKK", "5G", "40M", "HT", "2T", "126", "30", + "FCC", "5G", "40M", "HT", "2T", "134", "30", + "ETSI", "5G", "40M", "HT", "2T", "134", "30", + "MKK", "5G", "40M", "HT", "2T", "134", "30", + "FCC", "5G", "40M", "HT", "2T", "151", "34", + "ETSI", "5G", "40M", "HT", "2T", "151", "30", + "MKK", "5G", "40M", "HT", "2T", "151", "63", + "FCC", "5G", "40M", "HT", "2T", "159", "34", + "ETSI", "5G", "40M", "HT", "2T", "159", "30", + "MKK", "5G", "40M", "HT", "2T", "159", "63", + "FCC", "5G", "80M", "VHT", "1T", "42", "30", + "ETSI", "5G", "80M", "VHT", "1T", "42", "32", + "MKK", "5G", "80M", "VHT", "1T", "42", "32", + "FCC", "5G", "80M", "VHT", "1T", "58", "28", + "ETSI", "5G", "80M", "VHT", "1T", "58", "32", + "MKK", "5G", "80M", "VHT", "1T", "58", "32", + "FCC", "5G", 
"80M", "VHT", "1T", "106", "30", + "ETSI", "5G", "80M", "VHT", "1T", "106", "32", + "MKK", "5G", "80M", "VHT", "1T", "106", "32", + "FCC", "5G", "80M", "VHT", "1T", "122", "34", + "ETSI", "5G", "80M", "VHT", "1T", "122", "32", + "MKK", "5G", "80M", "VHT", "1T", "122", "32", + "FCC", "5G", "80M", "VHT", "1T", "155", "36", + "ETSI", "5G", "80M", "VHT", "1T", "155", "32", + "MKK", "5G", "80M", "VHT", "1T", "155", "63", + "FCC", "5G", "80M", "VHT", "2T", "42", "28", + "ETSI", "5G", "80M", "VHT", "2T", "42", "30", + "MKK", "5G", "80M", "VHT", "2T", "42", "30", + "FCC", "5G", "80M", "VHT", "2T", "58", "26", + "ETSI", "5G", "80M", "VHT", "2T", "58", "30", + "MKK", "5G", "80M", "VHT", "2T", "58", "30", + "FCC", "5G", "80M", "VHT", "2T", "106", "28", + "ETSI", "5G", "80M", "VHT", "2T", "106", "30", + "MKK", "5G", "80M", "VHT", "2T", "106", "30", + "FCC", "5G", "80M", "VHT", "2T", "122", "32", + "ETSI", "5G", "80M", "VHT", "2T", "122", "30", + "MKK", "5G", "80M", "VHT", "2T", "122", "30", + "FCC", "5G", "80M", "VHT", "2T", "155", "34", + "ETSI", "5G", "80M", "VHT", "2T", "155", "30", + "MKK", "5G", "80M", "VHT", "2T", "155", "63" +}; + +u8 *RTL8821AE_TXPWR_LMT[] = { + "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "02", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "02", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "03", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "03", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "04", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "04", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "05", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "05", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "06", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "06", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "07", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "07", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "08", "36", + "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "08", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "09", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "09", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "10", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "10", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "11", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "11", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "12", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "12", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "13", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "13", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "14", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63", + "MKK", "2.4G", "20M", "CCK", "1T", "14", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "01", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32", + 
"FCC", "2.4G", "20M", "OFDM", "1T", "03", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "09", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "10", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "11", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63", + "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "1T", "01", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "01", "32", + "MKK", "2.4G", "20M", "HT", "1T", "01", "32", + "FCC", "2.4G", "20M", "HT", "1T", "02", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "02", "32", + "MKK", "2.4G", "20M", "HT", "1T", "02", "32", + "FCC", "2.4G", "20M", "HT", "1T", "03", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "03", "32", + "MKK", "2.4G", "20M", "HT", "1T", "03", "32", + "FCC", "2.4G", "20M", "HT", "1T", "04", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "04", "32", + "MKK", "2.4G", "20M", "HT", "1T", "04", "32", + "FCC", "2.4G", "20M", "HT", "1T", "05", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "05", "32", + "MKK", "2.4G", "20M", "HT", "1T", "05", "32", + "FCC", "2.4G", "20M", "HT", "1T", "06", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "06", "32", + "MKK", "2.4G", "20M", "HT", "1T", "06", "32", + "FCC", "2.4G", "20M", "HT", "1T", "07", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "07", "32", + "MKK", "2.4G", "20M", "HT", "1T", "07", "32", + "FCC", "2.4G", "20M", "HT", "1T", "08", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "08", "32", + "MKK", "2.4G", "20M", "HT", "1T", "08", "32", + "FCC", "2.4G", "20M", "HT", "1T", "09", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "09", "32", + "MKK", "2.4G", "20M", "HT", "1T", "09", "32", + "FCC", "2.4G", "20M", "HT", "1T", "10", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "10", "32", + "MKK", "2.4G", "20M", "HT", "1T", "10", "32", + "FCC", "2.4G", "20M", "HT", "1T", "11", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "11", "32", + "MKK", "2.4G", "20M", "HT", "1T", "11", "32", + "FCC", "2.4G", "20M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "12", "32", + "MKK", "2.4G", "20M", "HT", "1T", "12", "32", + 
"FCC", "2.4G", "20M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "13", "32", + "MKK", "2.4G", "20M", "HT", "1T", "13", "32", + "FCC", "2.4G", "20M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "14", "63", + "MKK", "2.4G", "20M", "HT", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "2T", "01", "30", + "ETSI", "2.4G", "20M", "HT", "2T", "01", "32", + "MKK", "2.4G", "20M", "HT", "2T", "01", "32", + "FCC", "2.4G", "20M", "HT", "2T", "02", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "02", "32", + "MKK", "2.4G", "20M", "HT", "2T", "02", "32", + "FCC", "2.4G", "20M", "HT", "2T", "03", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "03", "32", + "MKK", "2.4G", "20M", "HT", "2T", "03", "32", + "FCC", "2.4G", "20M", "HT", "2T", "04", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "04", "32", + "MKK", "2.4G", "20M", "HT", "2T", "04", "32", + "FCC", "2.4G", "20M", "HT", "2T", "05", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "05", "32", + "MKK", "2.4G", "20M", "HT", "2T", "05", "32", + "FCC", "2.4G", "20M", "HT", "2T", "06", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "06", "32", + "MKK", "2.4G", "20M", "HT", "2T", "06", "32", + "FCC", "2.4G", "20M", "HT", "2T", "07", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "07", "32", + "MKK", "2.4G", "20M", "HT", "2T", "07", "32", + "FCC", "2.4G", "20M", "HT", "2T", "08", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "08", "32", + "MKK", "2.4G", "20M", "HT", "2T", "08", "32", + "FCC", "2.4G", "20M", "HT", "2T", "09", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "09", "32", + "MKK", "2.4G", "20M", "HT", "2T", "09", "32", + "FCC", "2.4G", "20M", "HT", "2T", "10", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "10", "32", + "MKK", "2.4G", "20M", "HT", "2T", "10", "32", + "FCC", "2.4G", "20M", "HT", "2T", "11", "30", + "ETSI", "2.4G", "20M", "HT", "2T", "11", "32", + "MKK", "2.4G", "20M", "HT", "2T", "11", "32", + "FCC", "2.4G", "20M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "12", "32", + "MKK", "2.4G", "20M", "HT", "2T", "12", "32", + "FCC", "2.4G", "20M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "13", "32", + "MKK", "2.4G", "20M", "HT", "2T", "13", "32", + "FCC", "2.4G", "20M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "14", "63", + "MKK", "2.4G", "20M", "HT", "2T", "14", "63", + "FCC", "2.4G", "40M", "HT", "1T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "01", "63", + "MKK", "2.4G", "40M", "HT", "1T", "01", "63", + "FCC", "2.4G", "40M", "HT", "1T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "02", "63", + "MKK", "2.4G", "40M", "HT", "1T", "02", "63", + "FCC", "2.4G", "40M", "HT", "1T", "03", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "03", "32", + "MKK", "2.4G", "40M", "HT", "1T", "03", "32", + "FCC", "2.4G", "40M", "HT", "1T", "04", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "04", "32", + "MKK", "2.4G", "40M", "HT", "1T", "04", "32", + "FCC", "2.4G", "40M", "HT", "1T", "05", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "05", "32", + "MKK", "2.4G", "40M", "HT", "1T", "05", "32", + "FCC", "2.4G", "40M", "HT", "1T", "06", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "06", "32", + "MKK", "2.4G", "40M", "HT", "1T", "06", "32", + "FCC", "2.4G", "40M", "HT", "1T", "07", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "07", "32", + "MKK", "2.4G", "40M", "HT", "1T", "07", "32", + "FCC", "2.4G", "40M", "HT", "1T", "08", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "08", "32", + "MKK", "2.4G", "40M", "HT", "1T", "08", "32", + "FCC", "2.4G", "40M", "HT", "1T", "09", "26", + "ETSI", "2.4G", "40M", 
"HT", "1T", "09", "32", + "MKK", "2.4G", "40M", "HT", "1T", "09", "32", + "FCC", "2.4G", "40M", "HT", "1T", "10", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "10", "32", + "MKK", "2.4G", "40M", "HT", "1T", "10", "32", + "FCC", "2.4G", "40M", "HT", "1T", "11", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "11", "32", + "MKK", "2.4G", "40M", "HT", "1T", "11", "32", + "FCC", "2.4G", "40M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "12", "32", + "MKK", "2.4G", "40M", "HT", "1T", "12", "32", + "FCC", "2.4G", "40M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "13", "32", + "MKK", "2.4G", "40M", "HT", "1T", "13", "32", + "FCC", "2.4G", "40M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "14", "63", + "MKK", "2.4G", "40M", "HT", "1T", "14", "63", + "FCC", "2.4G", "40M", "HT", "2T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "01", "63", + "MKK", "2.4G", "40M", "HT", "2T", "01", "63", + "FCC", "2.4G", "40M", "HT", "2T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "02", "63", + "MKK", "2.4G", "40M", "HT", "2T", "02", "63", + "FCC", "2.4G", "40M", "HT", "2T", "03", "30", + "ETSI", "2.4G", "40M", "HT", "2T", "03", "30", + "MKK", "2.4G", "40M", "HT", "2T", "03", "30", + "FCC", "2.4G", "40M", "HT", "2T", "04", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "04", "30", + "MKK", "2.4G", "40M", "HT", "2T", "04", "30", + "FCC", "2.4G", "40M", "HT", "2T", "05", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "05", "30", + "MKK", "2.4G", "40M", "HT", "2T", "05", "30", + "FCC", "2.4G", "40M", "HT", "2T", "06", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "06", "30", + "MKK", "2.4G", "40M", "HT", "2T", "06", "30", + "FCC", "2.4G", "40M", "HT", "2T", "07", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "07", "30", + "MKK", "2.4G", "40M", "HT", "2T", "07", "30", + "FCC", "2.4G", "40M", "HT", "2T", "08", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "08", "30", + "MKK", "2.4G", "40M", "HT", "2T", "08", "30", + "FCC", "2.4G", "40M", "HT", "2T", "09", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "09", "30", + "MKK", "2.4G", "40M", "HT", "2T", "09", "30", + "FCC", "2.4G", "40M", "HT", "2T", "10", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "10", "30", + "MKK", "2.4G", "40M", "HT", "2T", "10", "30", + "FCC", "2.4G", "40M", "HT", "2T", "11", "30", + "ETSI", "2.4G", "40M", "HT", "2T", "11", "30", + "MKK", "2.4G", "40M", "HT", "2T", "11", "30", + "FCC", "2.4G", "40M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "12", "32", + "MKK", "2.4G", "40M", "HT", "2T", "12", "32", + "FCC", "2.4G", "40M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "13", "32", + "MKK", "2.4G", "40M", "HT", "2T", "13", "32", + "FCC", "2.4G", "40M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "14", "63", + "MKK", "2.4G", "40M", "HT", "2T", "14", "63", + "FCC", "5G", "20M", "OFDM", "1T", "36", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "36", "30", + "MKK", "5G", "20M", "OFDM", "1T", "36", "30", + "FCC", "5G", "20M", "OFDM", "1T", "40", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "40", "30", + "MKK", "5G", "20M", "OFDM", "1T", "40", "30", + "FCC", "5G", "20M", "OFDM", "1T", "44", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "44", "30", + "MKK", "5G", "20M", "OFDM", "1T", "44", "30", + "FCC", "5G", "20M", "OFDM", "1T", "48", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "48", "30", + "MKK", "5G", "20M", "OFDM", "1T", "48", "30", + "FCC", "5G", "20M", "OFDM", "1T", "52", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "52", "30", + "MKK", "5G", "20M", "OFDM", "1T", "52", "30", + 
"FCC", "5G", "20M", "OFDM", "1T", "56", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "56", "30", + "MKK", "5G", "20M", "OFDM", "1T", "56", "30", + "FCC", "5G", "20M", "OFDM", "1T", "60", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "60", "30", + "MKK", "5G", "20M", "OFDM", "1T", "60", "30", + "FCC", "5G", "20M", "OFDM", "1T", "64", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "64", "30", + "MKK", "5G", "20M", "OFDM", "1T", "64", "30", + "FCC", "5G", "20M", "OFDM", "1T", "100", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "100", "30", + "MKK", "5G", "20M", "OFDM", "1T", "100", "30", + "FCC", "5G", "20M", "OFDM", "1T", "114", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "114", "30", + "MKK", "5G", "20M", "OFDM", "1T", "114", "30", + "FCC", "5G", "20M", "OFDM", "1T", "108", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "108", "30", + "MKK", "5G", "20M", "OFDM", "1T", "108", "30", + "FCC", "5G", "20M", "OFDM", "1T", "112", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "112", "30", + "MKK", "5G", "20M", "OFDM", "1T", "112", "30", + "FCC", "5G", "20M", "OFDM", "1T", "116", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "116", "30", + "MKK", "5G", "20M", "OFDM", "1T", "116", "30", + "FCC", "5G", "20M", "OFDM", "1T", "120", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "120", "30", + "MKK", "5G", "20M", "OFDM", "1T", "120", "30", + "FCC", "5G", "20M", "OFDM", "1T", "124", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "124", "30", + "MKK", "5G", "20M", "OFDM", "1T", "124", "30", + "FCC", "5G", "20M", "OFDM", "1T", "128", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "128", "30", + "MKK", "5G", "20M", "OFDM", "1T", "128", "30", + "FCC", "5G", "20M", "OFDM", "1T", "132", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "132", "30", + "MKK", "5G", "20M", "OFDM", "1T", "132", "30", + "FCC", "5G", "20M", "OFDM", "1T", "136", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "136", "30", + "MKK", "5G", "20M", "OFDM", "1T", "136", "30", + "FCC", "5G", "20M", "OFDM", "1T", "140", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "140", "30", + "MKK", "5G", "20M", "OFDM", "1T", "140", "30", + "FCC", "5G", "20M", "OFDM", "1T", "149", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "149", "30", + "MKK", "5G", "20M", "OFDM", "1T", "149", "63", + "FCC", "5G", "20M", "OFDM", "1T", "153", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "153", "30", + "MKK", "5G", "20M", "OFDM", "1T", "153", "63", + "FCC", "5G", "20M", "OFDM", "1T", "157", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "157", "30", + "MKK", "5G", "20M", "OFDM", "1T", "157", "63", + "FCC", "5G", "20M", "OFDM", "1T", "161", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "161", "30", + "MKK", "5G", "20M", "OFDM", "1T", "161", "63", + "FCC", "5G", "20M", "OFDM", "1T", "165", "32", + "ETSI", "5G", "20M", "OFDM", "1T", "165", "30", + "MKK", "5G", "20M", "OFDM", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "1T", "36", "32", + "ETSI", "5G", "20M", "HT", "1T", "36", "30", + "MKK", "5G", "20M", "HT", "1T", "36", "30", + "FCC", "5G", "20M", "HT", "1T", "40", "32", + "ETSI", "5G", "20M", "HT", "1T", "40", "30", + "MKK", "5G", "20M", "HT", "1T", "40", "30", + "FCC", "5G", "20M", "HT", "1T", "44", "32", + "ETSI", "5G", "20M", "HT", "1T", "44", "30", + "MKK", "5G", "20M", "HT", "1T", "44", "30", + "FCC", "5G", "20M", "HT", "1T", "48", "32", + "ETSI", "5G", "20M", "HT", "1T", "48", "30", + "MKK", "5G", "20M", "HT", "1T", "48", "30", + "FCC", "5G", "20M", "HT", "1T", "52", "32", + "ETSI", "5G", "20M", "HT", "1T", "52", "30", + "MKK", "5G", "20M", "HT", "1T", "52", "30", + "FCC", "5G", "20M", "HT", "1T", "56", "32", + "ETSI", 
"5G", "20M", "HT", "1T", "56", "30", + "MKK", "5G", "20M", "HT", "1T", "56", "30", + "FCC", "5G", "20M", "HT", "1T", "60", "32", + "ETSI", "5G", "20M", "HT", "1T", "60", "30", + "MKK", "5G", "20M", "HT", "1T", "60", "30", + "FCC", "5G", "20M", "HT", "1T", "64", "32", + "ETSI", "5G", "20M", "HT", "1T", "64", "30", + "MKK", "5G", "20M", "HT", "1T", "64", "30", + "FCC", "5G", "20M", "HT", "1T", "100", "32", + "ETSI", "5G", "20M", "HT", "1T", "100", "30", + "MKK", "5G", "20M", "HT", "1T", "100", "30", + "FCC", "5G", "20M", "HT", "1T", "114", "32", + "ETSI", "5G", "20M", "HT", "1T", "114", "30", + "MKK", "5G", "20M", "HT", "1T", "114", "30", + "FCC", "5G", "20M", "HT", "1T", "108", "32", + "ETSI", "5G", "20M", "HT", "1T", "108", "30", + "MKK", "5G", "20M", "HT", "1T", "108", "30", + "FCC", "5G", "20M", "HT", "1T", "112", "32", + "ETSI", "5G", "20M", "HT", "1T", "112", "30", + "MKK", "5G", "20M", "HT", "1T", "112", "30", + "FCC", "5G", "20M", "HT", "1T", "116", "32", + "ETSI", "5G", "20M", "HT", "1T", "116", "30", + "MKK", "5G", "20M", "HT", "1T", "116", "30", + "FCC", "5G", "20M", "HT", "1T", "120", "32", + "ETSI", "5G", "20M", "HT", "1T", "120", "30", + "MKK", "5G", "20M", "HT", "1T", "120", "30", + "FCC", "5G", "20M", "HT", "1T", "124", "32", + "ETSI", "5G", "20M", "HT", "1T", "124", "30", + "MKK", "5G", "20M", "HT", "1T", "124", "30", + "FCC", "5G", "20M", "HT", "1T", "128", "32", + "ETSI", "5G", "20M", "HT", "1T", "128", "30", + "MKK", "5G", "20M", "HT", "1T", "128", "30", + "FCC", "5G", "20M", "HT", "1T", "132", "32", + "ETSI", "5G", "20M", "HT", "1T", "132", "30", + "MKK", "5G", "20M", "HT", "1T", "132", "30", + "FCC", "5G", "20M", "HT", "1T", "136", "32", + "ETSI", "5G", "20M", "HT", "1T", "136", "30", + "MKK", "5G", "20M", "HT", "1T", "136", "30", + "FCC", "5G", "20M", "HT", "1T", "140", "32", + "ETSI", "5G", "20M", "HT", "1T", "140", "30", + "MKK", "5G", "20M", "HT", "1T", "140", "30", + "FCC", "5G", "20M", "HT", "1T", "149", "32", + "ETSI", "5G", "20M", "HT", "1T", "149", "30", + "MKK", "5G", "20M", "HT", "1T", "149", "63", + "FCC", "5G", "20M", "HT", "1T", "153", "32", + "ETSI", "5G", "20M", "HT", "1T", "153", "30", + "MKK", "5G", "20M", "HT", "1T", "153", "63", + "FCC", "5G", "20M", "HT", "1T", "157", "32", + "ETSI", "5G", "20M", "HT", "1T", "157", "30", + "MKK", "5G", "20M", "HT", "1T", "157", "63", + "FCC", "5G", "20M", "HT", "1T", "161", "32", + "ETSI", "5G", "20M", "HT", "1T", "161", "30", + "MKK", "5G", "20M", "HT", "1T", "161", "63", + "FCC", "5G", "20M", "HT", "1T", "165", "32", + "ETSI", "5G", "20M", "HT", "1T", "165", "30", + "MKK", "5G", "20M", "HT", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "2T", "36", "28", + "ETSI", "5G", "20M", "HT", "2T", "36", "30", + "MKK", "5G", "20M", "HT", "2T", "36", "30", + "FCC", "5G", "20M", "HT", "2T", "40", "28", + "ETSI", "5G", "20M", "HT", "2T", "40", "30", + "MKK", "5G", "20M", "HT", "2T", "40", "30", + "FCC", "5G", "20M", "HT", "2T", "44", "28", + "ETSI", "5G", "20M", "HT", "2T", "44", "30", + "MKK", "5G", "20M", "HT", "2T", "44", "30", + "FCC", "5G", "20M", "HT", "2T", "48", "28", + "ETSI", "5G", "20M", "HT", "2T", "48", "30", + "MKK", "5G", "20M", "HT", "2T", "48", "30", + "FCC", "5G", "20M", "HT", "2T", "52", "34", + "ETSI", "5G", "20M", "HT", "2T", "52", "30", + "MKK", "5G", "20M", "HT", "2T", "52", "30", + "FCC", "5G", "20M", "HT", "2T", "56", "32", + "ETSI", "5G", "20M", "HT", "2T", "56", "30", + "MKK", "5G", "20M", "HT", "2T", "56", "30", + "FCC", "5G", "20M", "HT", "2T", "60", "30", + "ETSI", "5G", "20M", "HT", "2T", "60", 
"30", + "MKK", "5G", "20M", "HT", "2T", "60", "30", + "FCC", "5G", "20M", "HT", "2T", "64", "26", + "ETSI", "5G", "20M", "HT", "2T", "64", "30", + "MKK", "5G", "20M", "HT", "2T", "64", "30", + "FCC", "5G", "20M", "HT", "2T", "100", "28", + "ETSI", "5G", "20M", "HT", "2T", "100", "30", + "MKK", "5G", "20M", "HT", "2T", "100", "30", + "FCC", "5G", "20M", "HT", "2T", "114", "28", + "ETSI", "5G", "20M", "HT", "2T", "114", "30", + "MKK", "5G", "20M", "HT", "2T", "114", "30", + "FCC", "5G", "20M", "HT", "2T", "108", "30", + "ETSI", "5G", "20M", "HT", "2T", "108", "30", + "MKK", "5G", "20M", "HT", "2T", "108", "30", + "FCC", "5G", "20M", "HT", "2T", "112", "32", + "ETSI", "5G", "20M", "HT", "2T", "112", "30", + "MKK", "5G", "20M", "HT", "2T", "112", "30", + "FCC", "5G", "20M", "HT", "2T", "116", "32", + "ETSI", "5G", "20M", "HT", "2T", "116", "30", + "MKK", "5G", "20M", "HT", "2T", "116", "30", + "FCC", "5G", "20M", "HT", "2T", "120", "34", + "ETSI", "5G", "20M", "HT", "2T", "120", "30", + "MKK", "5G", "20M", "HT", "2T", "120", "30", + "FCC", "5G", "20M", "HT", "2T", "124", "32", + "ETSI", "5G", "20M", "HT", "2T", "124", "30", + "MKK", "5G", "20M", "HT", "2T", "124", "30", + "FCC", "5G", "20M", "HT", "2T", "128", "30", + "ETSI", "5G", "20M", "HT", "2T", "128", "30", + "MKK", "5G", "20M", "HT", "2T", "128", "30", + "FCC", "5G", "20M", "HT", "2T", "132", "28", + "ETSI", "5G", "20M", "HT", "2T", "132", "30", + "MKK", "5G", "20M", "HT", "2T", "132", "30", + "FCC", "5G", "20M", "HT", "2T", "136", "28", + "ETSI", "5G", "20M", "HT", "2T", "136", "30", + "MKK", "5G", "20M", "HT", "2T", "136", "30", + "FCC", "5G", "20M", "HT", "2T", "140", "26", + "ETSI", "5G", "20M", "HT", "2T", "140", "30", + "MKK", "5G", "20M", "HT", "2T", "140", "30", + "FCC", "5G", "20M", "HT", "2T", "149", "34", + "ETSI", "5G", "20M", "HT", "2T", "149", "30", + "MKK", "5G", "20M", "HT", "2T", "149", "63", + "FCC", "5G", "20M", "HT", "2T", "153", "34", + "ETSI", "5G", "20M", "HT", "2T", "153", "30", + "MKK", "5G", "20M", "HT", "2T", "153", "63", + "FCC", "5G", "20M", "HT", "2T", "157", "34", + "ETSI", "5G", "20M", "HT", "2T", "157", "30", + "MKK", "5G", "20M", "HT", "2T", "157", "63", + "FCC", "5G", "20M", "HT", "2T", "161", "34", + "ETSI", "5G", "20M", "HT", "2T", "161", "30", + "MKK", "5G", "20M", "HT", "2T", "161", "63", + "FCC", "5G", "20M", "HT", "2T", "165", "34", + "ETSI", "5G", "20M", "HT", "2T", "165", "30", + "MKK", "5G", "20M", "HT", "2T", "165", "63", + "FCC", "5G", "40M", "HT", "1T", "38", "26", + "ETSI", "5G", "40M", "HT", "1T", "38", "30", + "MKK", "5G", "40M", "HT", "1T", "38", "30", + "FCC", "5G", "40M", "HT", "1T", "46", "32", + "ETSI", "5G", "40M", "HT", "1T", "46", "30", + "MKK", "5G", "40M", "HT", "1T", "46", "30", + "FCC", "5G", "40M", "HT", "1T", "54", "32", + "ETSI", "5G", "40M", "HT", "1T", "54", "30", + "MKK", "5G", "40M", "HT", "1T", "54", "30", + "FCC", "5G", "40M", "HT", "1T", "62", "24", + "ETSI", "5G", "40M", "HT", "1T", "62", "30", + "MKK", "5G", "40M", "HT", "1T", "62", "30", + "FCC", "5G", "40M", "HT", "1T", "102", "24", + "ETSI", "5G", "40M", "HT", "1T", "102", "30", + "MKK", "5G", "40M", "HT", "1T", "102", "30", + "FCC", "5G", "40M", "HT", "1T", "110", "32", + "ETSI", "5G", "40M", "HT", "1T", "110", "30", + "MKK", "5G", "40M", "HT", "1T", "110", "30", + "FCC", "5G", "40M", "HT", "1T", "118", "32", + "ETSI", "5G", "40M", "HT", "1T", "118", "30", + "MKK", "5G", "40M", "HT", "1T", "118", "30", + "FCC", "5G", "40M", "HT", "1T", "126", "32", + "ETSI", "5G", "40M", "HT", "1T", "126", "30", + "MKK", "5G", 
"40M", "HT", "1T", "126", "30", + "FCC", "5G", "40M", "HT", "1T", "134", "32", + "ETSI", "5G", "40M", "HT", "1T", "134", "30", + "MKK", "5G", "40M", "HT", "1T", "134", "30", + "FCC", "5G", "40M", "HT", "1T", "151", "30", + "ETSI", "5G", "40M", "HT", "1T", "151", "30", + "MKK", "5G", "40M", "HT", "1T", "151", "63", + "FCC", "5G", "40M", "HT", "1T", "159", "32", + "ETSI", "5G", "40M", "HT", "1T", "159", "30", + "MKK", "5G", "40M", "HT", "1T", "159", "63", + "FCC", "5G", "40M", "HT", "2T", "38", "28", + "ETSI", "5G", "40M", "HT", "2T", "38", "30", + "MKK", "5G", "40M", "HT", "2T", "38", "30", + "FCC", "5G", "40M", "HT", "2T", "46", "28", + "ETSI", "5G", "40M", "HT", "2T", "46", "30", + "MKK", "5G", "40M", "HT", "2T", "46", "30", + "FCC", "5G", "40M", "HT", "2T", "54", "30", + "ETSI", "5G", "40M", "HT", "2T", "54", "30", + "MKK", "5G", "40M", "HT", "2T", "54", "30", + "FCC", "5G", "40M", "HT", "2T", "62", "30", + "ETSI", "5G", "40M", "HT", "2T", "62", "30", + "MKK", "5G", "40M", "HT", "2T", "62", "30", + "FCC", "5G", "40M", "HT", "2T", "102", "26", + "ETSI", "5G", "40M", "HT", "2T", "102", "30", + "MKK", "5G", "40M", "HT", "2T", "102", "30", + "FCC", "5G", "40M", "HT", "2T", "110", "30", + "ETSI", "5G", "40M", "HT", "2T", "110", "30", + "MKK", "5G", "40M", "HT", "2T", "110", "30", + "FCC", "5G", "40M", "HT", "2T", "118", "34", + "ETSI", "5G", "40M", "HT", "2T", "118", "30", + "MKK", "5G", "40M", "HT", "2T", "118", "30", + "FCC", "5G", "40M", "HT", "2T", "126", "32", + "ETSI", "5G", "40M", "HT", "2T", "126", "30", + "MKK", "5G", "40M", "HT", "2T", "126", "30", + "FCC", "5G", "40M", "HT", "2T", "134", "30", + "ETSI", "5G", "40M", "HT", "2T", "134", "30", + "MKK", "5G", "40M", "HT", "2T", "134", "30", + "FCC", "5G", "40M", "HT", "2T", "151", "34", + "ETSI", "5G", "40M", "HT", "2T", "151", "30", + "MKK", "5G", "40M", "HT", "2T", "151", "63", + "FCC", "5G", "40M", "HT", "2T", "159", "34", + "ETSI", "5G", "40M", "HT", "2T", "159", "30", + "MKK", "5G", "40M", "HT", "2T", "159", "63", + "FCC", "5G", "80M", "VHT", "1T", "42", "22", + "ETSI", "5G", "80M", "VHT", "1T", "42", "30", + "MKK", "5G", "80M", "VHT", "1T", "42", "30", + "FCC", "5G", "80M", "VHT", "1T", "58", "20", + "ETSI", "5G", "80M", "VHT", "1T", "58", "30", + "MKK", "5G", "80M", "VHT", "1T", "58", "30", + "FCC", "5G", "80M", "VHT", "1T", "106", "20", + "ETSI", "5G", "80M", "VHT", "1T", "106", "30", + "MKK", "5G", "80M", "VHT", "1T", "106", "30", + "FCC", "5G", "80M", "VHT", "1T", "122", "20", + "ETSI", "5G", "80M", "VHT", "1T", "122", "30", + "MKK", "5G", "80M", "VHT", "1T", "122", "30", + "FCC", "5G", "80M", "VHT", "1T", "155", "28", + "ETSI", "5G", "80M", "VHT", "1T", "155", "30", + "MKK", "5G", "80M", "VHT", "1T", "155", "63", + "FCC", "5G", "80M", "VHT", "2T", "42", "28", + "ETSI", "5G", "80M", "VHT", "2T", "42", "30", + "MKK", "5G", "80M", "VHT", "2T", "42", "30", + "FCC", "5G", "80M", "VHT", "2T", "58", "26", + "ETSI", "5G", "80M", "VHT", "2T", "58", "30", + "MKK", "5G", "80M", "VHT", "2T", "58", "30", + "FCC", "5G", "80M", "VHT", "2T", "106", "28", + "ETSI", "5G", "80M", "VHT", "2T", "106", "30", + "MKK", "5G", "80M", "VHT", "2T", "106", "30", + "FCC", "5G", "80M", "VHT", "2T", "122", "32", + "ETSI", "5G", "80M", "VHT", "2T", "122", "30", + "MKK", "5G", "80M", "VHT", "2T", "122", "30", + "FCC", "5G", "80M", "VHT", "2T", "155", "34", + "ETSI", "5G", "80M", "VHT", "2T", "155", "30", + "MKK", "5G", "80M", "VHT", "2T", "155", "63" +}; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h 
b/drivers/net/wireless/rtlwifi/rtl8821ae/table.h new file mode 100644 index 0000000000000000000000000000000000000000..24bcff6bc507bff6b27a03e939790db9d59bd5dd --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/table.h @@ -0,0 +1,60 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_TABLE__H_ +#define __RTL8821AE_TABLE__H_ + +#include +#define RTL8821AEPHY_REG_1TARRAYLEN 344 +extern u32 RTL8821AE_PHY_REG_ARRAY[]; +#define RTL8812AEPHY_REG_1TARRAYLEN 490 +extern u32 RTL8812AE_PHY_REG_ARRAY[]; +#define RTL8821AEPHY_REG_ARRAY_PGLEN 90 +extern u32 RTL8821AE_PHY_REG_ARRAY_PG[]; +#define RTL8812AEPHY_REG_ARRAY_PGLEN 276 +extern u32 RTL8812AE_PHY_REG_ARRAY_PG[]; +/* #define RTL8723BE_RADIOA_1TARRAYLEN 206 */ +/* extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; */ +#define RTL8812AE_RADIOA_1TARRAYLEN 1264 +extern u32 RTL8812AE_RADIOA_ARRAY[]; +#define RTL8812AE_RADIOB_1TARRAYLEN 1240 +extern u32 RTL8812AE_RADIOB_ARRAY[]; +#define RTL8821AE_RADIOA_1TARRAYLEN 1176 +extern u32 RTL8821AE_RADIOA_ARRAY[]; +#define RTL8821AEMAC_1T_ARRAYLEN 194 +extern u32 RTL8821AE_MAC_REG_ARRAY[]; +#define RTL8812AEMAC_1T_ARRAYLEN 214 +extern u32 RTL8812AE_MAC_REG_ARRAY[]; +#define RTL8821AEAGCTAB_1TARRAYLEN 382 +extern u32 RTL8821AE_AGC_TAB_ARRAY[]; +#define RTL8812AEAGCTAB_1TARRAYLEN 1312 +extern u32 RTL8812AE_AGC_TAB_ARRAY[]; +#define RTL8812AE_TXPWR_LMT_ARRAY_LEN 3948 +extern u8 *RTL8812AE_TXPWR_LMT[]; +#define RTL8821AE_TXPWR_LMT_ARRAY_LEN 3948 +extern u8 *RTL8821AE_TXPWR_LMT[]; +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c new file mode 100644 index 0000000000000000000000000000000000000000..383b86b05cba17531cac2b5428015fc78cee69f9 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c @@ -0,0 +1,1236 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 
2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../stats.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "trx.h" +#include "led.h" +#include "dm.h" +#include "phy.h" +#include "fw.h" + +static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) +{ + __le16 fc = rtl_get_fc(skb); + + if (unlikely(ieee80211_is_beacon(fc))) + return QSLT_BEACON; + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) + return QSLT_MGNT; + + return skb->priority; +} + +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw, + bool isht, bool isvht, u8 desc_rate) +{ + int rate_idx; + + if (!isht) { + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + switch (desc_rate) { + case DESC_RATE1M: + rate_idx = 0; + break; + case DESC_RATE2M: + rate_idx = 1; + break; + case DESC_RATE5_5M: + rate_idx = 2; + break; + case DESC_RATE11M: + rate_idx = 3; + break; + case DESC_RATE6M: + rate_idx = 4; + break; + case DESC_RATE9M: + rate_idx = 5; + break; + case DESC_RATE12M: + rate_idx = 6; + break; + case DESC_RATE18M: + rate_idx = 7; + break; + case DESC_RATE24M: + rate_idx = 8; + break; + case DESC_RATE36M: + rate_idx = 9; + break; + case DESC_RATE48M: + rate_idx = 10; + break; + case DESC_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC_RATE6M: + rate_idx = 0; + break; + case DESC_RATE9M: + rate_idx = 1; + break; + case DESC_RATE12M: + rate_idx = 2; + break; + case DESC_RATE18M: + rate_idx = 3; + break; + case DESC_RATE24M: + rate_idx = 4; + break; + case DESC_RATE36M: + rate_idx = 5; + break; + case DESC_RATE48M: + rate_idx = 6; + break; + case DESC_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch (desc_rate) { + case DESC_RATEMCS0: + rate_idx = 0; + break; + case DESC_RATEMCS1: + rate_idx = 1; + break; + case DESC_RATEMCS2: + rate_idx = 2; + break; + case DESC_RATEMCS3: + rate_idx = 3; + break; + case DESC_RATEMCS4: + rate_idx = 4; + break; + case DESC_RATEMCS5: + rate_idx = 5; + break; + case DESC_RATEMCS6: + rate_idx = 6; + break; + case DESC_RATEMCS7: + rate_idx = 7; + break; + case DESC_RATEMCS8: + rate_idx = 8; + break; + case DESC_RATEMCS9: + rate_idx = 9; + break; + case DESC_RATEMCS10: + rate_idx = 10; + break; + case DESC_RATEMCS11: + rate_idx = 11; + break; + case DESC_RATEMCS12: + rate_idx = 12; + break; + case DESC_RATEMCS13: + rate_idx = 13; + break; + case DESC_RATEMCS14: + rate_idx = 14; + break; + case DESC_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + + if (isvht) { + switch (desc_rate) { + case DESC_RATEVHT1SS_MCS0: + rate_idx = 0; + break; + case DESC_RATEVHT1SS_MCS1: + rate_idx = 1; + break; + case DESC_RATEVHT1SS_MCS2: + 
rate_idx = 2; + break; + case DESC_RATEVHT1SS_MCS3: + rate_idx = 3; + break; + case DESC_RATEVHT1SS_MCS4: + rate_idx = 4; + break; + case DESC_RATEVHT1SS_MCS5: + rate_idx = 5; + break; + case DESC_RATEVHT1SS_MCS6: + rate_idx = 6; + break; + case DESC_RATEVHT1SS_MCS7: + rate_idx = 7; + break; + case DESC_RATEVHT1SS_MCS8: + rate_idx = 8; + break; + case DESC_RATEVHT1SS_MCS9: + rate_idx = 9; + break; + case DESC_RATEVHT2SS_MCS0: + rate_idx = 0; + break; + case DESC_RATEVHT2SS_MCS1: + rate_idx = 1; + break; + case DESC_RATEVHT2SS_MCS2: + rate_idx = 2; + break; + case DESC_RATEVHT2SS_MCS3: + rate_idx = 3; + break; + case DESC_RATEVHT2SS_MCS4: + rate_idx = 4; + break; + case DESC_RATEVHT2SS_MCS5: + rate_idx = 5; + break; + case DESC_RATEVHT2SS_MCS6: + rate_idx = 6; + break; + case DESC_RATEVHT2SS_MCS7: + rate_idx = 7; + break; + case DESC_RATEVHT2SS_MCS8: + rate_idx = 8; + break; + case DESC_RATEVHT2SS_MCS9: + rate_idx = 9; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + +static u16 odm_cfo(char value) +{ + int ret_val; + + if (value < 0) { + ret_val = 0 - value; + ret_val = (ret_val << 1) + (ret_val >> 1); + /* set bit12 as 1 for negative cfo */ + ret_val = ret_val | BIT(12); + } else { + ret_val = value; + ret_val = (ret_val << 1) + (ret_val >> 1); + } + return ret_val; +} + +static void query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8821ae *p_drvinfo, + bool bpacket_match_bssid, + bool bpacket_toself, bool packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + char rx_pwr_all = 0, rx_pwr[4]; + u8 rf_rx_num = 0, evm, evmdbm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool is_cck = pstatus->is_cck; + u8 lan_idx, vga_idx; + + /* Record it for next packet processing */ + pstatus->packet_matchbssid = bpacket_match_bssid; + pstatus->packet_toself = bpacket_toself; + pstatus->packet_beacon = packet_beacon; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; + + if (is_cck) { + u8 cck_highpwr; + u8 cck_agc_rpt; + + cck_agc_rpt = p_phystrpt->cfosho[0]; + + /* (1)Hardware does not provide RSSI for CCK + * (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + cck_highpwr = (u8)rtlphy->cck_high_power; + + lan_idx = ((cck_agc_rpt & 0xE0) >> 5); + vga_idx = (cck_agc_rpt & 0x1f); + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) { + switch (lan_idx) { + case 7: + if (vga_idx <= 27) + /*VGA_idx = 27~2*/ + rx_pwr_all = -100 + 2*(27-vga_idx); + else + rx_pwr_all = -100; + break; + case 6: + /*VGA_idx = 2~0*/ + rx_pwr_all = -48 + 2*(2-vga_idx); + break; + case 5: + /*VGA_idx = 7~5*/ + rx_pwr_all = -42 + 2*(7-vga_idx); + break; + case 4: + /*VGA_idx = 7~4*/ + rx_pwr_all = -36 + 2*(7-vga_idx); + break; + case 3: + /*VGA_idx = 7~0*/ + rx_pwr_all = -24 + 2*(7-vga_idx); + break; + case 2: + if (cck_highpwr) + /*VGA_idx = 5~0*/ + rx_pwr_all = -12 + 2*(5-vga_idx); + else + rx_pwr_all = -6 + 2*(5-vga_idx); + break; + case 1: + rx_pwr_all = 8-2*vga_idx; + break; + case 0: + rx_pwr_all = 14-2*vga_idx; + break; + default: + break; + } + rx_pwr_all += 6; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + if (!cck_highpwr) { + if (pwdb_all >= 80) + pwdb_all = + ((pwdb_all - 80)<<1) + + ((pwdb_all - 80)>>1) + 80; + else if ((pwdb_all <= 78) && (pwdb_all >= 20)) + pwdb_all += 3; + if 
(pwdb_all > 100) + pwdb_all = 100; + } + } else { /* 8821 */ + char pout = -6; + + switch (lan_idx) { + case 5: + rx_pwr_all = pout - 32 - (2*vga_idx); + break; + case 4: + rx_pwr_all = pout - 24 - (2*vga_idx); + break; + case 2: + rx_pwr_all = pout - 11 - (2*vga_idx); + break; + case 1: + rx_pwr_all = pout + 5 - (2*vga_idx); + break; + case 0: + rx_pwr_all = pout + 21 - (2*vga_idx); + break; + } + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + } + + pstatus->rx_pwdb_all = pwdb_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3) Get Signal Quality (EVM) */ + if (bpacket_match_bssid) { + u8 sq; + + if (pstatus->rx_pwdb_all > 40) { + sq = 100; + } else { + sq = p_phystrpt->pwdb_all; + if (sq > 64) + sq = 0; + else if (sq < 20) + sq = 100; + else + sq = ((64 - sq) * 100) / 44; + } + + pstatus->signalquality = sq; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; + } + } else { + /* (1)Get RSSI for HT rate */ + for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { + /* we will judge RF RX path now. */ + if (rtlpriv->dm.rfpath_rxenable[i]) + rf_rx_num++; + + rx_pwr[i] = (p_phystrpt->gain_trsw[i] & 0x7f) - 110; + + /* Translate DBM to percentage. */ + rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + + /* Get Rx snr value in DB */ + pstatus->rx_snr[i] = p_phystrpt->rxsnr[i] / 2; + rtlpriv->stats.rx_snr_db[i] = p_phystrpt->rxsnr[i] / 2; + + pstatus->cfo_short[i] = odm_cfo(p_phystrpt->cfosho[i]); + pstatus->cfo_tail[i] = odm_cfo(p_phystrpt->cfotail[i]); + /* Record Signal Strength for next packet */ + pstatus->rx_mimo_signalstrength[i] = (u8)rssi; + } + + /* (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; + + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pstatus->rx_pwdb_all = pwdb_all; + pstatus->rxpower = rx_pwr_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3)EVM of HT rate */ + if ((pstatus->is_ht && pstatus->rate >= DESC_RATEMCS8 && + pstatus->rate <= DESC_RATEMCS15) || + (pstatus->is_vht && + pstatus->rate >= DESC_RATEVHT2SS_MCS0 && + pstatus->rate <= DESC_RATEVHT2SS_MCS9)) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + + for (i = 0; i < max_spatial_stream; i++) { + evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]); + evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]); + + if (bpacket_match_bssid) { + /* Fill value in RFD, Get the first + * spatial stream only + */ + if (i == 0) + pstatus->signalquality = evm; + pstatus->rx_mimo_signalquality[i] = evm; + pstatus->rx_mimo_evm_dbm[i] = evmdbm; + } + } + if (bpacket_match_bssid) { + for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) + rtl_priv(hw)->dm.cfo_tail[i] = + (char)p_phystrpt->cfotail[i]; + + rtl_priv(hw)->dm.packet_count++; + } + } + + /* UI BSS List signal strength(in percentage), + * make it good looking, from 0~100. 
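+ * (As the code below shows: CCK uses the single averaged PWDB value, while non-CCK rates use the RSSI averaged over the active RX paths; both are scaled by rtl_signal_scale_mapping.)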
+ */ + if (is_cck) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + pwdb_all)); + else if (rf_rx_num != 0) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + total_rssi /= rf_rx_num)); + /*HW antenna diversity*/ + rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->antidx_anta; + rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->antidx_antb; +} + +static void translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8821ae *p_drvinfo) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + u8 *tmp_buf; + u8 *praddr; + u8 *psaddr; + __le16 fc; + u16 type; + bool packet_matchbssid, packet_toself, packet_beacon; + + tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; + + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = hdr->frame_control; + type = WLAN_FC_GET_TYPE(hdr->frame_control); + praddr = hdr->addr1; + psaddr = ieee80211_get_SA(hdr); + ether_addr_copy(pstatus->psaddr, psaddr); + + packet_matchbssid = (!ieee80211_is_ctl(fc) && + (ether_addr_equal(mac->bssid, + ieee80211_has_tods(fc) ? + hdr->addr1 : + ieee80211_has_fromds(fc) ? + hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && + (!pstatus->crc) && (!pstatus->icv)); + + packet_toself = packet_matchbssid && + (ether_addr_equal(praddr, rtlefuse->dev_addr)); + + if (ieee80211_is_beacon(hdr->frame_control)) + packet_beacon = true; + else + packet_beacon = false; + + if (packet_beacon && packet_matchbssid) + rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++; + + if (packet_matchbssid && + ieee80211_is_data_qos(hdr->frame_control) && + !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { + struct ieee80211_qos_hdr *hdr_qos = + (struct ieee80211_qos_hdr *)tmp_buf; + u16 tid = le16_to_cpu(hdr_qos->qos_ctrl) & 0xf; + + if (tid != 0 && tid != 3) + rtl_priv(hw)->dm.dbginfo.num_non_be_pkt++; + } + + query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); + /*_rtl8821ae_smart_antenna(hw, pstatus); */ + rtl_process_phyinfo(hw, tmp_buf, pstatus); +} + +static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + u8 *virtualaddress) +{ + u32 dwtmp = 0; + + memset(virtualaddress, 0, 8); + + SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + if (ptcb_desc->empkt_num == 1) { + dwtmp = ptcb_desc->empkt_len[0]; + } else { + dwtmp = ptcb_desc->empkt_len[0]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; + dwtmp += ptcb_desc->empkt_len[1]; + } + SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + + if (ptcb_desc->empkt_num <= 3) { + dwtmp = ptcb_desc->empkt_len[2]; + } else { + dwtmp = ptcb_desc->empkt_len[2]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; + dwtmp += ptcb_desc->empkt_len[3]; + } + SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 5) { + dwtmp = ptcb_desc->empkt_len[4]; + } else { + dwtmp = ptcb_desc->empkt_len[4]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; + dwtmp += ptcb_desc->empkt_len[5]; + } + SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); + SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + if (ptcb_desc->empkt_num <= 7) { + dwtmp = ptcb_desc->empkt_len[6]; + } else { + dwtmp = ptcb_desc->empkt_len[6]; + dwtmp += ((dwtmp % 4) ? 
(4 - dwtmp % 4) : 0)+4; + dwtmp += ptcb_desc->empkt_len[7]; + } + SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 9) { + dwtmp = ptcb_desc->empkt_len[8]; + } else { + dwtmp = ptcb_desc->empkt_len[8]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0)+4; + dwtmp += ptcb_desc->empkt_len[9]; + } + SET_EARLYMODE_LEN4(virtualaddress, dwtmp); +} + +static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 rx_rate = 0; + + rx_rate = GET_RX_DESC_RXMCS(pdesc); + + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate); + + if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15)) + return true; + return false; +} + +static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, u8 *pdesc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 rx_rate = 0; + + rx_rate = GET_RX_DESC_RXMCS(pdesc); + + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate); + + if (rx_rate >= DESC_RATEVHT1SS_MCS0) + return true; + return false; +} + +static u8 rtl8821ae_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc) +{ + u8 rx_rate = 0; + u8 vht_nss = 0; + + rx_rate = GET_RX_DESC_RXMCS(pdesc); + if ((rx_rate >= DESC_RATEVHT1SS_MCS0) && + (rx_rate <= DESC_RATEVHT1SS_MCS9)) + vht_nss = 1; + else if ((rx_rate >= DESC_RATEVHT2SS_MCS0) && + (rx_rate <= DESC_RATEVHT2SS_MCS9)) + vht_nss = 2; + + return vht_nss; +} + +bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rx_fwinfo_8821ae *p_drvinfo; + struct ieee80211_hdr *hdr; + + u32 phystatus = GET_RX_DESC_PHYST(pdesc); + + status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->icv = (u16)GET_RX_DESC_ICV(pdesc); + status->crc = (u16)GET_RX_DESC_CRC32(pdesc); + status->hwerror = (status->crc | status->icv); + status->decrypted = !GET_RX_DESC_SWDEC(pdesc); + status->rate = (u8)GET_RX_DESC_RXMCS(pdesc); + status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc); + status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); + status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); + status->timestamp_low = GET_RX_DESC_TSFL(pdesc); + status->rx_packet_bw = GET_RX_DESC_BW(pdesc); + status->macid = GET_RX_DESC_MACID(pdesc); + status->is_short_gi = !(bool)GET_RX_DESC_SPLCP(pdesc); + status->is_ht = rtl8821ae_get_rxdesc_is_ht(hw, pdesc); + status->is_vht = rtl8821ae_get_rxdesc_is_vht(hw, pdesc); + status->vht_nss = rtl8821ae_get_rx_vht_nss(hw, pdesc); + status->is_cck = RTL8821AE_RX_HAL_IS_CCK_RATE(status->rate); + + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, + "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n", + (status->rx_packet_bw == 2) ? "80M" : + (status->rx_packet_bw == 1) ? 
"40M" : "20M", + status->is_ht, status->is_vht, status->vht_nss, + status->is_short_gi); + + if (GET_RX_STATUS_DESC_RPT_SEL(pdesc)) + status->packet_report_type = C2H_PACKET; + else + status->packet_report_type = NORMAL_RX; + + if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + status->wake_match = BIT(2); + else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + status->wake_match = BIT(1); + else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) + status->wake_match = BIT(0); + else + status->wake_match = 0; + + if (status->wake_match) + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, + "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n", + status->wake_match); + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + + hdr = (struct ieee80211_hdr *)(skb->data + + status->rx_drvinfo_size + status->rx_bufshift); + + if (status->crc) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (status->rx_packet_bw == HT_CHANNEL_WIDTH_20_40) + rx_status->flag |= RX_FLAG_40MHZ; + else if (status->rx_packet_bw == HT_CHANNEL_WIDTH_80) + rx_status->vht_flag |= RX_VHT_FLAG_80MHZ; + if (status->is_ht) + rx_status->flag |= RX_FLAG_HT; + if (status->is_vht) + rx_status->flag |= RX_FLAG_VHT; + + if (status->is_short_gi) + rx_status->flag |= RX_FLAG_SHORT_GI; + + rx_status->vht_nss = status->vht_nss; + rx_status->flag |= RX_FLAG_MACTIME_START; + + /* hw will set status->decrypted true, if it finds the + * frame is open data frame or mgmt frame. + * So hw will not decryption robust managment frame + * for IEEE80211w but still set status->decrypted + * true, so here we should set it back to undecrypted + * for IEEE80211w frame, and mac80211 sw will help + * to decrypt it + */ + if (status->decrypted) { + if ((!_ieee80211_is_robust_mgmt_frame(hdr)) && + (ieee80211_has_protected(hdr->frame_control))) + rx_status->flag |= RX_FLAG_DECRYPTED; + else + rx_status->flag &= ~RX_FLAG_DECRYPTED; + } + + /* rate_idx: index of data rate into band's + * supported rates or MCS index if HT rates + * are use (RX_FLAG_HT) + */ + rx_status->rate_idx = + _rtl8821ae_rate_mapping(hw, status->is_ht, + status->is_vht, status->rate); + + rx_status->mactime = status->timestamp_low; + if (phystatus) { + p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data + + status->rx_bufshift); + + translate_rx_signal_stuff(hw, skb, status, pdesc, p_drvinfo); + } + rx_status->signal = status->recvsignalpower + 10; + if (status->packet_report_type == TX_REPORT2) { + status->macid_valid_entry[0] = + GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + status->macid_valid_entry[1] = + GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + } + return true; +} + +static u8 rtl8821ae_bw_mapping(struct ieee80211_hw *hw, + struct rtl_tcb_desc *ptcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 bw_setting_of_desc = 0; + + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "rtl8821ae_bw_mapping, current_chan_bw %d, packet_bw %d\n", + rtlphy->current_chan_bw, ptcb_desc->packet_bw); + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80) + bw_setting_of_desc = 2; + else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) + bw_setting_of_desc = 1; + else + bw_setting_of_desc = 0; + } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + if ((ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) || + (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)) + bw_setting_of_desc = 1; + else + bw_setting_of_desc = 0; + } else { + bw_setting_of_desc = 0; + } + return 
bw_setting_of_desc; +} + +static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw, + struct rtl_tcb_desc *ptcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 sc_setting_of_desc = 0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80) { + sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; + } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { + if (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) + sc_setting_of_desc = + VHT_DATA_SC_40_LOWER_OF_80MHZ; + else if (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER) + sc_setting_of_desc = + VHT_DATA_SC_40_UPPER_OF_80MHZ; + else + RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD, + "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n"); + } else { + if ((mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_setting_of_desc = + VHT_DATA_SC_20_LOWEST_OF_80MHZ; + else if ((mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_setting_of_desc = + VHT_DATA_SC_20_LOWER_OF_80MHZ; + else if ((mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_setting_of_desc = + VHT_DATA_SC_20_UPPER_OF_80MHZ; + else if ((mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_setting_of_desc = + VHT_DATA_SC_20_UPPERST_OF_80MHZ; + else + RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD, + "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n"); + } + } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) { + sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; + } else if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20) { + if (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_UPPER) { + sc_setting_of_desc = + VHT_DATA_SC_20_UPPER_OF_80MHZ; + } else if (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER){ + sc_setting_of_desc = + VHT_DATA_SC_20_LOWER_OF_80MHZ; + } else { + sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; + } + } + } else { + sc_setting_of_desc = VHT_DATA_SC_DONOT_CARE; + } + + return sc_setting_of_desc; +} + +void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 *pdesc = (u8 *)pdesc_tx; + u16 seq_number; + __le16 fc = hdr->frame_control; + unsigned int buf_len = 0; + unsigned int skb_len = skb->len; + u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue); + bool firstseg = ((hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool lastseg = ((hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + dma_addr_t mapping; + u8 short_gi = 0; + + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); + /* reserve 8 byte for AMPDU early mode */ + if (rtlhal->earlymode_enable) { + skb_push(skb, EM_HDR_LEN); + memset(skb->data, 0, EM_HDR_LEN); + } + buf_len = skb->len; + mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, 
+ PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "DMA mapping error"); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae)); + if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { + firstseg = true; + lastseg = true; + } + if (firstseg) { + if (rtlhal->earlymode_enable) { + SET_TX_DESC_PKT_OFFSET(pdesc, 1); + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + + EM_HDR_LEN); + if (ptcb_desc->empkt_num) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Insert 8 byte.pTcb->EMPktNum:%d\n", + ptcb_desc->empkt_num); + _rtl8821ae_insert_emcontent(ptcb_desc, + (u8 *)(skb->data)); + } + } else { + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + } + + /* ptcb_desc->use_driver_rate = true; */ + SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + if (ptcb_desc->hw_rate > DESC_RATEMCS0) + short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; + else + short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; + + SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + SET_TX_DESC_AGG_ENABLE(pdesc, 1); + SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x1f); + } + SET_TX_DESC_SEQ(pdesc, seq_number); + SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && + !ptcb_desc->cts_enable) ? 1 : 0)); + SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); + SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0)); + + SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); + SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); + SET_TX_DESC_RTS_SHORT(pdesc, + ((ptcb_desc->rts_rate <= DESC_RATE54M) ? + (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : + (ptcb_desc->rts_use_shortgi ? 1 : 0))); + + if (ptcb_desc->tx_enable_sw_calc_duration) + SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + + SET_TX_DESC_DATA_BW(pdesc, + rtl8821ae_bw_mapping(hw, ptcb_desc)); + + SET_TX_DESC_TX_SUB_CARRIER(pdesc, + rtl8821ae_sc_mapping(hw, ptcb_desc)); + + SET_TX_DESC_LINIP(pdesc, 0); + SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len); + if (sta) { + u8 ampdu_density = sta->ht_cap.ampdu_density; + + SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + } + if (info->control.hw_key) { + struct ieee80211_key_conf *keyconf = + info->control.hw_key; + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + break; + } + } + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); + SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); + SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? + 1 : 0); + SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Enable RDG function.\n"); + SET_TX_DESC_RDG_ENABLE(pdesc, 1); + SET_TX_DESC_HTC(pdesc, 1); + } + } + } + + SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); + SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 
1 : 0)); + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len); + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + /* if (rtlpriv->dm.useramask) { */ + if (1) { + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } else { + SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } + if (!ieee80211_is_data_qos(fc)) { + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + SET_TX_DESC_HWSEQ_SEL(pdesc, 0); + } + SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1)); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { + SET_TX_DESC_BMC(pdesc, 1); + } + + rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id); + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); +} + +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, + u8 *pdesc, bool firstseg, + bool lastseg, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 fw_queue = QSLT_BEACON; + + dma_addr_t mapping = pci_map_single(rtlpci->pdev, + skb->data, skb->len, + PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "DMA mapping error"); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); + + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + + SET_TX_DESC_USE_RATE(pdesc, 1); + SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); + SET_TX_DESC_DISABLE_FB(pdesc, 1); + + SET_TX_DESC_DATA_BW(pdesc, 0); + + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); + + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + + SET_TX_DESC_MACID(pdesc, 0); + + SET_TX_DESC_OWN(pdesc, 1); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C Tx Cmd Content\n", + pdesc, TX_DESC_SIZE); +} + +void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val) +{ + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + SET_TX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_TX_NEXTDESC_ADDR: + SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); + break; + default: + RT_ASSERT(false, + "ERR txdesc :%d not process\n", desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_RXOWN: + SET_RX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_RXBUFF_ADDR: + SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); + break; + case HW_DESC_RXPKT_LEN: + SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); + break; + case HW_DESC_RXERO: + SET_RX_DESC_EOR(pdesc, 1); + break; + default: + RT_ASSERT(false, + "ERR rxdesc :%d not process\n", desc_name); + break; + } + } +} + +u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) +{ + u32 ret = 0; + + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_TX_DESC_OWN(pdesc); + break; + case HW_DESC_TXBUFF_ADDR: + ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + break; + default: + RT_ASSERT(false, + "ERR txdesc :%d not process\n", desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_RX_DESC_OWN(pdesc); + break; + case HW_DESC_RXPKT_LEN: + ret = GET_RX_DESC_PKT_LEN(pdesc); + break; + case HW_DESC_RXBUFF_ADDR: + ret = GET_RX_DESC_BUFF_ADDR(pdesc); + break; + default: + RT_ASSERT(false, + "ERR rxdesc :%d not process\n", desc_name); + break; + } + } + return ret; +} + +bool 
rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl8821ae_get_desc(entry, true, HW_DESC_OWN); + + /* + * A beacon packet will only use the first + * descriptor by default, and the own bit may not + * be cleared by the hardware + */ + if (own) + return false; + return true; +} + +void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (hw_queue == BEACON_QUEUE) { + rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4)); + } else { + rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, + BIT(0) << (hw_queue)); + } +} + +u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb) +{ + u32 result = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (status.packet_report_type) { + case NORMAL_RX: + result = 0; + break; + case C2H_PACKET: + rtl8821ae_c2h_packet_handler(hw, skb->data, (u8)skb->len); + result = 1; + RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, + "skb->len=%d\n\n", skb->len); + break; + default: + RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, + "Unknown packet type!\n"); + break; + } + + return result; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h new file mode 100644 index 0000000000000000000000000000000000000000..31409042d8dd313cf3905c1a873ea2011963b3e9 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h @@ -0,0 +1,620 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan.
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_TRX_H__ +#define __RTL8821AE_TRX_H__ + +#define TX_DESC_SIZE 40 +#define TX_DESC_AGGR_SUBFRAME_SIZE 32 + +#define RX_DESC_SIZE 32 +#define RX_DRV_INFO_SIZE_UNIT 8 + +#define TX_DESC_NEXT_DESC_OFFSET 40 +#define USB_HWDESC_HEADER_LEN 40 +#define CRCLENGTH 4 + +#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) +#define SET_TX_DESC_BMC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) +#define SET_TX_DESC_HTC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) +#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) +#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) +#define SET_TX_DESC_LINIP(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) +#define SET_TX_DESC_GF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_TX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 16) +#define GET_TX_DESC_OFFSET(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 8) +#define GET_TX_DESC_BMC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 1) +#define GET_TX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 25, 1) +#define GET_TX_DESC_LAST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_TX_DESC_LINIP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_TX_DESC_NO_ACM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_TX_DESC_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_TX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_TX_DESC_MACID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val) +#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) +#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) +#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) +#define SET_TX_DESC_PIFS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) +#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val) +#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) +#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) +#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val) + +#define SET_TX_DESC_PAID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val) +#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val) +#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) +#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) +#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) +#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) +#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) +#define SET_TX_DESC_RAW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) +#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) +#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) +#define SET_TX_DESC_BT_INT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) +#define SET_TX_DESC_GID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val) + +#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val) +#define SET_TX_DESC_CHK_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val) +#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val) +#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val) +#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val) +#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val) +#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val) +#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val) +#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val) +#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val) +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val) +#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val) +#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val) +#define SET_TX_DESC_NDPA(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val) +#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val) +#define SET_TX_DESC_TX_ANT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val) + +#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val) +#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val) +#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val) +#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val) +#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val) +#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val) + +#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val) +#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ + SET_BITS_TO_LE_1BYTE(__pdesc+20, 4, 1, __val) +#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val) +#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) +#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val) +#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val) +#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val) +#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) + +#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) + +#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) + +#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val) + +#define SET_TX_DESC_SEQ(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val) + +#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) + +#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) + +#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val) + +#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+48, 0, 32) + +#define GET_RX_DESC_PKT_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 14) +#define GET_RX_DESC_CRC32(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 14, 1) +#define GET_RX_DESC_ICV(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 15, 1) +#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 4) +#define GET_RX_DESC_SECURITY(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 20, 3) +#define GET_RX_DESC_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 23, 1) +#define GET_RX_DESC_SHIFT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 2) +#define GET_RX_DESC_PHYST(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_RX_DESC_SWDEC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_RX_DESC_LS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_RX_DESC_FS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_RX_DESC_EOR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_RX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) +#define SET_RX_DESC_EOR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_RX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_RX_DESC_MACID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 0, 7) +#define GET_RX_DESC_TID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 8, 4) +#define GET_RX_DESC_AMSDU(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) +#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) +#define GET_RX_DESC_PAGGR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) +#define GET_RX_DESC_A1_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) +#define GET_RX_DESC_CHKERR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) +#define GET_RX_DESC_IPVER(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) +#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) +#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) +#define GET_RX_DESC_PAM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) +#define GET_RX_DESC_PWR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) +#define GET_RX_DESC_MD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) +#define GET_RX_DESC_MF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) +#define GET_RX_DESC_TYPE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) +#define GET_RX_DESC_MC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) +#define GET_RX_DESC_BC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) + +#define GET_RX_DESC_SEQ(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) +#define GET_RX_DESC_FRAG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) +#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) +#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 
18, 6) +#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 28, 1) + +#define GET_RX_DESC_RXMCS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) +#define GET_RX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) +#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) +#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) + +#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) +#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) +#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) + +#define GET_RX_DESC_SPLCP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 0, 1) +#define GET_RX_STATUS_DESC_LDPC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 1, 1) +#define GET_RX_STATUS_DESC_STBC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 2, 1) +#define GET_RX_DESC_BW(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 4, 2) + +#define GET_RX_DESC_TSFL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) + +#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) +#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) + +#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) +#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) + +/* TX report 2 format in Rx desc*/ + +#define GET_RX_RPT2_DESC_PKT_LEN(__status) \ + LE_BITS_TO_4BYTE(__status, 0, 9) +#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \ + LE_BITS_TO_4BYTE(__status+16, 0, 32) +#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \ + LE_BITS_TO_4BYTE(__status+20, 0, 32) + +#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value) +#define SET_EARLYMODE_LEN0(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value) +#define SET_EARLYMODE_LEN1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value) +#define SET_EARLYMODE_LEN2_1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value) +#define SET_EARLYMODE_LEN2_2(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value) +#define SET_EARLYMODE_LEN3(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value) +#define SET_EARLYMODE_LEN4(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value) + +#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ +do { \ + if (_size > TX_DESC_NEXT_DESC_OFFSET) \ + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ + else \ + memset(__pdesc, 0, _size); \ +} while (0) + +#define RTL8821AE_RX_HAL_IS_CCK_RATE(rxmcs)\ + (rxmcs == DESC_RATE1M ||\ + rxmcs == DESC_RATE2M ||\ + rxmcs == DESC_RATE5_5M ||\ + rxmcs == DESC_RATE11M) + +struct phy_rx_agc_info_t { + #ifdef __LITTLE_ENDIAN + u8 gain:7, trsw:1; + #else + u8 trsw:1, gain:7; + #endif +}; + +struct phy_status_rpt { + /* DWORD 0 */ + u8 gain_trsw[2]; +#ifdef __LITTLE_ENDIAN + u16 chl_num:10; + u16 sub_chnl:4; + u16 r_rfmod:2; +#else /* _BIG_ENDIAN_ */ + u16 r_rfmod:2; + u16 sub_chnl:4; + u16 chl_num:10; +#endif + /* DWORD 1 */ + u8 pwdb_all; + u8 cfosho[4]; /* DW 1 byte 1 DW 2 byte 0 */ + + /* DWORD 2 */ + char cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */ + + /* DWORD 3 */ + char rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */ + char rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */ + + /* DWORD 4 */ + u8 pcts_msk_rpt[2]; + u8 pdsnr[2]; /* DW 4 byte 3 DW 5 Byte 0 */ + + /* DWORD 5 */ + u8 csi_current[2]; + u8 rx_gain_c; + + /* DWORD 6 */ + u8 
rx_gain_d; + u8 sigevm; + u8 resvd_0; + u8 antidx_anta:3; + u8 antidx_antb:3; + u8 resvd_1:2; +} __packed; + +struct rx_fwinfo_8821ae { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + char rxevm[2]; + char rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +struct tx_desc_8821ae { + u32 pktsize:16; + u32 offset:8; + u32 bmc:1; + u32 htc:1; + u32 lastseg:1; + u32 firstseg:1; + u32 linip:1; + u32 noacm:1; + u32 gf:1; + u32 own:1; + + u32 macid:6; + u32 rsvd0:2; + u32 queuesel:5; + u32 rd_nav_ext:1; + u32 lsig_txop_en:1; + u32 pifs:1; + u32 rateid:4; + u32 nav_usehdr:1; + u32 en_descid:1; + u32 sectype:2; + u32 pktoffset:8; + + u32 rts_rc:6; + u32 data_rc:6; + u32 agg_en:1; + u32 rdg_en:1; + u32 bar_retryht:2; + u32 agg_break:1; + u32 morefrag:1; + u32 raw:1; + u32 ccx:1; + u32 ampdudensity:3; + u32 bt_int:1; + u32 ant_sela:1; + u32 ant_selb:1; + u32 txant_cck:2; + u32 txant_l:2; + u32 txant_ht:2; + + u32 nextheadpage:8; + u32 tailpage:8; + u32 seq:12; + u32 cpu_handle:1; + u32 tag1:1; + u32 trigger_int:1; + u32 hwseq_en:1; + + u32 rtsrate:5; + u32 apdcfe:1; + u32 qos:1; + u32 hwseq_ssn:1; + u32 userrate:1; + u32 dis_rtsfb:1; + u32 dis_datafb:1; + u32 cts2self:1; + u32 rts_en:1; + u32 hwrts_en:1; + u32 portid:1; + u32 pwr_status:3; + u32 waitdcts:1; + u32 cts2ap_en:1; + u32 txsc:2; + u32 stbc:2; + u32 txshort:1; + u32 txbw:1; + u32 rtsshort:1; + u32 rtsbw:1; + u32 rtssc:2; + u32 rtsstbc:2; + + u32 txrate:6; + u32 shortgi:1; + u32 ccxt:1; + u32 txrate_fb_lmt:5; + u32 rtsrate_fb_lmt:4; + u32 retrylmt_en:1; + u32 txretrylmt:6; + u32 usb_txaggnum:8; + + u32 txagca:5; + u32 txagcb:5; + u32 usemaxlen:1; + u32 maxaggnum:5; + u32 mcsg1maxlen:4; + u32 mcsg2maxlen:4; + u32 mcsg3maxlen:4; + u32 mcs7sgimaxlen:4; + + u32 txbuffersize:16; + u32 sw_offset30:8; + u32 sw_offset31:4; + u32 rsvd1:1; + u32 antsel_c:1; + u32 null_0:1; + u32 null_1:1; + + u32 txbuffaddr; + u32 txbufferaddr64; + u32 nextdescaddress; + u32 nextdescaddress64; + + u32 reserve_pass_pcie_mm_limit[4]; +} __packed; + +struct rx_desc_8821ae { + u32 length:14; + u32 crc32:1; + u32 icverror:1; + u32 drv_infosize:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phystatus:1; + u32 swdec:1; + u32 lastseg:1; + u32 firstseg:1; + u32 eor:1; + u32 own:1; + + u32 macid:6; + u32 tid:4; + u32 hwrsvd:5; + u32 paggr:1; + u32 faggr:1; + u32 a1_fit:4; + u32 a2_fit:4; + u32 pam:1; + u32 pwr:1; + u32 moredata:1; + u32 morefrag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 nextpktlen:14; + u32 nextind:1; + u32 rsvd:1; + + u32 rxmcs:6; + u32 rxht:1; + u32 amsdu:1; + u32 splcp:1; + u32 bandwidth:1; + u32 htc:1; + u32 tcpchk_rpt:1; + u32 ipcchk_rpt:1; + u32 tcpchk_valid:1; + u32 hwpcerr:1; + u32 hwpcind:1; + u32 iv0:16; + + u32 iv1; + + u32 tsfl; + + u32 bufferaddress; + u32 bufferaddress64; + +} __packed; + +void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); +bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, + bool istx, u8 desc_name, u8 *val); +u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool 
rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); +void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool firstseg, bool lastseg, + struct sk_buff *skb); +u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb); +#endif diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c index 4f083fc1d3607c41eb5a471f30ff04c3d234e557..2d0736a09fc0e0a590724ff4415134018b3f8293 100644 --- a/drivers/net/wireless/rtlwifi/stats.c +++ b/drivers/net/wireless/rtlwifi/stats.c @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -59,8 +55,23 @@ u8 rtl_evm_db_to_percentage(char value) } EXPORT_SYMBOL(rtl_evm_db_to_percentage); +u8 rtl_evm_dbm_jaguar(char value) +{ + char ret_val = value; + + /* -33dB~0dB to 33dB ~ 0dB*/ + if (ret_val == -128) + ret_val = 127; + else if (ret_val < 0) + ret_val = 0 - ret_val; + + ret_val = ret_val >> 1; + return ret_val; +} +EXPORT_SYMBOL(rtl_evm_dbm_jaguar); + static long rtl_translate_todbm(struct ieee80211_hw *hw, - u8 signal_strength_index) + u8 signal_strength_index) { long signal_power; @@ -106,6 +117,10 @@ static void rtl_process_ui_rssi(struct ieee80211_hw *hw, u8 rfpath; u32 last_rssi, tmpval; + if (!pstatus->packet_toself && !pstatus->packet_beacon) + return; + + rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all; rtlpriv->stats.rssi_calculate_cnt++; if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) { @@ -151,6 +166,12 @@ static void rtl_process_ui_rssi(struct ieee80211_hw *hw, (pstatus->rx_mimo_signalstrength[rfpath])) / (RX_SMOOTH_FACTOR); } + rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath]; + rtlpriv->stats.rx_evm_dbm[rfpath] = + pstatus->rx_mimo_evm_dbm[rfpath]; + rtlpriv->stats.rx_cfo_short[rfpath] = + pstatus->cfo_short[rfpath]; + rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath]; } } @@ -176,7 +197,6 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus) struct rtl_sta_info *drv_priv = NULL; struct ieee80211_sta *sta = NULL; long undec_sm_pwdb; - long undec_sm_cck; rcu_read_lock(); if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) @@ -186,33 +206,21 @@ static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus) if (sta) { drv_priv = (struct rtl_sta_info *) sta->drv_priv; undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; - undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck; } else { undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; - undec_sm_cck = rtlpriv->dm.undec_sm_cck; } if (undec_sm_pwdb < 0) undec_sm_pwdb = pstatus->rx_pwdb_all; - if (undec_sm_cck < 0) - undec_sm_cck = pstatus->rx_pwdb_all; if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) { undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); undec_sm_pwdb = undec_sm_pwdb + 1; } else { - undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) + - (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); - } - if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) { - undec_sm_cck = (((undec_sm_pwdb) * + undec_sm_pwdb = 
(((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); - undec_sm_cck = undec_sm_cck + 1; - } else { - undec_sm_pwdb = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) + - (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); } if (sta) { @@ -245,7 +253,7 @@ static void rtl_process_ui_link_quality(struct ieee80211_hw *hw, rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality; rtlpriv->stats.ui_link_quality.elements[ rtlpriv->stats.ui_link_quality.index++] = - pstatus->signalquality; + pstatus->signalquality; if (rtlpriv->stats.ui_link_quality.index >= PHY_LINKQUALITY_SLID_WIN_MAX) rtlpriv->stats.ui_link_quality.index = 0; @@ -269,7 +277,7 @@ static void rtl_process_ui_link_quality(struct ieee80211_hw *hw, } void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, - struct rtl_stats *pstatus) + struct rtl_stats *pstatus) { if (!pstatus->packet_matchbssid) diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/rtlwifi/stats.h index 0dbdc5203830beaf1d84184f04de4643268c2ce1..aa4eec80ccf7a7588e3ec58f817172130fcc4a1d 100644 --- a/drivers/net/wireless/rtlwifi/stats.h +++ b/drivers/net/wireless/rtlwifi/stats.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -39,8 +35,9 @@ u8 rtl_query_rxpwrpercentage(char antpower); u8 rtl_evm_db_to_percentage(char value); +u8 rtl_evm_dbm_jaguar(char value); long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig); void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, - struct rtl_stats *pstatus); + struct rtl_stats *pstatus); #endif diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 0398d3ea15b00b6c68b4b1e160700df276f90254..10cf69c4bc42ea93adefdab5569cbe90a19a4a63 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -75,11 +75,11 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, pipe = usb_sndctrlpipe(udev, 0); /* write_out */ reqtype = REALTEK_USB_VENQT_WRITE; - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + dr = kzalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) return -ENOMEM; - databuf = kmalloc(databuf_maxlen, GFP_ATOMIC); + databuf = kzalloc(databuf_maxlen, GFP_ATOMIC); if (!databuf) { kfree(dr); return -ENOMEM; diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 407a7936d3642f39fb4fb0364ab0568e20f75153..976667ae85498feeb7abb06b505a254706dcefee 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -11,10 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* @@ -147,7 +143,27 @@ #define FCS_LEN 4 #define EM_HDR_LEN 8 +enum rtl8192c_h2c_cmd { + H2C_AP_OFFLOAD = 0, + H2C_SETPWRMODE = 1, + H2C_JOINBSSRPT = 2, + H2C_RSVDPAGE = 3, + H2C_RSSI_REPORT = 5, + H2C_RA_MASK = 6, + H2C_MACID_PS_MODE = 7, + H2C_P2P_PS_OFFLOAD = 8, + H2C_MAC_MODE_SEL = 9, + H2C_PWRM = 15, + H2C_P2P_PS_CTW_CMD = 24, + MAX_H2CCMD +}; + #define MAX_TX_COUNT 4 +#define MAX_REGULATION_NUM 4 +#define MAX_RF_PATH_NUM 4 +#define MAX_RATE_SECTION_NUM 6 +#define MAX_2_4G_BANDWITH_NUM 4 +#define MAX_5G_BANDWITH_NUM 4 #define MAX_RF_PATH 4 #define MAX_CHNL_GROUP_24G 6 #define MAX_CHNL_GROUP_5G 14 @@ -163,6 +179,12 @@ #define DEL_SW_IDX_SZ 30 #define BAND_NUM 3 +/* For now, it's just for 8192ee + * but not OK yet, keep it 0 + */ +#define DMA_IS_64BIT 0 +#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */ + enum rf_tx_num { RF_1TX = 0, RF_2TX, @@ -170,6 +192,36 @@ enum rf_tx_num { RF_TX_NUM_NONIMPLEMENT, }; +#define PACKET_NORMAL 0 +#define PACKET_DHCP 1 +#define PACKET_ARP 2 +#define PACKET_EAPOL 3 + +#define MAX_SUPPORT_WOL_PATTERN_NUM 16 +#define RSVD_WOL_PATTERN_NUM 1 +#define WKFMCAM_ADDR_NUM 6 +#define WKFMCAM_SIZE 24 + +#define MAX_WOL_BIT_MASK_SIZE 16 +/* MIN LEN keeps 13 here */ +#define MIN_WOL_PATTERN_SIZE 13 +#define MAX_WOL_PATTERN_SIZE 128 + +#define WAKE_ON_MAGIC_PACKET BIT(0) +#define WAKE_ON_PATTERN_MATCH BIT(1) + +#define WOL_REASON_PTK_UPDATE BIT(0) +#define WOL_REASON_GTK_UPDATE BIT(1) +#define WOL_REASON_DISASSOC BIT(2) +#define WOL_REASON_DEAUTH BIT(3) +#define WOL_REASON_AP_LOST BIT(4) +#define WOL_REASON_MAGIC_PKT BIT(5) +#define WOL_REASON_UNICAST_PKT BIT(6) +#define WOL_REASON_PATTERN_PKT BIT(7) +#define WOL_REASON_RTD3_SSID_MATCH BIT(8) +#define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9) +#define WOL_REASON_REALWOW_V2_ACKLOST BIT(10) + struct txpower_info_2g { u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; @@ -213,6 +265,15 @@ enum radio_path { RF90_PATH_D = 3, }; +enum regulation_txpwr_lmt { + TXPWR_LMT_FCC = 0, + TXPWR_LMT_MKK = 1, + TXPWR_LMT_ETSI = 2, + TXPWR_LMT_WW = 3, + + TXPWR_LMT_MAX_REGULATION_NUM = 4 +}; + enum rt_eeprom_type { EEPROM_93C46, EEPROM_93C56, @@ -234,8 +295,9 @@ enum hardware_type { HARDWARE_TYPE_RTL8192DU, HARDWARE_TYPE_RTL8723AE, HARDWARE_TYPE_RTL8723U, - HARDWARE_TYPE_RTL8723BE, HARDWARE_TYPE_RTL8188EE, + HARDWARE_TYPE_RTL8723BE, + HARDWARE_TYPE_RTL8192EE, HARDWARE_TYPE_RTL8821AE, HARDWARE_TYPE_RTL8812AE, @@ -268,13 +330,7 @@ enum hardware_type { #define IS_HARDWARE_TYPE_8723(rtlhal) \ (IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) -#define RX_HAL_IS_CCK_RATE(_pdesc)\ - (_pdesc->rxmcs == DESC92_RATE1M || \ - _pdesc->rxmcs == DESC92_RATE2M || \ - _pdesc->rxmcs == DESC92_RATE5_5M || \ - _pdesc->rxmcs == DESC92_RATE11M) - -#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \ +#define RX_HAL_IS_CCK_RATE(rxmcs) \ ((rxmcs) == DESC92_RATE1M || \ (rxmcs) == DESC92_RATE2M || \ (rxmcs) == DESC92_RATE5_5M || \ @@ -339,6 +395,7 @@ enum hw_variables { HW_VAR_DEFAULTKEY2, HW_VAR_DEFAULTKEY3, HW_VAR_SIFS, + HW_VAR_R2T_SIFS, HW_VAR_DIFS, HW_VAR_EIFS, HW_VAR_SLOT_TIME, @@ -390,6 +447,7 @@ enum hw_variables { HW_VAR_H2C_FW_MEDIASTATUSRPT, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, HW_VAR_FW_PSMODE_STATUS, + HW_VAR_INIT_RTS_RATE, HW_VAR_RESUME_CLK_ON, HW_VAR_FW_LPS_ACTION, HW_VAR_1X1_RECV_COMBINE, @@ -428,7 +486,7 @@ enum hw_variables { HW_VAR_DATA_FILTER, }; -enum _RT_MEDIA_STATUS { +enum rt_media_status { RT_MEDIA_DISCONNECT = 0, RT_MEDIA_CONNECT = 1 }; @@ -630,6 +688,7 @@ enum 
rtl_var_map { RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ RTL_IMR_ROK, /*Receive DMA OK Interrupt */ + RTL_IMR_HSISR_IND, /*HSISR Interrupt*/ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK | * RTL_IMR_TBDER) */ RTL_IMR_C2HCMD, /*fw interrupt*/ @@ -653,6 +712,13 @@ enum rtl_var_map { RTL_RC_HT_RATEMCS7, RTL_RC_HT_RATEMCS15, + RTL_RC_VHT_RATE_1SS_MCS7, + RTL_RC_VHT_RATE_1SS_MCS8, + RTL_RC_VHT_RATE_1SS_MCS9, + RTL_RC_VHT_RATE_2SS_MCS7, + RTL_RC_VHT_RATE_2SS_MCS8, + RTL_RC_VHT_RATE_2SS_MCS9, + /*keep it last */ RTL_VAR_MAP_MAX, }; @@ -744,7 +810,9 @@ enum wireless_mode { WIRELESS_MODE_N_24G = 0x10, WIRELESS_MODE_N_5G = 0x20, WIRELESS_MODE_AC_5G = 0x40, - WIRELESS_MODE_AC_24G = 0x80 + WIRELESS_MODE_AC_24G = 0x80, + WIRELESS_MODE_AC_ONLY = 0x100, + WIRELESS_MODE_MAX = 0x800 }; #define IS_WIRELESS_MODE_A(wirelessmode) \ @@ -798,6 +866,30 @@ enum rt_polarity_ctl { RT_POLARITY_HIGH_ACT = 1, }; +/* After 8188E, we use V2 reason define. 88C/8723A use V1 reason. */ +enum fw_wow_reason_v2 { + FW_WOW_V2_PTK_UPDATE_EVENT = 0x01, + FW_WOW_V2_GTK_UPDATE_EVENT = 0x02, + FW_WOW_V2_DISASSOC_EVENT = 0x04, + FW_WOW_V2_DEAUTH_EVENT = 0x08, + FW_WOW_V2_FW_DISCONNECT_EVENT = 0x10, + FW_WOW_V2_MAGIC_PKT_EVENT = 0x21, + FW_WOW_V2_UNICAST_PKT_EVENT = 0x22, + FW_WOW_V2_PATTERN_PKT_EVENT = 0x23, + FW_WOW_V2_RTD3_SSID_MATCH_EVENT = 0x24, + FW_WOW_V2_REALWOW_V2_WAKEUPPKT = 0x30, + FW_WOW_V2_REALWOW_V2_ACKLOST = 0x31, + FW_WOW_V2_REASON_MAX = 0xff, +}; + +enum wolpattern_type { + UNICAST_PATTERN = 0, + MULTICAST_PATTERN = 1, + BROADCAST_PATTERN = 2, + DONT_CARE_DA = 3, + UNKNOWN_TYPE = 4, +}; + struct octet_string { u8 *octet; u16 length; @@ -898,6 +990,7 @@ struct wireless_stats { long last_sigstrength_inpercent; u32 rssi_calculate_cnt; + u32 pwdb_all_cnt; /*Transformed, in dbm. Beautified signal strength for UI, not correct. 
*/ @@ -1106,6 +1199,8 @@ struct rtl_phy { u8 pwrgroup_cnt; u8 cck_high_power; + /* this is for 88E & 8723A */ + u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16]; /* MAX_PG_GROUP groups of pwr diff by rates */ u32 mcs_offset[MAX_PG_GROUP][16]; u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND] @@ -1126,6 +1221,17 @@ struct rtl_phy { u8 cur_bw20_txpwridx; u8 cur_bw40_txpwridx; + char txpwr_limit_2_4g[MAX_REGULATION_NUM] + [MAX_2_4G_BANDWITH_NUM] + [MAX_RATE_SECTION_NUM] + [CHANNEL_MAX_NUMBER_2G] + [MAX_RF_PATH_NUM]; + char txpwr_limit_5g[MAX_REGULATION_NUM] + [MAX_5G_BANDWITH_NUM] + [MAX_RATE_SECTION_NUM] + [CHANNEL_MAX_NUMBER_5G] + [MAX_RF_PATH_NUM]; + u32 rfreg_chnlval[2]; bool apk_done; u32 reg_rf3c[2]; /* pathA / pathB */ @@ -1249,6 +1355,17 @@ struct rtl_mac { /* skb wait queue */ struct sk_buff_head skb_waitq[MAX_TID_COUNT]; + u8 ht_stbc_cap; + u8 ht_cur_stbc; + + /*vht support*/ + u8 vht_enable; + u8 bw_80; + u8 vht_cur_ldpc; + u8 vht_cur_stbc; + u8 vht_stbc_cap; + u8 vht_ldpc_cap; + /*RDG*/ bool rdg_en; @@ -1261,7 +1378,7 @@ struct rtl_mac { u8 sgi_40; u8 sgi_20; u8 bw_40; - u8 mode; /* wireless mode */ + u16 mode; /* wireless mode */ u8 slot_time; u8 short_preamble; u8 use_cts_protect; @@ -1358,6 +1475,18 @@ struct rtl_hal { u32 version; /*version of chip */ u8 state; /*stop 0, start 1 */ u8 board_type; + u8 external_pa; + + u8 pa_mode; + u8 pa_type_2g; + u8 pa_type_5g; + u8 lna_type_2g; + u8 lna_type_5g; + u8 external_pa_2g; + u8 external_lna_2g; + u8 external_pa_5g; + u8 external_lna_5g; + u8 rfe_type; /*firmware */ u32 fwsize; @@ -1413,6 +1542,20 @@ struct rtl_hal { u16 rx_tag;/*for 92ee*/ u8 rts_en; + + /*for wowlan*/ + bool wow_enable; + bool enter_pnp_sleep; + bool wake_from_pnp_sleep; + bool wow_enabled; + __kernel_time_t last_suspend_sec; + u32 wowlan_fwsize; + u8 *wowlan_firmware; + + u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/ + + bool real_wow_v2_enable; + bool re_init_llt_table; }; struct rtl_security { @@ -1759,6 +1902,15 @@ struct rtl_ps_ctl { struct rtl_p2p_ps_info p2p_ps_info; u8 pwr_mode; u8 smart_ps; + + /* wake up on line */ + u8 wo_wlan_mode; + u8 arp_offload_enable; + u8 gtk_offload_enable; + /* Used for WOL, indicates the reason for waking event.*/ + u32 wakeup_reason; + /* Record the last waking time for comparison with setting key. 
*/ + u64 last_wakeup_time; }; struct rtl_stats { @@ -1794,17 +1946,26 @@ struct rtl_stats { u16 wakeup:1; u32 timestamp_low; u32 timestamp_high; + bool shift; u8 rx_drvinfo_size; u8 rx_bufshift; bool isampdu; bool isfirst_ampdu; bool rx_is40Mhzpacket; + u8 rx_packet_bw; u32 rx_pwdb_all; u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ + s8 rx_mimo_signalquality[4]; + u8 rx_mimo_evm_dbm[4]; + u16 cfo_short[4]; /* per-path's Cfo_short */ + u16 cfo_tail[4]; + s8 rx_mimo_sig_qual[4]; u8 rx_pwr[4]; /* per-path's pwdb */ u8 rx_snr[4]; /* per-path's SNR */ + u8 bandwidth; + u8 bt_coex_pwr_adjust; bool packet_matchbssid; bool is_cck; bool is_ht; @@ -1812,6 +1973,10 @@ struct rtl_stats { bool packet_beacon; /*for rssi */ char cck_adc_pwdb[4]; /*for rx path selection */ + bool is_vht; + bool is_short_gi; + u8 vht_nss; + u8 packet_report_type; u32 macid; @@ -1844,7 +2009,7 @@ struct rt_link_detect { }; struct rtl_tcb_desc { - u8 packet_bw:1; + u8 packet_bw:2; u8 multicast:1; u8 broadcast:1; @@ -1874,11 +2039,19 @@ struct rtl_tcb_desc { u8 empkt_num; /* The max value by HW */ u32 empkt_len[10]; - bool btx_enable_sw_calc_duration; + bool tx_enable_sw_calc_duration; }; struct rtl92c_firmware_header; +struct rtl_wow_pattern { + u8 type; + u16 crc; + u32 mask[4]; +}; + +struct rtl8723e_firmware_header; + struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); @@ -1928,7 +2101,6 @@ struct rtl_hal_ops { void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); - bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb); bool (*query_rx_desc) (struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, @@ -1983,9 +2155,12 @@ struct rtl_hal_ops { void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); bool (*get_btc_status) (void); - bool (*is_fw_header) (struct rtl92c_firmware_header *hdr); + bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr); u32 (*rx_command_packet)(struct ieee80211_hw *hw, struct rtl_stats status, struct sk_buff *skb); + void (*add_wowlan_pattern)(struct ieee80211_hw *hw, + struct rtl_wow_pattern *rtl_pattern, + u8 index); }; struct rtl_intf_ops { @@ -2000,7 +2175,7 @@ struct rtl_intf_ops { struct ieee80211_sta *sta, struct sk_buff *skb, struct rtl_tcb_desc *ptcb_desc); - void (*flush)(struct ieee80211_hw *hw, bool drop); + void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop); int (*reset_trx_ring) (struct ieee80211_hw *hw); bool (*waitq_insert) (struct ieee80211_hw *hw, struct ieee80211_sta *sta, @@ -2029,9 +2204,13 @@ struct rtl_mod_params { /* default: 1 = using linked fw power save */ bool fwctrl_lps; - /* default: 0 = not using MSI interrupts mode */ - /* submodules should set their own defalut value */ + /* default: 0 = not using MSI interrupts mode + * submodules should set their own default value + */ bool msi_support; + + /* default 0: 1 means disable */ + bool disable_watchdog; }; struct rtl_hal_usbint_cfg { @@ -2312,10 +2491,11 @@ struct rtl_btc_ops { void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); + void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, - enum 
_RT_MEDIA_STATUS mstatus); + enum rt_media_status mstatus); void (*btc_periodical) (struct rtl_priv *rtlpriv); void (*btc_halt_notify) (void); void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, @@ -2323,6 +2503,8 @@ struct rtl_btc_ops { bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); + void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv, + u8 pkt_type); }; struct proxim { @@ -2336,6 +2518,8 @@ struct proxim { struct rtl_priv { struct ieee80211_hw *hw; + /* Used to load a second firmware */ + void (*rtl_fw_second_cb)(struct rtl_priv *rtlpriv); struct completion firmware_loading_complete; struct list_head list; struct rtl_priv *buddy_priv; @@ -2411,6 +2595,9 @@ struct rtl_priv { */ bool use_new_trx_flow; +#ifdef CONFIG_PM + struct wiphy_wowlan_support wowlan; +#endif /*This must be the last item so that it points to the data allocated beyond this structure like: @@ -2659,6 +2846,26 @@ value to host byte ordering.*/ (des)[2] = (src)[2], (des)[3] = (src)[3],\ (des)[4] = (src)[4], (des)[5] = (src)[5]) +#define LDPC_HT_ENABLE_RX BIT(0) +#define LDPC_HT_ENABLE_TX BIT(1) +#define LDPC_HT_TEST_TX_ENABLE BIT(2) +#define LDPC_HT_CAP_TX BIT(3) + +#define STBC_HT_ENABLE_RX BIT(0) +#define STBC_HT_ENABLE_TX BIT(1) +#define STBC_HT_TEST_TX_ENABLE BIT(2) +#define STBC_HT_CAP_TX BIT(3) + +#define LDPC_VHT_ENABLE_RX BIT(0) +#define LDPC_VHT_ENABLE_TX BIT(1) +#define LDPC_VHT_TEST_TX_ENABLE BIT(2) +#define LDPC_VHT_CAP_TX BIT(3) + +#define STBC_VHT_ENABLE_RX BIT(0) +#define STBC_VHT_ENABLE_TX BIT(1) +#define STBC_VHT_TEST_TX_ENABLE BIT(2) +#define STBC_VHT_CAP_TX BIT(3) + static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) { return rtlpriv->io.read8_sync(rtlpriv, addr); diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index a0aa8fa72392830f65f8c8b52303be5ad44019b9..735be5352143e9a1e757d6953f708a3ce5fd9e54 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -345,7 +345,6 @@ static int wl1251_spi_remove(struct spi_device *spi) { struct wl1251 *wl = spi_get_drvdata(spi); - free_irq(wl->irq, wl); wl1251_free_hw(wl); regulator_disable(wl->vio); diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h index 0420bd45e4ee4f9e16c2de51f4240995cbf45254..27bfb7c11e7bf215fd318c7b9fbf457574b79563 100644 --- a/drivers/net/wireless/ti/wlcore/debug.h +++ b/drivers/net/wireless/ti/wlcore/debug.h @@ -65,7 +65,7 @@ extern u32 wl12xx_debug_level; pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) #define wl1271_warning(fmt, arg...) \ - pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + pr_warn(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) #define wl1271_notice(fmt, arg...) 
\ pr_info(DRIVER_PREFIX fmt "\n", ##arg) diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 392c882b28f03d9da2be51c6487db2d423b58976..69601f6741d9dde900df6162fc4c7ee485123c3a 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -327,23 +327,22 @@ static int wl1271_probe(struct spi_device *spi) struct wl12xx_spi_glue *glue; struct wlcore_platdev_data pdev_data; struct resource res[1]; - int ret = -ENOMEM; + int ret; memset(&pdev_data, 0x00, sizeof(pdev_data)); pdev_data.pdata = dev_get_platdata(&spi->dev); if (!pdev_data.pdata) { dev_err(&spi->dev, "no platform data\n"); - ret = -ENODEV; - goto out; + return -ENODEV; } pdev_data.if_ops = &spi_ops; - glue = kzalloc(sizeof(*glue), GFP_KERNEL); + glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&spi->dev, "can't allocate glue\n"); - goto out; + return -ENOMEM; } glue->dev = &spi->dev; @@ -357,14 +356,13 @@ static int wl1271_probe(struct spi_device *spi) ret = spi_setup(spi); if (ret < 0) { dev_err(glue->dev, "spi_setup failed\n"); - goto out_free_glue; + return ret; } glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device\n"); - ret = -ENOMEM; - goto out_free_glue; + return -ENOMEM; } glue->core->dev.parent = &spi->dev; @@ -398,11 +396,6 @@ static int wl1271_probe(struct spi_device *spi) out_dev_put: platform_device_put(glue->core); - -out_free_glue: - kfree(glue); - -out: return ret; } @@ -411,7 +404,6 @@ static int wl1271_remove(struct spi_device *spi) struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); platform_device_unregister(glue->core); - kfree(glue); return 0; } diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index ff31939978ae595713fea28e9bbd8f153fc4640c..0ea756b7751941f4594ed8f67c60bad9bb271c75 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -271,6 +271,7 @@ static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb) static int get_frame_size(u8 *buf, int buflen) { int len = 0; + if (buf[len + 1] == ST21NFCA_SOF_EOF) return 0; @@ -311,6 +312,7 @@ static int check_crc(u8 *buf, int buflen) static int st21nfca_hci_i2c_repack(struct sk_buff *skb) { int i, j, r, size; + if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0)) return -EBADMSG; @@ -525,24 +527,19 @@ static int st21nfca_hci_i2c_of_request_resources(struct i2c_client *client) } /* GPIO request and configuration */ - r = devm_gpio_request(&client->dev, gpio, "clf_enable"); + r = devm_gpio_request_one(&client->dev, gpio, GPIOF_OUT_INIT_HIGH, + "clf_enable"); if (r) { nfc_err(&client->dev, "Failed to request enable pin\n"); return -ENODEV; } - r = gpio_direction_output(gpio, 1); - if (r) { - nfc_err(&client->dev, "Failed to set enable pin direction as output\n"); - return -ENODEV; - } phy->gpio_ena = gpio; /* IRQ */ r = irq_of_parse_and_map(pp, 0); if (r < 0) { - nfc_err(&client->dev, - "Unable to get irq, error: %d\n", r); + nfc_err(&client->dev, "Unable to get irq, error: %d\n", r); return r; } @@ -576,32 +573,20 @@ static int st21nfca_hci_i2c_request_resources(struct i2c_client *client) phy->gpio_ena = pdata->gpio_ena; phy->irq_polarity = pdata->irq_polarity; - r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up"); + r = devm_gpio_request_one(&client->dev, phy->gpio_irq, GPIOF_IN, + "wake_up"); if (r) { pr_err("%s : gpio_request failed\n", __FILE__); return -ENODEV; } - r = gpio_direction_input(phy->gpio_irq); - 
if (r) { - pr_err("%s : gpio_direction_input failed\n", __FILE__); - return -ENODEV; - } - if (phy->gpio_ena > 0) { - r = devm_gpio_request(&client->dev, - phy->gpio_ena, "clf_enable"); + r = devm_gpio_request_one(&client->dev, phy->gpio_ena, + GPIOF_OUT_INIT_HIGH, "clf_enable"); if (r) { pr_err("%s : ena gpio_request failed\n", __FILE__); return -ENODEV; } - r = gpio_direction_output(phy->gpio_ena, 1); - - if (r) { - pr_err("%s : ena gpio_direction_output failed\n", - __FILE__); - return -ENODEV; - } } /* IRQ */ @@ -711,7 +696,6 @@ static struct i2c_driver st21nfca_hci_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = ST21NFCA_HCI_I2C_DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = of_match_ptr(of_st21nfca_i2c_match), }, .probe = st21nfca_hci_i2c_probe, diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c index a902b0551c86a10b0d158c76c568613144f4d254..a89e56c2c74954a0ac248bb00f603cd0528af3c4 100644 --- a/drivers/nfc/st21nfca/st21nfca.c +++ b/drivers/nfc/st21nfca/st21nfca.c @@ -34,7 +34,7 @@ #define ST21NFCA_RF_READER_CMD_PRESENCE_CHECK 0x30 #define ST21NFCA_RF_READER_ISO15693_GATE 0x12 -#define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01 +#define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01 /* * Reader gate for communication with contact-less cards using Type A @@ -45,21 +45,42 @@ #define ST21NFCA_RF_READER_14443_3_A_ATQA 0x03 #define ST21NFCA_RF_READER_14443_3_A_SAK 0x04 +#define ST21NFCA_RF_READER_F_DATARATE 0x01 +#define ST21NFCA_RF_READER_F_DATARATE_106 0x01 +#define ST21NFCA_RF_READER_F_DATARATE_212 0x02 +#define ST21NFCA_RF_READER_F_DATARATE_424 0x04 +#define ST21NFCA_RF_READER_F_POL_REQ 0x02 +#define ST21NFCA_RF_READER_F_POL_REQ_DEFAULT 0xffff0000 +#define ST21NFCA_RF_READER_F_NFCID2 0x03 +#define ST21NFCA_RF_READER_F_NFCID1 0x04 + +#define ST21NFCA_RF_CARD_F_MODE 0x01 +#define ST21NFCA_RF_CARD_F_NFCID2_LIST 0x04 +#define ST21NFCA_RF_CARD_F_NFCID1 0x05 +#define ST21NFCA_RF_CARD_F_SENS_RES 0x06 +#define ST21NFCA_RF_CARD_F_SEL_RES 0x07 +#define ST21NFCA_RF_CARD_F_DATARATE 0x08 +#define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01 + #define ST21NFCA_DEVICE_MGNT_GATE 0x01 #define ST21NFCA_DEVICE_MGNT_PIPE 0x02 -#define ST21NFCA_DM_GETINFO 0x13 -#define ST21NFCA_DM_GETINFO_PIPE_LIST 0x02 -#define ST21NFCA_DM_GETINFO_PIPE_INFO 0x01 -#define ST21NFCA_DM_PIPE_CREATED 0x02 -#define ST21NFCA_DM_PIPE_OPEN 0x04 -#define ST21NFCA_DM_RF_ACTIVE 0x80 -#define ST21NFCA_DM_DISCONNECT 0x30 +#define ST21NFCA_DM_GETINFO 0x13 +#define ST21NFCA_DM_GETINFO_PIPE_LIST 0x02 +#define ST21NFCA_DM_GETINFO_PIPE_INFO 0x01 +#define ST21NFCA_DM_PIPE_CREATED 0x02 +#define ST21NFCA_DM_PIPE_OPEN 0x04 +#define ST21NFCA_DM_RF_ACTIVE 0x80 +#define ST21NFCA_DM_DISCONNECT 0x30 #define ST21NFCA_DM_IS_PIPE_OPEN(p) \ ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN)) -#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/ +#define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/ +#define ST21NFCA_EVT_FIELD_ON 0x11 +#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12 +#define ST21NFCA_EVT_CARD_ACTIVATED 0x13 +#define ST21NFCA_EVT_FIELD_OFF 0x14 static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES); @@ -355,8 +376,8 @@ static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev, if (r < 0) return r; - pol_req = - be32_to_cpu(ST21NFCA_RF_READER_F_POL_REQ_DEFAULT); + pol_req = be32_to_cpu((__force __be32) + ST21NFCA_RF_READER_F_POL_REQ_DEFAULT); r = nfc_hci_set_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_POL_REQ, (u8 *) &pol_req, 4); @@ -790,6 +811,7 @@ static int 
st21nfca_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) { u8 fwi = 0x11; + switch (target->hci_reader_gate) { case NFC_HCI_RF_READER_A_GATE: case NFC_HCI_RF_READER_B_GATE: @@ -839,20 +861,16 @@ static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, if (gate == ST21NFCA_RF_CARD_F_GATE) { r = st21nfca_tm_event_send_data(hdev, skb, gate); if (r < 0) - goto exit; + return r; return 0; - } else { - info->dep_info.curr_nfc_dep_pni = 0; - return 1; } - break; + info->dep_info.curr_nfc_dep_pni = 0; + return 1; default: return 1; } kfree_skb(skb); return 0; -exit: - return r; } static struct nfc_hci_ops st21nfca_hci_ops = { @@ -904,8 +922,11 @@ int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, * persistent info to discriminate 2 identical chips */ dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES); + if (dev_num >= ST21NFCA_NUM_DEVICES) - goto err_alloc_hdev; + return -ENODEV; + + set_bit(dev_num, dev_mask); scnprintf(init_data.session_id, sizeof(init_data.session_id), "%s%2x", "ST21AH", dev_num); diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h index 96fe5a62dc0dced5dd7ed41ee2f33496e1a1d61e..a0b77f1ba6d905ae07f322414250bfc4ee6cbc9c 100644 --- a/drivers/nfc/st21nfca/st21nfca.h +++ b/drivers/nfc/st21nfca/st21nfca.h @@ -82,30 +82,9 @@ struct st21nfca_hci_info { #define ST21NFCA_WR_XCHG_DATA 0x10 #define ST21NFCA_RF_READER_F_GATE 0x14 -#define ST21NFCA_RF_READER_F_DATARATE 0x01 -#define ST21NFCA_RF_READER_F_DATARATE_106 0x01 -#define ST21NFCA_RF_READER_F_DATARATE_212 0x02 -#define ST21NFCA_RF_READER_F_DATARATE_424 0x04 -#define ST21NFCA_RF_READER_F_POL_REQ 0x02 -#define ST21NFCA_RF_READER_F_POL_REQ_DEFAULT 0xffff0000 -#define ST21NFCA_RF_READER_F_NFCID2 0x03 -#define ST21NFCA_RF_READER_F_NFCID1 0x04 -#define ST21NFCA_RF_READER_F_SENS_RES 0x05 #define ST21NFCA_RF_CARD_F_GATE 0x24 -#define ST21NFCA_RF_CARD_F_MODE 0x01 -#define ST21NFCA_RF_CARD_F_NFCID2_LIST 0x04 -#define ST21NFCA_RF_CARD_F_NFCID1 0x05 -#define ST21NFCA_RF_CARD_F_SENS_RES 0x06 -#define ST21NFCA_RF_CARD_F_SEL_RES 0x07 -#define ST21NFCA_RF_CARD_F_DATARATE 0x08 -#define ST21NFCA_RF_CARD_F_DATARATE_106 0x00 -#define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01 #define ST21NFCA_EVT_SEND_DATA 0x10 -#define ST21NFCA_EVT_FIELD_ON 0x11 -#define ST21NFCA_EVT_CARD_DEACTIVATED 0x12 -#define ST21NFCA_EVT_CARD_ACTIVATED 0x13 -#define ST21NFCA_EVT_FIELD_OFF 0x14 #endif /* __LOCAL_ST21NFCA_H_ */ diff --git a/drivers/nfc/st21nfca/st21nfca_dep.c b/drivers/nfc/st21nfca/st21nfca_dep.c index b2d9957b57f8cef1ce1ffab57a526ec032a52d27..bfb6df56c5058d36d007c0753a8aa31e25e4f9ce 100644 --- a/drivers/nfc/st21nfca/st21nfca_dep.c +++ b/drivers/nfc/st21nfca/st21nfca_dep.c @@ -121,6 +121,7 @@ static void st21nfca_tx_work(struct work_struct *work) struct nfc_dev *dev; struct sk_buff *skb; + if (info) { dev = info->hdev->ndev; skb = info->dep_info.tx_pending; @@ -128,9 +129,8 @@ static void st21nfca_tx_work(struct work_struct *work) device_lock(&dev->dev); nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE, - ST21NFCA_WR_XCHG_DATA, - skb->data, skb->len, - info->async_cb, info); + ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, + info->async_cb, info); device_unlock(&dev->dev); kfree_skb(skb); } @@ -185,8 +185,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, info->dep_info.curr_nfc_dep_pni = 0; - return nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, + r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_SEND_DATA, skb->data, 
skb->len); + kfree_skb(skb); + return r; } static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, @@ -197,10 +199,6 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, int r; skb_trim(skb, skb->len - 1); - if (IS_ERR(skb)) { - r = PTR_ERR(skb); - goto exit; - } if (!skb->len) { r = -EIO; @@ -214,6 +212,11 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, atr_req = (struct st21nfca_atr_req *)skb->data; + if (atr_req->length < sizeof(struct st21nfca_atr_req)) { + r = -EPROTO; + goto exit; + } + r = st21nfca_tm_send_atr_res(hdev, atr_req); if (r) goto exit; @@ -237,7 +240,6 @@ static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev, struct st21nfca_psl_res *psl_res; struct sk_buff *skb; u8 bitrate[2] = {0, 0}; - int r; skb = alloc_skb(sizeof(struct st21nfca_psl_res), GFP_KERNEL); @@ -254,6 +256,8 @@ static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev, r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); + if (r < 0) + goto error; /* * ST21NFCA only support P2P passive. @@ -269,8 +273,11 @@ static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev, } /* Send an event to change bitrate change event to card f */ - return nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, + r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_CARD_F_BITRATE, bitrate, 2); +error: + kfree_skb(skb); + return r; } static int st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev, @@ -280,11 +287,6 @@ static int st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev, int r; skb_trim(skb, skb->len - 1); - if (IS_ERR(skb)) { - r = PTR_ERR(skb); - skb = NULL; - goto exit; - } if (!skb->len) { r = -EIO; @@ -314,7 +316,7 @@ int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb) *skb_push(skb, 1) = skb->len; r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, - ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); + ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); kfree_skb(skb); return r; @@ -330,11 +332,6 @@ static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev, struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); skb_trim(skb, skb->len - 1); - if (IS_ERR(skb)) { - r = PTR_ERR(skb); - skb = NULL; - goto exit; - } size = 4; @@ -368,12 +365,6 @@ static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev, break; } - if (IS_ERR(skb)) { - r = PTR_ERR(skb); - skb = NULL; - goto exit; - } - skb_pull(skb, size); return nfc_tm_data_received(hdev->ndev, skb); @@ -437,8 +428,6 @@ static void st21nfca_im_send_psl_req(struct nfc_hci_dev *hdev, u8 did, u8 bsi, *skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_send_pdu(info, skb); - - kfree_skb(skb); } #define ST21NFCA_CB_TYPE_READER_F 1 @@ -452,7 +441,7 @@ static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb, if (err != 0) return; - if (IS_ERR(skb)) + if (!skb) return; switch (info->async_cb_type) { @@ -484,8 +473,7 @@ static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb, ST21NFCA_PP2LRI(atr_res->ppi)); break; default: - if (err == 0) - kfree_skb(skb); + kfree_skb(skb); break; } } @@ -522,7 +510,7 @@ int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len) memset(atr_req->nfcid3, 0, NFC_NFCID3_MAXSIZE); target = hdev->ndev->targets; - if (target->sensf_res) + if (target->sensf_res_len > 0) memcpy(atr_req->nfcid3, target->sensf_res, target->sensf_res_len); else @@ -565,7 +553,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, if (err != 0) return; - if 
(IS_ERR(skb)) + if (!skb) return; switch (info->async_cb_type) { @@ -615,8 +603,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, } exit: - if (err == 0) - kfree_skb(skb); + kfree_skb(skb); } int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c index 8af880ead5db3f1b558aaa1ff28ac38c1b1e078c..c5d2427a3db276064c9669722b2039e5ddf90e3f 100644 --- a/drivers/nfc/st21nfcb/i2c.c +++ b/drivers/nfc/st21nfcb/i2c.c @@ -17,24 +17,16 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include #include #include #include #include -#include #include #include #include -#include -#include #include -#include -#include -#include - #include "ndlc.h" #define DRIVER_DESC "NCI NFC driver for ST21NFCB" @@ -63,12 +55,6 @@ struct st21nfcb_i2c_phy { unsigned int irq_polarity; int powered; - - /* - * < 0 if hardware error occured (e.g. i2c err) - * and prevents normal operation. - */ - int hard_fault; }; #define I2C_DUMP_SKB(info, skb) \ @@ -122,8 +108,8 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb) I2C_DUMP_SKB("st21nfcb_nci_i2c_write", skb); - if (phy->hard_fault != 0) - return phy->hard_fault; + if (phy->ndlc->hard_fault != 0) + return phy->ndlc->hard_fault; r = i2c_master_send(client, skb->data, skb->len); if (r == -EREMOTEIO) { /* Retry, chip was in standby */ @@ -168,11 +154,11 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy, if (r == -EREMOTEIO) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE); - } else if (r != ST21NFCB_NCI_I2C_MIN_SIZE) { - nfc_err(&client->dev, "cannot read ndlc & nci header\n"); - return -EREMOTEIO; } + if (r != ST21NFCB_NCI_I2C_MIN_SIZE) + return -EREMOTEIO; + len = be16_to_cpu(*(__be16 *) (buf + 2)); if (len > ST21NFCB_NCI_I2C_MAX_SIZE) { nfc_err(&client->dev, "invalid frame len\n"); @@ -224,7 +210,7 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id) client = phy->i2c_dev; dev_dbg(&client->dev, "IRQ\n"); - if (phy->hard_fault) + if (phy->ndlc->hard_fault) return IRQ_HANDLED; if (!phy->powered) { @@ -233,13 +219,8 @@ static irqreturn_t st21nfcb_nci_irq_thread_fn(int irq, void *phy_id) } r = st21nfcb_nci_i2c_read(phy, &skb); - if (r == -EREMOTEIO) { - phy->hard_fault = r; - ndlc_recv(phy->ndlc, NULL); - return IRQ_HANDLED; - } else if (r == -ENOMEM || r == -EBADMSG) { + if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG) return IRQ_HANDLED; - } ndlc_recv(phy->ndlc, skb); @@ -273,25 +254,18 @@ static int st21nfcb_nci_i2c_of_request_resources(struct i2c_client *client) } /* GPIO request and configuration */ - r = devm_gpio_request(&client->dev, gpio, "clf_reset"); + r = devm_gpio_request_one(&client->dev, gpio, + GPIOF_OUT_INIT_HIGH, "clf_reset"); if (r) { nfc_err(&client->dev, "Failed to request reset pin\n"); return -ENODEV; } - - r = gpio_direction_output(gpio, 1); - if (r) { - nfc_err(&client->dev, - "Failed to set reset pin direction as output\n"); - return -ENODEV; - } phy->gpio_reset = gpio; /* IRQ */ r = irq_of_parse_and_map(pp, 0); if (r < 0) { - nfc_err(&client->dev, - "Unable to get irq, error: %d\n", r); + nfc_err(&client->dev, "Unable to get irq, error: %d\n", r); return r; } @@ -325,32 +299,20 @@ static int st21nfcb_nci_i2c_request_resources(struct i2c_client *client) phy->gpio_reset = pdata->gpio_reset; phy->irq_polarity = pdata->irq_polarity; - r = devm_gpio_request(&client->dev, phy->gpio_irq, "wake_up"); + r = 
devm_gpio_request_one(&client->dev, phy->gpio_irq, + GPIOF_IN, "clf_irq"); if (r) { pr_err("%s : gpio_request failed\n", __FILE__); return -ENODEV; } - r = gpio_direction_input(phy->gpio_irq); - if (r) { - pr_err("%s : gpio_direction_input failed\n", __FILE__); - return -ENODEV; - } - - r = devm_gpio_request(&client->dev, - phy->gpio_reset, "clf_reset"); + r = devm_gpio_request_one(&client->dev, + phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset"); if (r) { pr_err("%s : reset gpio_request failed\n", __FILE__); return -ENODEV; } - r = gpio_direction_output(phy->gpio_reset, 1); - if (r) { - pr_err("%s : reset gpio_direction_output failed\n", - __FILE__); - return -ENODEV; - } - /* IRQ */ irq = gpio_to_irq(phy->gpio_irq); if (irq < 0) { @@ -448,7 +410,6 @@ static struct i2c_driver st21nfcb_nci_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = ST21NFCB_NCI_I2C_DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = of_match_ptr(of_st21nfcb_i2c_match), }, .probe = st21nfcb_nci_i2c_probe, diff --git a/drivers/nfc/st21nfcb/ndlc.c b/drivers/nfc/st21nfcb/ndlc.c index 83c97c36112bc7bb9e42d952fb18b8e42d8fed82..e7bff8921d1143876246c90955649181af573dce 100644 --- a/drivers/nfc/st21nfcb/ndlc.c +++ b/drivers/nfc/st21nfcb/ndlc.c @@ -112,6 +112,10 @@ static void llt_ndlc_send_queue(struct llt_ndlc *ndlc) ndlc->t1_active = true; mod_timer(&ndlc->t1_timer, time_sent + msecs_to_jiffies(NDLC_TIMER_T1)); + /* start timer t2 for chip availability */ + ndlc->t2_active = true; + mod_timer(&ndlc->t2_timer, time_sent + + msecs_to_jiffies(NDLC_TIMER_T2)); } } @@ -207,7 +211,7 @@ static void llt_ndlc_sm_work(struct work_struct *work) ndlc->t2_active = false; ndlc->t1_active = false; del_timer_sync(&ndlc->t1_timer); - + del_timer_sync(&ndlc->t2_timer); ndlc_close(ndlc); ndlc->hard_fault = -EREMOTEIO; } diff --git a/drivers/nfc/st21nfcb/ndlc.h b/drivers/nfc/st21nfcb/ndlc.h index c30a2f0faa5fde80ce1cf6e3032275069d554c9e..b28140e0cd78865fa5635ea3394e7f5fc2695858 100644 --- a/drivers/nfc/st21nfcb/ndlc.h +++ b/drivers/nfc/st21nfcb/ndlc.h @@ -42,6 +42,10 @@ struct llt_ndlc { struct device *dev; + /* + * < 0 if hardware error occured + * and prevents normal operation. + */ int hard_fault; }; diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c index 4d95863e30639f7e63c41cdcc066665eef988e65..ea63d5877831bf40b3dafbc059ef6651bf3c9a28 100644 --- a/drivers/nfc/st21nfcb/st21nfcb.c +++ b/drivers/nfc/st21nfcb/st21nfcb.c @@ -22,10 +22,11 @@ #include #include "st21nfcb.h" -#include "ndlc.h" #define DRIVER_DESC "NCI NFC driver for ST21NFCB" +#define ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 0x83 + static int st21nfcb_nci_open(struct nci_dev *ndev) { struct st21nfcb_nci_info *info = nci_get_drvdata(ndev); @@ -65,10 +66,18 @@ static int st21nfcb_nci_send(struct nci_dev *ndev, struct sk_buff *skb) return ndlc_send(info->ndlc, skb); } +static __u32 st21nfcb_nci_get_rfprotocol(struct nci_dev *ndev, + __u8 rf_protocol) +{ + return rf_protocol == ST21NFCB_NCI1_X_PROPRIETARY_ISO15693 ? 
+ NFC_PROTO_ISO15693_MASK : 0; +} + static struct nci_ops st21nfcb_nci_ops = { .open = st21nfcb_nci_open, .close = st21nfcb_nci_close, .send = st21nfcb_nci_send, + .get_rfprotocol = st21nfcb_nci_get_rfprotocol, }; int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom, @@ -88,29 +97,25 @@ int st21nfcb_nci_probe(struct llt_ndlc *ndlc, int phy_headroom, | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK + | NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK; ndlc->ndev = nci_allocate_device(&st21nfcb_nci_ops, protocols, phy_headroom, phy_tailroom); if (!ndlc->ndev) { pr_err("Cannot allocate nfc ndev\n"); - r = -ENOMEM; - goto err_alloc_ndev; + return -ENOMEM; } info->ndlc = ndlc; nci_set_drvdata(ndlc->ndev, info); r = nci_register_device(ndlc->ndev); - if (r) - goto err_regdev; - - return r; -err_regdev: - nci_free_device(ndlc->ndev); + if (r) { + pr_err("Cannot register nfc device to nci core\n"); + nci_free_device(ndlc->ndev); + } -err_alloc_ndev: - kfree(info); return r; } EXPORT_SYMBOL_GPL(st21nfcb_nci_probe); diff --git a/drivers/nfc/st21nfcb/st21nfcb.h b/drivers/nfc/st21nfcb/st21nfcb.h index 4bbbebb9f34d50136d32e4a3706acb0648195229..ea58a56ad794b792865aaaa59f37d801e8c83bb1 100644 --- a/drivers/nfc/st21nfcb/st21nfcb.h +++ b/drivers/nfc/st21nfcb/st21nfcb.h @@ -19,8 +19,6 @@ #ifndef __LOCAL_ST21NFCB_H_ #define __LOCAL_ST21NFCB_H_ -#include - #include "ndlc.h" /* Define private flags: */ diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index 3b78b031e617df51e98101a231519a000cd159ed..d2ccd289064739a17c9463e4698eab84ce716b64 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -36,7 +36,13 @@ * The trf7970a is very timing sensitive and the VIN, EN2, and EN * pins must asserted in that order and with specific delays in between. * The delays used in the driver were provided by TI and have been - * confirmed to work with this driver. + * confirmed to work with this driver. There is a bug with the current + * version of the trf7970a that requires that EN2 remain low no matter + * what. If it goes high, it will generate an RF field even when in + * passive target mode. TI has indicated that the chip will work okay + * when EN2 is left low. The 'en2-rf-quirk' device tree property + * indicates that trf7970a currently being used has the erratum and + * that EN2 must be kept low. * * Timeouts are implemented using the delayed workqueue kernel facility. * Timeouts are required so things don't hang when there is no response @@ -56,7 +62,7 @@ * way to abort a command that's already been sent to the tag is so turn * off power to the tag. If we do that, though, we'd have to go through * the entire anticollision procedure again but the digital layer doesn't - * support that. So, if an abort is received before trf7970a_in_send_cmd() + * support that. So, if an abort is received before trf7970a_send_cmd() * has sent the command to the tag, it simply returns -ECANCELED. If the * command has already been sent to the tag, then the driver continues * normally and recieves the response data (or error) but just before @@ -77,6 +83,13 @@ * been received and there isn't an error). The delay is 20 ms since delays * of ~16 ms have been observed during testing. * + * When transmitting a frame larger than the FIFO size (127 bytes), the + * driver will wait 20 ms for the FIFO to drain past the low-watermark + * and generate an interrupt. The low-watermark set to 32 bytes so the + * interrupt should fire after 127 - 32 = 95 bytes have been sent. 
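+ * (The FIFO IRQ levels are programmed in trf7970a_init() to WLH_96 and
+ * WLL_32, i.e. a high watermark of 96 bytes and a low watermark of
+ * 32 bytes; the figures above assume that setting.)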
At + * the lowest possible bit rate (6.62 kbps for 15693), it will take up + * to ~14.35 ms so 20 ms is used for the timeout. + * * Type 2 write and sector select commands respond with a 4-bit ACK or NACK. * Having only 4 bits in the FIFO won't normally generate an interrupt so * driver enables the '4_bit_RX' bit of the Special Functions register 1 @@ -99,40 +112,43 @@ * Note under Table 1-1 in section 1.6 of * http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least * 10 ms for TI Tag-it HF-I tags; however testing has shown that is not long - * enough. For this reason, the driver waits 20 ms which seems to work + * enough so 20 ms is used. So the timer is set to 40 ms - 20 ms to drain + * up to 127 bytes in the FIFO at the lowest bit rate plus another 20 ms to + * ensure the wait is long enough before sending the EOF. This seems to work * reliably. */ #define TRF7970A_SUPPORTED_PROTOCOLS \ (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_FELICA_MASK | \ - NFC_PROTO_ISO15693_MASK) + NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK) #define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */ -/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends - * on what the current framing is, the address of the TX length byte 1 - * register (0x1d), and the 2 byte length of the data to be transmitted. - * That totals 5 bytes. - */ -#define TRF7970A_TX_SKB_HEADROOM 5 - #define TRF7970A_RX_SKB_ALLOC_SIZE 256 -#define TRF7970A_FIFO_SIZE 128 +#define TRF7970A_FIFO_SIZE 127 /* TX length is 3 nibbles long ==> 4KB - 1 bytes max */ #define TRF7970A_TX_MAX (4096 - 1) +#define TRF7970A_WAIT_FOR_TX_IRQ 20 #define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 20 -#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3 -#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20 +#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 20 +#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 40 + +/* Guard times for various RF technologies (in us) */ +#define TRF7970A_GUARD_TIME_NFCA 5000 +#define TRF7970A_GUARD_TIME_NFCB 5000 +#define TRF7970A_GUARD_TIME_NFCF 20000 +#define TRF7970A_GUARD_TIME_15693 1000 /* Quirks */ /* Erratum: When reading IRQ Status register on trf7970a, we must issue a * read continuous command for IRQ Status and Collision Position registers. 
*/ -#define TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA BIT(0) +#define TRF7970A_QUIRK_IRQ_STATUS_READ BIT(0) +#define TRF7970A_QUIRK_EN2_MUST_STAY_LOW BIT(1) /* Direct commands */ #define TRF7970A_CMD_IDLE 0x00 @@ -149,8 +165,8 @@ #define TRF7970A_CMD_CLOSE_SLOT 0x15 #define TRF7970A_CMD_BLOCK_RX 0x16 #define TRF7970A_CMD_ENABLE_RX 0x17 -#define TRF7970A_CMD_TEST_EXT_RF 0x18 -#define TRF7970A_CMD_TEST_INT_RF 0x19 +#define TRF7970A_CMD_TEST_INT_RF 0x18 +#define TRF7970A_CMD_TEST_EXT_RF 0x19 #define TRF7970A_CMD_RX_GAIN_ADJUST 0x1a /* Bits determining whether its a direct command or register R/W, @@ -224,6 +240,15 @@ #define TRF7970A_ISO_CTRL_14443B_848 0x0f #define TRF7970A_ISO_CTRL_FELICA_212 0x1a #define TRF7970A_ISO_CTRL_FELICA_424 0x1b +#define TRF7970A_ISO_CTRL_NFC_NFCA_106 0x01 +#define TRF7970A_ISO_CTRL_NFC_NFCF_212 0x02 +#define TRF7970A_ISO_CTRL_NFC_NFCF_424 0x03 +#define TRF7970A_ISO_CTRL_NFC_CE_14443A 0x00 +#define TRF7970A_ISO_CTRL_NFC_CE_14443B 0x01 +#define TRF7970A_ISO_CTRL_NFC_CE BIT(2) +#define TRF7970A_ISO_CTRL_NFC_ACTIVE BIT(3) +#define TRF7970A_ISO_CTRL_NFC_INITIATOR BIT(4) +#define TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE BIT(5) #define TRF7970A_ISO_CTRL_RFID BIT(5) #define TRF7970A_ISO_CTRL_DIR_MODE BIT(6) #define TRF7970A_ISO_CTRL_RX_CRC_N BIT(7) /* true == No CRC */ @@ -249,12 +274,32 @@ #define TRF7970A_MODULATOR_EN_OOK BIT(6) #define TRF7970A_MODULATOR_27MHZ BIT(7) +#define TRF7970A_RX_SPECIAL_SETTINGS_NO_LIM BIT(0) +#define TRF7970A_RX_SPECIAL_SETTINGS_AGCR BIT(1) +#define TRF7970A_RX_SPECIAL_SETTINGS_GD_0DB (0x0 << 2) +#define TRF7970A_RX_SPECIAL_SETTINGS_GD_5DB (0x1 << 2) +#define TRF7970A_RX_SPECIAL_SETTINGS_GD_10DB (0x2 << 2) +#define TRF7970A_RX_SPECIAL_SETTINGS_GD_15DB (0x3 << 2) +#define TRF7970A_RX_SPECIAL_SETTINGS_HBT BIT(4) +#define TRF7970A_RX_SPECIAL_SETTINGS_M848 BIT(5) +#define TRF7970A_RX_SPECIAL_SETTINGS_C424 BIT(6) +#define TRF7970A_RX_SPECIAL_SETTINGS_C212 BIT(7) + +#define TRF7970A_REG_IO_CTRL_VRS(v) ((v) & 0x07) +#define TRF7970A_REG_IO_CTRL_IO_LOW BIT(5) +#define TRF7970A_REG_IO_CTRL_EN_EXT_PA BIT(6) +#define TRF7970A_REG_IO_CTRL_AUTO_REG BIT(7) + /* IRQ Status Register Bits */ #define TRF7970A_IRQ_STATUS_NORESP BIT(0) /* ISO15693 only */ +#define TRF7970A_IRQ_STATUS_NFC_COL_ERROR BIT(0) #define TRF7970A_IRQ_STATUS_COL BIT(1) #define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR BIT(2) +#define TRF7970A_IRQ_STATUS_NFC_RF BIT(2) #define TRF7970A_IRQ_STATUS_PARITY_ERROR BIT(3) +#define TRF7970A_IRQ_STATUS_NFC_SDD BIT(3) #define TRF7970A_IRQ_STATUS_CRC_ERROR BIT(4) +#define TRF7970A_IRQ_STATUS_NFC_PROTO_ERROR BIT(4) #define TRF7970A_IRQ_STATUS_FIFO BIT(5) #define TRF7970A_IRQ_STATUS_SRX BIT(6) #define TRF7970A_IRQ_STATUS_TX BIT(7) @@ -265,6 +310,10 @@ TRF7970A_IRQ_STATUS_PARITY_ERROR | \ TRF7970A_IRQ_STATUS_CRC_ERROR) +#define TRF7970A_RSSI_OSC_STATUS_RSSI_MASK (BIT(2) | BIT(1) | BIT(0)) +#define TRF7970A_RSSI_OSC_STATUS_RSSI_X_MASK (BIT(5) | BIT(4) | BIT(3)) +#define TRF7970A_RSSI_OSC_STATUS_RSSI_OSC_OK BIT(6) + #define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0) #define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1) #define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX BIT(2) @@ -281,6 +330,49 @@ #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16 0x2 #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32 0x3 +#define TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(v) ((v) & 0x07) +#define TRF7970A_NFC_LOW_FIELD_LEVEL_CLEX_DIS BIT(7) + +#define TRF7970A_NFC_TARGET_LEVEL_RFDET(v) ((v) & 0x07) +#define TRF7970A_NFC_TARGET_LEVEL_HI_RF BIT(3) +#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN BIT(3) +#define 
TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES (0x0 << 6) +#define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES (0x1 << 6) +#define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES (0x2 << 6) + +#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106 BIT(0) +#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212 BIT(1) +#define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424 (BIT(0) | BIT(1)) +#define TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B BIT(2) +#define TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 BIT(3) +#define TRF79070A_NFC_TARGET_PROTOCOL_FELICA BIT(4) +#define TRF79070A_NFC_TARGET_PROTOCOL_RF_L BIT(6) +#define TRF79070A_NFC_TARGET_PROTOCOL_RF_H BIT(7) + +#define TRF79070A_NFC_TARGET_PROTOCOL_106A \ + (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ + TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ + TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 | \ + TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106) + +#define TRF79070A_NFC_TARGET_PROTOCOL_106B \ + (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ + TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ + TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B | \ + TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106) + +#define TRF79070A_NFC_TARGET_PROTOCOL_212F \ + (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ + TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ + TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \ + TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212) + +#define TRF79070A_NFC_TARGET_PROTOCOL_424F \ + (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ + TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ + TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \ + TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424) + #define TRF7970A_FIFO_STATUS_OVERFLOW BIT(7) /* NFC (ISO/IEC 14443A) Type 2 Tag commands */ @@ -317,13 +409,16 @@ (ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE) enum trf7970a_state { - TRF7970A_ST_OFF, + TRF7970A_ST_PWR_OFF, + TRF7970A_ST_RF_OFF, TRF7970A_ST_IDLE, TRF7970A_ST_IDLE_RX_BLOCKED, TRF7970A_ST_WAIT_FOR_TX_FIFO, TRF7970A_ST_WAIT_FOR_RX_DATA, TRF7970A_ST_WAIT_FOR_RX_DATA_CONT, TRF7970A_ST_WAIT_TO_ISSUE_EOF, + TRF7970A_ST_LISTENING, + TRF7970A_ST_LISTENING_MD, TRF7970A_ST_MAX }; @@ -334,6 +429,7 @@ struct trf7970a { struct regulator *regulator; struct nfc_digital_dev *ddev; u32 quirks; + bool is_initiator; bool aborting; struct sk_buff *tx_skb; struct sk_buff *rx_skb; @@ -344,8 +440,10 @@ struct trf7970a { u8 iso_ctrl_tech; u8 modulator_sys_clk_ctrl; u8 special_fcn_reg1; + unsigned int guard_time; int technology; int framing; + u8 md_rf_tech; u8 tx_cmd; bool issue_eof; int en2_gpio; @@ -386,15 +484,28 @@ static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val) return ret; } -static int trf7970a_read_cont(struct trf7970a *trf, u8 reg, - u8 *buf, size_t len) +static int trf7970a_read_cont(struct trf7970a *trf, u8 reg, u8 *buf, size_t len) { u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS; + struct spi_transfer t[2]; + struct spi_message m; int ret; dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len); - ret = spi_write_then_read(trf->spi, &addr, 1, buf, len); + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = &addr; + t[0].len = sizeof(addr); + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = len; + spi_message_add_tail(&t[1], &m); + + ret = spi_sync(trf->spi, &m); if (ret) dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr, ret); @@ -424,7 +535,7 @@ static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status) addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW; - if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA) { + if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ) { addr |= TRF7970A_CMD_BIT_CONTINUOUS; ret = 
spi_write_then_read(trf->spi, &addr, 1, buf, 2); } else { @@ -440,10 +551,60 @@ static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status) return ret; } -static void trf7970a_send_upstream(struct trf7970a *trf) +static int trf7970a_read_target_proto(struct trf7970a *trf, u8 *target_proto) { - u8 rssi; + int ret; + u8 buf[2]; + u8 addr; + addr = TRF79070A_NFC_TARGET_PROTOCOL | TRF7970A_CMD_BIT_RW | + TRF7970A_CMD_BIT_CONTINUOUS; + + ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2); + if (ret) + dev_err(trf->dev, "%s - target_proto: Read failed: %d\n", + __func__, ret); + else + *target_proto = buf[0]; + + return ret; +} + +static int trf7970a_mode_detect(struct trf7970a *trf, u8 *rf_tech) +{ + int ret; + u8 target_proto, tech; + + ret = trf7970a_read_target_proto(trf, &target_proto); + if (ret) + return ret; + + switch (target_proto) { + case TRF79070A_NFC_TARGET_PROTOCOL_106A: + tech = NFC_DIGITAL_RF_TECH_106A; + break; + case TRF79070A_NFC_TARGET_PROTOCOL_106B: + tech = NFC_DIGITAL_RF_TECH_106B; + break; + case TRF79070A_NFC_TARGET_PROTOCOL_212F: + tech = NFC_DIGITAL_RF_TECH_212F; + break; + case TRF79070A_NFC_TARGET_PROTOCOL_424F: + tech = NFC_DIGITAL_RF_TECH_424F; + break; + default: + dev_dbg(trf->dev, "%s - mode_detect: target_proto: 0x%x\n", + __func__, target_proto); + return -EIO; + } + + *rf_tech = tech; + + return ret; +} + +static void trf7970a_send_upstream(struct trf7970a *trf) +{ dev_kfree_skb_any(trf->tx_skb); trf->tx_skb = NULL; @@ -452,13 +613,6 @@ static void trf7970a_send_upstream(struct trf7970a *trf) 16, 1, trf->rx_skb->data, trf->rx_skb->len, false); - /* According to the manual it is "good form" to reset the fifo and - * read the RSSI levels & oscillator status register here. It doesn't - * explain why. - */ - trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); - trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi); - trf->state = TRF7970A_ST_IDLE; if (trf->aborting) { @@ -481,6 +635,8 @@ static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno) { dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno); + cancel_delayed_work(&trf->timeout_work); + kfree_skb(trf->rx_skb); trf->rx_skb = ERR_PTR(errno); @@ -488,15 +644,29 @@ static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno) } static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb, - unsigned int len) + unsigned int len, u8 *prefix, unsigned int prefix_len) { + struct spi_transfer t[2]; + struct spi_message m; unsigned int timeout; int ret; print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE, 16, 1, skb->data, len, false); - ret = spi_write(trf->spi, skb->data, len); + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = prefix; + t[0].len = prefix_len; + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = skb->data; + t[1].len = len; + spi_message_add_tail(&t[1], &m); + + ret = spi_sync(trf->spi, &m); if (ret) { dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__, ret); @@ -514,7 +684,11 @@ static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb, timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF; } else { trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA; - timeout = trf->timeout; + + if (!trf->timeout) + timeout = TRF7970A_WAIT_FOR_TX_IRQ; + else + timeout = trf->timeout; } } @@ -532,6 +706,7 @@ static void trf7970a_fill_fifo(struct trf7970a *trf) unsigned int len; int ret; u8 fifo_bytes; + u8 prefix; ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes); if (ret) { @@ -541,18 +716,21 @@ static void 
trf7970a_fill_fifo(struct trf7970a *trf) dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes); - if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) { - dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__, - fifo_bytes); - trf7970a_send_err_upstream(trf, -EIO); - return; - } + fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; /* Calculate how much more data can be written to the fifo */ len = TRF7970A_FIFO_SIZE - fifo_bytes; + if (!len) { + schedule_delayed_work(&trf->timeout_work, + msecs_to_jiffies(TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT)); + return; + } + len = min(skb->len, len); - ret = trf7970a_transmit(trf, skb, len); + prefix = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_FIFO_IO_REGISTER; + + ret = trf7970a_transmit(trf, skb, len, &prefix, sizeof(prefix)); if (ret) trf7970a_send_err_upstream(trf, ret); } @@ -576,16 +754,11 @@ static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status) dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes); + fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; + if (!fifo_bytes) goto no_rx_data; - if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) { - dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__, - fifo_bytes); - trf7970a_send_err_upstream(trf, -EIO); - return; - } - if (fifo_bytes > skb_tailroom(skb)) { skb = skb_copy_expand(skb, skb_headroom(skb), max_t(int, fifo_bytes, @@ -615,6 +788,21 @@ static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status) status = TRF7970A_IRQ_STATUS_SRX; } else { trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT; + + ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes); + if (ret) { + trf7970a_send_err_upstream(trf, ret); + return; + } + + fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; + + /* If there are bytes in the FIFO, set status to '0' so + * the if stmt below doesn't fire and the driver will wait + * for the trf7970a to generate another RX interrupt. + */ + if (fifo_bytes) + status = 0; } no_rx_data: @@ -634,11 +822,11 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id) { struct trf7970a *trf = dev_id; int ret; - u8 status; + u8 status, fifo_bytes, iso_ctrl; mutex_lock(&trf->lock); - if (trf->state == TRF7970A_ST_OFF) { + if (trf->state == TRF7970A_ST_RF_OFF) { mutex_unlock(&trf->lock); return IRQ_NONE; } @@ -660,12 +848,12 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id) switch (trf->state) { case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: - /* If getting interrupts caused by RF noise, turn off the - * receiver to avoid unnecessary interrupts. It will be - * turned back on in trf7970a_in_send_cmd() when the next - * command is issued. + /* If initiator and getting interrupts caused by RF noise, + * turn off the receiver to avoid unnecessary interrupts. + * It will be turned back on in trf7970a_send_cmd() when + * the next command is issued. 
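+ * When operating as a target the receiver is deliberately left enabled
+ * here: error interrupts caused by the remote field can be expected while
+ * listening, and blocking RX would interfere with the LISTENING states
+ * handled further below.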
*/ - if (status & TRF7970A_IRQ_STATUS_ERROR) { + if (trf->is_initiator && (status & TRF7970A_IRQ_STATUS_ERROR)) { trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX); trf->state = TRF7970A_ST_IDLE_RX_BLOCKED; } @@ -687,8 +875,68 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id) trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf7970a_drain_fifo(trf, status); - } else if (status == TRF7970A_IRQ_STATUS_TX) { + } else if (status & TRF7970A_IRQ_STATUS_FIFO) { + ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, + &fifo_bytes); + + fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; + + if (ret) + trf7970a_send_err_upstream(trf, ret); + else if (!fifo_bytes) + trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); + } else if ((status == TRF7970A_IRQ_STATUS_TX) || + (!trf->is_initiator && + (status == (TRF7970A_IRQ_STATUS_TX | + TRF7970A_IRQ_STATUS_NFC_RF)))) { trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); + + if (!trf->timeout) { + trf->ignore_timeout = !cancel_delayed_work( + &trf->timeout_work); + trf->rx_skb = ERR_PTR(0); + trf7970a_send_upstream(trf); + break; + } + + if (trf->is_initiator) + break; + + iso_ctrl = trf->iso_ctrl; + + switch (trf->framing) { + case NFC_DIGITAL_FRAMING_NFCA_STANDARD: + trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; + iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; + trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */ + break; + case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: + trf->tx_cmd = TRF7970A_CMD_TRANSMIT; + iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; + trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */ + break; + case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE: + ret = trf7970a_write(trf, + TRF7970A_SPECIAL_FCN_REG1, + TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL); + if (ret) + goto err_unlock_exit; + + trf->special_fcn_reg1 = + TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL; + break; + default: + break; + } + + if (iso_ctrl != trf->iso_ctrl) { + ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, + iso_ctrl); + if (ret) + goto err_unlock_exit; + + trf->iso_ctrl = iso_ctrl; + } } else { trf7970a_send_err_upstream(trf, -EIO); } @@ -697,11 +945,37 @@ static irqreturn_t trf7970a_irq(int irq, void *dev_id) if (status != TRF7970A_IRQ_STATUS_TX) trf7970a_send_err_upstream(trf, -EIO); break; + case TRF7970A_ST_LISTENING: + if (status & TRF7970A_IRQ_STATUS_SRX) { + trf->ignore_timeout = + !cancel_delayed_work(&trf->timeout_work); + trf7970a_drain_fifo(trf, status); + } else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) { + trf7970a_send_err_upstream(trf, -EIO); + } + break; + case TRF7970A_ST_LISTENING_MD: + if (status & TRF7970A_IRQ_STATUS_SRX) { + trf->ignore_timeout = + !cancel_delayed_work(&trf->timeout_work); + + ret = trf7970a_mode_detect(trf, &trf->md_rf_tech); + if (ret) { + trf7970a_send_err_upstream(trf, ret); + } else { + trf->state = TRF7970A_ST_LISTENING; + trf7970a_drain_fifo(trf, status); + } + } else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) { + trf7970a_send_err_upstream(trf, -EIO); + } + break; default: dev_err(trf->dev, "%s - Driver in invalid state: %d\n", __func__, trf->state); } +err_unlock_exit: mutex_unlock(&trf->lock); return IRQ_HANDLED; } @@ -742,7 +1016,7 @@ static void trf7970a_timeout_work_handler(struct work_struct *work) if (trf->ignore_timeout) trf->ignore_timeout = false; else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT) - trf7970a_send_upstream(trf); /* No more rx data so send up */ + trf7970a_drain_fifo(trf, TRF7970A_IRQ_STATUS_SRX); else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF) trf7970a_issue_eof(trf); else @@ -765,11 +1039,16 @@ static int 
trf7970a_init(struct trf7970a *trf) if (ret) goto err_out; - /* Must clear NFC Target Detection Level reg due to erratum */ - ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0); + usleep_range(1000, 2000); + + trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON; + + ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, 0); if (ret) goto err_out; + trf->modulator_sys_clk_ctrl = 0; + ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 | TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32); @@ -792,6 +1071,10 @@ static int trf7970a_init(struct trf7970a *trf) static void trf7970a_switch_rf_off(struct trf7970a *trf) { + if ((trf->state == TRF7970A_ST_PWR_OFF) || + (trf->state == TRF7970A_ST_RF_OFF)) + return; + dev_dbg(trf->dev, "Switching rf off\n"); trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON; @@ -799,24 +1082,41 @@ static void trf7970a_switch_rf_off(struct trf7970a *trf) trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl); trf->aborting = false; - trf->state = TRF7970A_ST_OFF; + trf->state = TRF7970A_ST_RF_OFF; pm_runtime_mark_last_busy(trf->dev); pm_runtime_put_autosuspend(trf->dev); } -static void trf7970a_switch_rf_on(struct trf7970a *trf) +static int trf7970a_switch_rf_on(struct trf7970a *trf) { + int ret; + dev_dbg(trf->dev, "Switching rf on\n"); pm_runtime_get_sync(trf->dev); + if (trf->state != TRF7970A_ST_RF_OFF) { /* Power on, RF off */ + dev_err(trf->dev, "%s - Incorrect state: %d\n", __func__, + trf->state); + return -EINVAL; + } + + ret = trf7970a_init(trf); + if (ret) { + dev_err(trf->dev, "%s - Can't initialize: %d\n", __func__, ret); + return ret; + } + trf->state = TRF7970A_ST_IDLE; + + return 0; } static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); + int ret = 0; dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on); @@ -824,8 +1124,9 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) if (on) { switch (trf->state) { - case TRF7970A_ST_OFF: - trf7970a_switch_rf_on(trf); + case TRF7970A_ST_PWR_OFF: + case TRF7970A_ST_RF_OFF: + ret = trf7970a_switch_rf_on(trf); break; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: @@ -834,26 +1135,31 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); trf7970a_switch_rf_off(trf); + ret = -EINVAL; } } else { switch (trf->state) { - case TRF7970A_ST_OFF: + case TRF7970A_ST_PWR_OFF: + case TRF7970A_ST_RF_OFF: break; default: dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); + ret = -EINVAL; /* FALLTHROUGH */ case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: + case TRF7970A_ST_WAIT_FOR_RX_DATA: + case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: trf7970a_switch_rf_off(trf); } } mutex_unlock(&trf->lock); - return 0; + return ret; } -static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech) +static int trf7970a_in_config_rf_tech(struct trf7970a *trf, int tech) { int ret = 0; @@ -863,22 +1169,27 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech) case NFC_DIGITAL_RF_TECH_106A: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106; trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + trf->guard_time = TRF7970A_GUARD_TIME_NFCA; break; case NFC_DIGITAL_RF_TECH_106B: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106; trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->guard_time = 
TRF7970A_GUARD_TIME_NFCB; break; case NFC_DIGITAL_RF_TECH_212F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212; trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_424F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424; trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_ISO15693: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648; trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + trf->guard_time = TRF7970A_GUARD_TIME_15693; break; default: dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech); @@ -887,12 +1198,54 @@ static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech) trf->technology = tech; + /* If in initiator mode and not changing the RF tech due to a + * PSL sequence (indicated by 'trf->iso_ctrl == 0xff' from + * trf7970a_init()), clear the NFC Target Detection Level register + * due to erratum. + */ + if (trf->iso_ctrl == 0xff) + ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0); + return ret; } -static int trf7970a_config_framing(struct trf7970a *trf, int framing) +static int trf7970a_is_rf_field(struct trf7970a *trf, bool *is_rf_field) +{ + int ret; + u8 rssi; + + ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, + trf->chip_status_ctrl | TRF7970A_CHIP_STATUS_REC_ON); + if (ret) + return ret; + + ret = trf7970a_cmd(trf, TRF7970A_CMD_TEST_EXT_RF); + if (ret) + return ret; + + usleep_range(50, 60); + + ret = trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi); + if (ret) + return ret; + + ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, + trf->chip_status_ctrl); + if (ret) + return ret; + + if (rssi & TRF7970A_RSSI_OSC_STATUS_RSSI_MASK) + *is_rf_field = true; + else + *is_rf_field = false; + + return 0; +} + +static int trf7970a_in_config_framing(struct trf7970a *trf, int framing) { u8 iso_ctrl = trf->iso_ctrl_tech; + bool is_rf_field = false; int ret; dev_dbg(trf->dev, "framing: %d\n", framing); @@ -911,6 +1264,8 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing) case NFC_DIGITAL_FRAMING_NFCF_T3T: case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY: case NFC_DIGITAL_FRAMING_ISO15693_T5T: + case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: + case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; break; @@ -925,6 +1280,15 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing) trf->framing = framing; + if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) { + ret = trf7970a_is_rf_field(trf, &is_rf_field); + if (ret) + return ret; + + if (is_rf_field) + return -EBUSY; + } + if (iso_ctrl != trf->iso_ctrl) { ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); if (ret) @@ -947,7 +1311,7 @@ static int trf7970a_config_framing(struct trf7970a *trf, int framing) trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON; - usleep_range(5000, 6000); + usleep_range(trf->guard_time, trf->guard_time + 1000); } return 0; @@ -963,21 +1327,28 @@ static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type, mutex_lock(&trf->lock); - if (trf->state == TRF7970A_ST_OFF) - trf7970a_switch_rf_on(trf); + trf->is_initiator = true; + + if ((trf->state == TRF7970A_ST_PWR_OFF) || + (trf->state == TRF7970A_ST_RF_OFF)) { + ret = trf7970a_switch_rf_on(trf); + if (ret) + goto err_unlock; + } switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: - ret = trf7970a_config_rf_tech(trf, param); + ret 
= trf7970a_in_config_rf_tech(trf, param); break; case NFC_DIGITAL_CONFIG_FRAMING: - ret = trf7970a_config_framing(trf, param); + ret = trf7970a_in_config_framing(trf, param); break; default: dev_dbg(trf->dev, "Unknown type: %d\n", type); ret = -EINVAL; } +err_unlock: mutex_unlock(&trf->lock); return ret; } @@ -1067,14 +1438,15 @@ static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb) return 0; } -static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, +static int trf7970a_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); - char *prefix; + u8 prefix[5]; unsigned int len; int ret; + u8 status; dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n", trf->state, timeout, skb->len); @@ -1099,12 +1471,14 @@ static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, goto out_err; } - trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE, - GFP_KERNEL); - if (!trf->rx_skb) { - dev_dbg(trf->dev, "Can't alloc rx_skb\n"); - ret = -ENOMEM; - goto out_err; + if (timeout) { + trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE, + GFP_KERNEL); + if (!trf->rx_skb) { + dev_dbg(trf->dev, "Can't alloc rx_skb\n"); + ret = -ENOMEM; + goto out_err; + } } if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) { @@ -1115,9 +1489,11 @@ static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, trf->state = TRF7970A_ST_IDLE; } - ret = trf7970a_per_cmd_config(trf, skb); - if (ret) - goto out_err; + if (trf->is_initiator) { + ret = trf7970a_per_cmd_config(trf, skb); + if (ret) + goto out_err; + } trf->ddev = ddev; trf->tx_skb = skb; @@ -1127,11 +1503,11 @@ static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, trf->ignore_timeout = false; len = skb->len; - prefix = skb_push(skb, TRF7970A_TX_SKB_HEADROOM); /* TX data must be prefixed with a FIFO reset cmd, a cmd that depends * on what the current framing is, the address of the TX length byte 1 * register (0x1d), and the 2 byte length of the data to be transmitted. + * That totals 5 bytes. 
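+ * Roughly: prefix[0] is the FIFO reset command, prefix[1] the transmit
+ * command selected by the current framing (trf->tx_cmd), prefix[2] the
+ * TX length byte 1 register address (0x1d), and prefix[3..4] the 2 byte
+ * transmit length.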
*/ prefix[0] = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET); @@ -1150,9 +1526,12 @@ static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, len = min_t(int, skb->len, TRF7970A_FIFO_SIZE); - usleep_range(1000, 2000); + /* Clear possible spurious interrupt */ + ret = trf7970a_read_irqstatus(trf, &status); + if (ret) + goto out_err; - ret = trf7970a_transmit(trf, skb, len); + ret = trf7970a_transmit(trf, skb, len, prefix, sizeof(prefix)); if (ret) { kfree_skb(trf->rx_skb); trf->rx_skb = NULL; @@ -1163,46 +1542,272 @@ static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev, return ret; } -static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev, - int type, int param) +static int trf7970a_tg_config_rf_tech(struct trf7970a *trf, int tech) +{ + int ret = 0; + + dev_dbg(trf->dev, "rf technology: %d\n", tech); + + switch (tech) { + case NFC_DIGITAL_RF_TECH_106A: + trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | + TRF7970A_ISO_CTRL_NFC_CE | + TRF7970A_ISO_CTRL_NFC_CE_14443A; + trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_OOK; + break; + case NFC_DIGITAL_RF_TECH_212F: + trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | + TRF7970A_ISO_CTRL_NFC_NFCF_212; + trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + break; + case NFC_DIGITAL_RF_TECH_424F: + trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE | + TRF7970A_ISO_CTRL_NFC_NFCF_424; + trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_DEPTH_ASK10; + break; + default: + dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech); + return -EINVAL; + } + + trf->technology = tech; + + /* Normally we write the ISO_CTRL register in + * trf7970a_tg_config_framing() because the framing can change + * the value written. However, when sending a PSL RES, + * digital_tg_send_psl_res_complete() doesn't call + * trf7970a_tg_config_framing() so we must write the register + * here. + */ + if ((trf->framing == NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED) && + (trf->iso_ctrl_tech != trf->iso_ctrl)) { + ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, + trf->iso_ctrl_tech); + + trf->iso_ctrl = trf->iso_ctrl_tech; + } + + return ret; +} + +/* Since this is a target routine, several of the framing calls are + * made between receiving the request and sending the response so they + * should take effect until after the response is sent. This is accomplished + * by skipping the ISO_CTRL register write here and doing it in the interrupt + * handler. 
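+ * For example, the NFCA_STANDARD, NFCA_STANDARD_WITH_CRC_A and
+ * NFCA_ANTICOL_COMPLETE cases below only record the requested framing;
+ * the matching ISO_CTRL and Special Functions register 1 writes are done
+ * in the TX branch of trf7970a_irq().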
+ */ +static int trf7970a_tg_config_framing(struct trf7970a *trf, int framing) +{ + u8 iso_ctrl = trf->iso_ctrl_tech; + int ret; + + dev_dbg(trf->dev, "framing: %d\n", framing); + + switch (framing) { + case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: + trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; + iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; + break; + case NFC_DIGITAL_FRAMING_NFCA_STANDARD: + case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: + case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE: + /* These ones are applied in the interrupt handler */ + iso_ctrl = trf->iso_ctrl; /* Don't write to ISO_CTRL yet */ + break; + case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP: + trf->tx_cmd = TRF7970A_CMD_TRANSMIT; + iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; + break; + case NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED: + trf->tx_cmd = TRF7970A_CMD_TRANSMIT; + iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; + break; + default: + dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing); + return -EINVAL; + } + + trf->framing = framing; + + if (iso_ctrl != trf->iso_ctrl) { + ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); + if (ret) + return ret; + + trf->iso_ctrl = iso_ctrl; + + ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, + trf->modulator_sys_clk_ctrl); + if (ret) + return ret; + } + + if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) { + ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, + trf->chip_status_ctrl | + TRF7970A_CHIP_STATUS_RF_ON); + if (ret) + return ret; + + trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON; + } + + return 0; +} + +static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev, int type, + int param) +{ + struct trf7970a *trf = nfc_digital_get_drvdata(ddev); + int ret; + + dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param); + + mutex_lock(&trf->lock); + + trf->is_initiator = false; + + if ((trf->state == TRF7970A_ST_PWR_OFF) || + (trf->state == TRF7970A_ST_RF_OFF)) { + ret = trf7970a_switch_rf_on(trf); + if (ret) + goto err_unlock; + } + + switch (type) { + case NFC_DIGITAL_CONFIG_RF_TECH: + ret = trf7970a_tg_config_rf_tech(trf, param); + break; + case NFC_DIGITAL_CONFIG_FRAMING: + ret = trf7970a_tg_config_framing(trf, param); + break; + default: + dev_dbg(trf->dev, "Unknown type: %d\n", type); + ret = -EINVAL; + } + +err_unlock: + mutex_unlock(&trf->lock); + return ret; +} + +static int _trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, + nfc_digital_cmd_complete_t cb, void *arg, bool mode_detect) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); + int ret; + + mutex_lock(&trf->lock); - dev_dbg(trf->dev, "Unsupported interface\n"); + if ((trf->state != TRF7970A_ST_IDLE) && + (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) { + dev_err(trf->dev, "%s - Bogus state: %d\n", __func__, + trf->state); + ret = -EIO; + goto out_err; + } - return -EINVAL; + if (trf->aborting) { + dev_dbg(trf->dev, "Abort process complete\n"); + trf->aborting = false; + ret = -ECANCELED; + goto out_err; + } + + trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE, + GFP_KERNEL); + if (!trf->rx_skb) { + dev_dbg(trf->dev, "Can't alloc rx_skb\n"); + ret = -ENOMEM; + goto out_err; + } + + ret = trf7970a_write(trf, TRF7970A_RX_SPECIAL_SETTINGS, + TRF7970A_RX_SPECIAL_SETTINGS_HBT | + TRF7970A_RX_SPECIAL_SETTINGS_M848 | + TRF7970A_RX_SPECIAL_SETTINGS_C424 | + TRF7970A_RX_SPECIAL_SETTINGS_C212); + if (ret) + goto out_err; + + ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL, + TRF7970A_REG_IO_CTRL_VRS(0x1)); + if (ret) + goto out_err; + + ret = trf7970a_write(trf, 
TRF7970A_NFC_LOW_FIELD_LEVEL, + TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(0x3)); + if (ret) + goto out_err; + + ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, + TRF7970A_NFC_TARGET_LEVEL_RFDET(0x7)); + if (ret) + goto out_err; + + trf->ddev = ddev; + trf->cb = cb; + trf->cb_arg = arg; + trf->timeout = timeout; + trf->ignore_timeout = false; + + ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX); + if (ret) + goto out_err; + + trf->state = mode_detect ? TRF7970A_ST_LISTENING_MD : + TRF7970A_ST_LISTENING; + + schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout)); + +out_err: + mutex_unlock(&trf->lock); + return ret; } -static int trf7970a_tg_send_cmd(struct nfc_digital_dev *ddev, - struct sk_buff *skb, u16 timeout, +static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); - dev_dbg(trf->dev, "Unsupported interface\n"); + dev_dbg(trf->dev, "Listen - state: %d, timeout: %d ms\n", + trf->state, timeout); - return -EINVAL; + return _trf7970a_tg_listen(ddev, timeout, cb, arg, false); } -static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, +static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); + int ret; + + dev_dbg(trf->dev, "Listen MD - state: %d, timeout: %d ms\n", + trf->state, timeout); - dev_dbg(trf->dev, "Unsupported interface\n"); + ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, + NFC_DIGITAL_RF_TECH_106A); + if (ret) + return ret; - return -EINVAL; + ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, + NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); + if (ret) + return ret; + + return _trf7970a_tg_listen(ddev, timeout, cb, arg, true); } -static int trf7970a_tg_listen_mdaa(struct nfc_digital_dev *ddev, - struct digital_tg_mdaa_params *mdaa_params, - u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) +static int trf7970a_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); - dev_dbg(trf->dev, "Unsupported interface\n"); + dev_dbg(trf->dev, "Get RF Tech - state: %d, rf_tech: %d\n", + trf->state, trf->md_rf_tech); - return -EINVAL; + *rf_tech = trf->md_rf_tech; + + return 0; } static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev) @@ -1220,6 +1825,11 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev) case TRF7970A_ST_WAIT_TO_ISSUE_EOF: trf->aborting = true; break; + case TRF7970A_ST_LISTENING: + trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); + trf7970a_send_err_upstream(trf, -ECANCELED); + dev_dbg(trf->dev, "Abort process complete\n"); + break; default: break; } @@ -1229,15 +1839,114 @@ static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev) static struct nfc_digital_ops trf7970a_nfc_ops = { .in_configure_hw = trf7970a_in_configure_hw, - .in_send_cmd = trf7970a_in_send_cmd, + .in_send_cmd = trf7970a_send_cmd, .tg_configure_hw = trf7970a_tg_configure_hw, - .tg_send_cmd = trf7970a_tg_send_cmd, + .tg_send_cmd = trf7970a_send_cmd, .tg_listen = trf7970a_tg_listen, - .tg_listen_mdaa = trf7970a_tg_listen_mdaa, + .tg_listen_md = trf7970a_tg_listen_md, + .tg_get_rf_tech = trf7970a_tg_get_rf_tech, .switch_rf = trf7970a_switch_rf, .abort_cmd = trf7970a_abort_cmd, }; +static int trf7970a_power_up(struct trf7970a *trf) +{ + int ret; + + dev_dbg(trf->dev, "Powering up - state: %d\n", trf->state); + + if (trf->state != 
TRF7970A_ST_PWR_OFF) + return 0; + + ret = regulator_enable(trf->regulator); + if (ret) { + dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret); + return ret; + } + + usleep_range(5000, 6000); + + if (!(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW)) { + gpio_set_value(trf->en2_gpio, 1); + usleep_range(1000, 2000); + } + + gpio_set_value(trf->en_gpio, 1); + + usleep_range(20000, 21000); + + trf->state = TRF7970A_ST_RF_OFF; + + return 0; +} + +static int trf7970a_power_down(struct trf7970a *trf) +{ + int ret; + + dev_dbg(trf->dev, "Powering down - state: %d\n", trf->state); + + if (trf->state == TRF7970A_ST_PWR_OFF) + return 0; + + if (trf->state != TRF7970A_ST_RF_OFF) { + dev_dbg(trf->dev, "Can't power down - not RF_OFF state (%d)\n", + trf->state); + return -EBUSY; + } + + gpio_set_value(trf->en_gpio, 0); + gpio_set_value(trf->en2_gpio, 0); + + ret = regulator_disable(trf->regulator); + if (ret) + dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__, + ret); + + trf->state = TRF7970A_ST_PWR_OFF; + + return ret; +} + +static int trf7970a_startup(struct trf7970a *trf) +{ + int ret; + + ret = trf7970a_power_up(trf); + if (ret) + return ret; + + pm_runtime_set_active(trf->dev); + pm_runtime_enable(trf->dev); + pm_runtime_mark_last_busy(trf->dev); + + return 0; +} + +static void trf7970a_shutdown(struct trf7970a *trf) +{ + switch (trf->state) { + case TRF7970A_ST_WAIT_FOR_TX_FIFO: + case TRF7970A_ST_WAIT_FOR_RX_DATA: + case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: + case TRF7970A_ST_WAIT_TO_ISSUE_EOF: + case TRF7970A_ST_LISTENING: + trf7970a_send_err_upstream(trf, -ECANCELED); + /* FALLTHROUGH */ + case TRF7970A_ST_IDLE: + case TRF7970A_ST_IDLE_RX_BLOCKED: + trf7970a_switch_rf_off(trf); + break; + default: + break; + } + + pm_runtime_disable(trf->dev); + pm_runtime_set_suspended(trf->dev); + + trf7970a_power_down(trf); +} + static int trf7970a_get_autosuspend_delay(struct device_node *np) { int autosuspend_delay, ret; @@ -1246,15 +1955,18 @@ static int trf7970a_get_autosuspend_delay(struct device_node *np) if (ret) autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY; - of_node_put(np); - return autosuspend_delay; } +static int trf7970a_get_vin_voltage_override(struct device_node *np, + u32 *vin_uvolts) +{ + return of_property_read_u32(np, "vin-voltage-override", vin_uvolts); +} + static int trf7970a_probe(struct spi_device *spi) { struct device_node *np = spi->dev.of_node; - const struct spi_device_id *id = spi_get_device_id(spi); struct trf7970a *trf; int uvolts, autosuspend_delay, ret; @@ -1267,14 +1979,22 @@ static int trf7970a_probe(struct spi_device *spi) if (!trf) return -ENOMEM; - trf->state = TRF7970A_ST_OFF; + trf->state = TRF7970A_ST_PWR_OFF; trf->dev = &spi->dev; trf->spi = spi; - trf->quirks = id->driver_data; spi->mode = SPI_MODE_1; spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(trf->dev, "Can't set up SPI Communication\n"); + return ret; + } + + if (of_property_read_bool(np, "irq-status-read-quirk")) + trf->quirks |= TRF7970A_QUIRK_IRQ_STATUS_READ; + /* There are two enable pins - both must be present */ trf->en_gpio = of_get_named_gpio(np, "ti,enable-gpios", 0); if (!gpio_is_valid(trf->en_gpio)) { @@ -1283,7 +2003,7 @@ static int trf7970a_probe(struct spi_device *spi) } ret = devm_gpio_request_one(trf->dev, trf->en_gpio, - GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN"); + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "trf7970a EN"); if (ret) { dev_err(trf->dev, "Can't request EN GPIO: %d\n", ret); return ret; @@ -1296,12 +2016,15 @@ static int trf7970a_probe(struct 
spi_device *spi) } ret = devm_gpio_request_one(trf->dev, trf->en2_gpio, - GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN2"); + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "trf7970a EN2"); if (ret) { dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret); return ret; } + if (of_property_read_bool(np, "en2-rf-quirk")) + trf->quirks |= TRF7970A_QUIRK_EN2_MUST_STAY_LOW; + ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL, trf7970a_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "trf7970a", trf); @@ -1326,15 +2049,17 @@ static int trf7970a_probe(struct spi_device *spi) goto err_destroy_lock; } - uvolts = regulator_get_voltage(trf->regulator); + ret = trf7970a_get_vin_voltage_override(np, &uvolts); + if (ret) + uvolts = regulator_get_voltage(trf->regulator); if (uvolts > 4000000) trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3; trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops, TRF7970A_SUPPORTED_PROTOCOLS, - NFC_DIGITAL_DRV_CAPS_IN_CRC, TRF7970A_TX_SKB_HEADROOM, - 0); + NFC_DIGITAL_DRV_CAPS_IN_CRC | + NFC_DIGITAL_DRV_CAPS_TG_CRC, 0, 0); if (!trf->ddev) { dev_err(trf->dev, "Can't allocate NFC digital device\n"); ret = -ENOMEM; @@ -1349,19 +2074,23 @@ static int trf7970a_probe(struct spi_device *spi) pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay); pm_runtime_use_autosuspend(trf->dev); - pm_runtime_enable(trf->dev); + + ret = trf7970a_startup(trf); + if (ret) + goto err_free_ddev; ret = nfc_digital_register_device(trf->ddev); if (ret) { dev_err(trf->dev, "Can't register NFC digital device: %d\n", ret); - goto err_free_ddev; + goto err_shutdown; } return 0; +err_shutdown: + trf7970a_shutdown(trf); err_free_ddev: - pm_runtime_disable(trf->dev); nfc_digital_free_device(trf->ddev); err_disable_regulator: regulator_disable(trf->regulator); @@ -1376,25 +2105,10 @@ static int trf7970a_remove(struct spi_device *spi) mutex_lock(&trf->lock); - switch (trf->state) { - case TRF7970A_ST_WAIT_FOR_TX_FIFO: - case TRF7970A_ST_WAIT_FOR_RX_DATA: - case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: - case TRF7970A_ST_WAIT_TO_ISSUE_EOF: - trf7970a_send_err_upstream(trf, -ECANCELED); - /* FALLTHROUGH */ - case TRF7970A_ST_IDLE: - case TRF7970A_ST_IDLE_RX_BLOCKED: - pm_runtime_put_sync(trf->dev); - break; - default: - break; - } + trf7970a_shutdown(trf); mutex_unlock(&trf->lock); - pm_runtime_disable(trf->dev); - nfc_digital_unregister_device(trf->ddev); nfc_digital_free_device(trf->ddev); @@ -1405,72 +2119,83 @@ static int trf7970a_remove(struct spi_device *spi) return 0; } -#ifdef CONFIG_PM_RUNTIME -static int trf7970a_pm_runtime_suspend(struct device *dev) +#ifdef CONFIG_PM_SLEEP +static int trf7970a_suspend(struct device *dev) +{ + struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct trf7970a *trf = spi_get_drvdata(spi); + + dev_dbg(dev, "Suspend\n"); + + mutex_lock(&trf->lock); + + trf7970a_shutdown(trf); + + mutex_unlock(&trf->lock); + + return 0; +} + +static int trf7970a_resume(struct device *dev) { struct spi_device *spi = container_of(dev, struct spi_device, dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; - dev_dbg(dev, "Runtime suspend\n"); + dev_dbg(dev, "Resume\n"); - if (trf->state != TRF7970A_ST_OFF) { - dev_dbg(dev, "Can't suspend - not in OFF state (%d)\n", - trf->state); - return -EBUSY; - } + mutex_lock(&trf->lock); - gpio_set_value(trf->en_gpio, 0); - gpio_set_value(trf->en2_gpio, 0); + ret = trf7970a_startup(trf); - ret = regulator_disable(trf->regulator); - if (ret) - dev_err(dev, "%s - Can't disable VIN: %d\n", __func__, ret); + mutex_unlock(&trf->lock); return ret; } 
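+/*
+ * Rough summary of the resulting power handling: trf7970a_power_up() and
+ * trf7970a_power_down() move the chip between TRF7970A_ST_PWR_OFF and
+ * TRF7970A_ST_RF_OFF and are driven by the runtime PM callbacks below as
+ * well as by system suspend/resume above, while trf7970a_switch_rf_on()
+ * and trf7970a_switch_rf_off() move between TRF7970A_ST_RF_OFF and the
+ * active states.
+ */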
+#endif -static int trf7970a_pm_runtime_resume(struct device *dev) +#ifdef CONFIG_PM_RUNTIME +static int trf7970a_pm_runtime_suspend(struct device *dev) { struct spi_device *spi = container_of(dev, struct spi_device, dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; - dev_dbg(dev, "Runtime resume\n"); + dev_dbg(dev, "Runtime suspend\n"); - ret = regulator_enable(trf->regulator); - if (ret) { - dev_err(dev, "%s - Can't enable VIN: %d\n", __func__, ret); - return ret; - } + mutex_lock(&trf->lock); - usleep_range(5000, 6000); + ret = trf7970a_power_down(trf); - gpio_set_value(trf->en2_gpio, 1); - usleep_range(1000, 2000); - gpio_set_value(trf->en_gpio, 1); + mutex_unlock(&trf->lock); - usleep_range(20000, 21000); + return ret; +} - ret = trf7970a_init(trf); - if (ret) { - dev_err(dev, "%s - Can't initialize: %d\n", __func__, ret); - return ret; - } +static int trf7970a_pm_runtime_resume(struct device *dev) +{ + struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct trf7970a *trf = spi_get_drvdata(spi); + int ret; - pm_runtime_mark_last_busy(dev); + dev_dbg(dev, "Runtime resume\n"); - return 0; + ret = trf7970a_power_up(trf); + if (!ret) + pm_runtime_mark_last_busy(dev); + + return ret; } #endif static const struct dev_pm_ops trf7970a_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(trf7970a_suspend, trf7970a_resume) SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend, trf7970a_pm_runtime_resume, NULL) }; static const struct spi_device_id trf7970a_id_table[] = { - { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA }, + { "trf7970a", 0 }, { } }; MODULE_DEVICE_TABLE(spi, trf7970a_id_table); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 401b2453da45ae033c7d334dff28679284202765..1bd43053b8c774505d7d3037d46a96925a446643 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -224,6 +224,8 @@ struct phy_device *of_phy_connect(struct net_device *dev, if (!phy) return NULL; + phy->dev_flags = flags; + return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); @@ -284,6 +286,7 @@ int of_phy_register_fixed_link(struct device_node *np) struct device_node *fixed_link_node; const __be32 *fixed_link_prop; int len; + struct phy_device *phy; /* New binding */ fixed_link_node = of_get_child_by_name(np, "fixed-link"); @@ -297,7 +300,8 @@ int of_phy_register_fixed_link(struct device_node *np) status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); of_node_put(fixed_link_node); - return fixed_phy_register(PHY_POLL, &status, np); + phy = fixed_phy_register(PHY_POLL, &status, np); + return IS_ERR(phy) ? PTR_ERR(phy) : 0; } /* Old binding */ @@ -308,7 +312,8 @@ int of_phy_register_fixed_link(struct device_node *np) status.speed = be32_to_cpu(fixed_link_prop[2]); status.pause = be32_to_cpu(fixed_link_prop[3]); status.asym_pause = be32_to_cpu(fixed_link_prop[4]); - return fixed_phy_register(PHY_POLL, &status, np); + phy = fixed_phy_register(PHY_POLL, &status, np); + return IS_ERR(phy) ? 
PTR_ERR(phy) : 0; } return -ENODEV; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index f8427a2c4840b9e2453e7a9129068cb69e438397..afebb9709763b0a011320b9ad7e66aa709eca9a3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3306,7 +3306,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(card->dev); card->dev->gso_max_size = 15 * PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c index fcba08770e0e777c0573f6fdf831c984d36c1fe3..3eb77de17e3acfe142242de4eaaf1fa54bc22bb6 100644 --- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c @@ -2128,7 +2128,7 @@ static int on_action_public23a(struct rtw_adapter *padapter, IEEE80211_BAND_5GHZ); if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe, - skb->len, 0, GFP_ATOMIC)) + skb->len, 0)) return _SUCCESS; return _FAIL; diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index a0d44c679f5555fdac47f9c509cb78bc6c8c5d41..bd6953af0a0396f715cb2622e5a4e6ca904b5228 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -279,6 +279,7 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter, } bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, pnetwork->network.MacAddress, pnetwork->network.tsf, pnetwork->network.capability, @@ -2379,7 +2380,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter, IEEE80211_BAND_5GHZ); cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len, - 0, GFP_ATOMIC); + 0); #endif /* defined(RTW_USE_CFG80211_STA_EVENT) */ } @@ -2425,7 +2426,7 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter, frame_len = sizeof(struct ieee80211_hdr_3addr) + 2; cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, (u8 *)&mgmt, frame_len, - 0, GFP_ATOMIC); + 0); #endif /* defined(RTW_USE_CFG80211_STA_EVENT) */ } diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 3727f6d25cf1af074ce35f1336453e6b7312a965..8942dcb44180339ffb9cd6318a3b29d3aef06241 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -422,6 +422,7 @@ static int prism2_scan(struct wiphy *wiphy, IEEE80211_BAND_2GHZ); bss = cfg80211_inform_bss(wiphy, ieee80211_get_channel(wiphy, freq), + CFG80211_BSS_FTYPE_UNKNOWN, (const u8 *) &(msg2.bssid.data.data), msg2.timestamp.data, msg2.capinfo.data, msg2.beaconperiod.data, diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index bcdc882cd4157af2a0bd56a89248b7d3969d4b35..146f48cc65d7fb9c000565448121337f914c694c 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1101,7 +1101,15 @@ static void ncm_tx_tasklet(unsigned long data) /* Only send if data is available. */ if (ncm->skb_tx_data) { ncm->timer_force_tx = true; + + /* XXX This allowance of a NULL skb argument to ndo_start_xmit + * XXX is not sane. 
The gadget layer should be redesigned so + * XXX that the dev->wrap() invocations to build SKBs is transparent + * XXX and performed in some way outside of the ndo_start_xmit + * XXX interface. + */ ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev); + ncm->timer_force_tx = false; } } diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 205f4a336583de6c32b608854a943cc121dc1dc4..cd6d0afb6b8f7869d40520223093380159ba91ff 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -237,7 +237,7 @@ static int bcma_hcd_probe(struct bcma_device *dev) bcma_hcd_init_chip(dev); /* In AI chips EHCI is addrspace 0, OHCI is 1 */ - ohci_addr = dev->addr1; + ohci_addr = dev->addr_s[0]; if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749) && chipinfo->rev == 0) ohci_addr = 0x18009000; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 4d08f45a9c29aeba18fe4c174bce0a644a090d57..3b1f89b6e7435576315c1b75e7754e3658080200 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -99,36 +99,10 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) -static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, - unsigned int *count) -{ - return sg_next(sg); -} - -static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, - unsigned int *count) -{ - if (--(*count) == 0) - return NULL; - return sg + 1; -} - -/* Set up an indirect table of descriptors and add it to the queue. */ -static inline int vring_add_indirect(struct vring_virtqueue *vq, - struct scatterlist *sgs[], - struct scatterlist *(*next) - (struct scatterlist *, unsigned int *), - unsigned int total_sg, - unsigned int total_out, - unsigned int total_in, - unsigned int out_sgs, - unsigned int in_sgs, - gfp_t gfp) +static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) { struct vring_desc *desc; - unsigned head; - struct scatterlist *sg; - int i, n; + unsigned int i; /* * We require lowmem mappings for the descriptors because @@ -139,57 +113,16 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq, desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) - return -ENOMEM; - - /* Transfer entries from the sg lists into the indirect page */ - i = 0; - for (n = 0; n < out_sgs; n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - i++; - } - } - for (; n < (out_sgs + in_sgs); n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - i++; - } - } - BUG_ON(i != total_sg); - - /* Last one doesn't continue. 
*/ - desc[i-1].flags &= ~VRING_DESC_F_NEXT; - desc[i-1].next = 0; - - /* We're about to use a buffer */ - vq->vq.num_free--; - - /* Use a single buffer which doesn't continue */ - head = vq->free_head; - vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; - vq->vring.desc[head].addr = virt_to_phys(desc); - /* kmemleak gives a false positive, as it's hidden by virt_to_phys */ - kmemleak_ignore(desc); - vq->vring.desc[head].len = i * sizeof(struct vring_desc); - - /* Update free pointer */ - vq->free_head = vq->vring.desc[head].next; + return NULL; - return head; + for (i = 0; i < total_sg; i++) + desc[i].next = i+1; + return desc; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], - struct scatterlist *(*next) - (struct scatterlist *, unsigned int *), - unsigned int total_out, - unsigned int total_in, + unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, @@ -197,8 +130,10 @@ static inline int virtqueue_add(struct virtqueue *_vq, { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; - unsigned int i, n, avail, uninitialized_var(prev), total_sg; + struct vring_desc *desc; + unsigned int i, n, avail, descs_used, uninitialized_var(prev); int head; + bool indirect; START_USE(vq); @@ -222,24 +157,40 @@ static inline int virtqueue_add(struct virtqueue *_vq, } #endif - total_sg = total_in + total_out; + BUG_ON(total_sg > vq->vring.num); + BUG_ON(total_sg == 0); + + head = vq->free_head; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ - if (vq->indirect && total_sg > 1 && vq->vq.num_free) { - head = vring_add_indirect(vq, sgs, next, total_sg, total_out, - total_in, - out_sgs, in_sgs, gfp); - if (likely(head >= 0)) - goto add_head; + if (vq->indirect && total_sg > 1 && vq->vq.num_free) + desc = alloc_indirect(total_sg, gfp); + else + desc = NULL; + + if (desc) { + /* Use a single buffer which doesn't continue */ + vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vring.desc[head].addr = virt_to_phys(desc); + /* avoid kmemleak false positive (hidden by virt_to_phys) */ + kmemleak_ignore(desc); + vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc); + + /* Set up rest to use this indirect table. */ + i = 0; + descs_used = 1; + indirect = true; + } else { + desc = vq->vring.desc; + i = head; + descs_used = total_sg; + indirect = false; } - BUG_ON(total_sg > vq->vring.num); - BUG_ON(total_sg == 0); - - if (vq->vq.num_free < total_sg) { + if (vq->vq.num_free < descs_used) { pr_debug("Can't add buf len %i - avail = %i\n", - total_sg, vq->vq.num_free); + descs_used, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ @@ -250,34 +201,35 @@ static inline int virtqueue_add(struct virtqueue *_vq, } /* We're about to use some buffers from the free list. 
*/ - vq->vq.num_free -= total_sg; + vq->vq.num_free -= descs_used; - head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = VRING_DESC_F_NEXT; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; prev = i; - i = vq->vring.desc[i].next; + i = desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; prev = i; - i = vq->vring.desc[i].next; + i = desc[i].next; } } /* Last one doesn't continue. */ - vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; + desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ - vq->free_head = i; + if (indirect) + vq->free_head = vq->vring.desc[head].next; + else + vq->free_head = i; -add_head: /* Set token. */ vq->data[head] = data; @@ -324,29 +276,23 @@ int virtqueue_add_sgs(struct virtqueue *_vq, void *data, gfp_t gfp) { - unsigned int i, total_out, total_in; + unsigned int i, total_sg = 0; /* Count them first. */ - for (i = total_out = total_in = 0; i < out_sgs; i++) { - struct scatterlist *sg; - for (sg = sgs[i]; sg; sg = sg_next(sg)) - total_out++; - } - for (; i < out_sgs + in_sgs; i++) { + for (i = 0; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) - total_in++; + total_sg++; } - return virtqueue_add(_vq, sgs, sg_next_chained, - total_out, total_in, out_sgs, in_sgs, data, gfp); + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. - * @sgs: array of scatterlists (need not be terminated!) - * @num: the number of scatterlists readable by other side + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * @@ -356,19 +302,19 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs); * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_outbuf(struct virtqueue *vq, - struct scatterlist sg[], unsigned int num, + struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. - * @sgs: array of scatterlists (need not be terminated!) - * @num: the number of scatterlists writable by other side + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * @@ -378,11 +324,11 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
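Given the tightened requirement that the scatterlist handed to virtqueue_add_outbuf()/virtqueue_add_inbuf() be well-formed and terminated, a minimal usage sketch (only the virtio and scatterlist calls are real API; the wrapper is illustrative):

#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int example_send(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;

	/* sg_init_one() marks the single entry as the end of the list,
	 * which satisfies the "well-formed and terminated" requirement */
	sg_init_one(&sg, buf, len);

	/* use buf as the token so it can be recovered from
	 * virtqueue_get_buf() once the device has consumed it */
	return virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}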
*/ int virtqueue_add_inbuf(struct virtqueue *vq, - struct scatterlist sg[], unsigned int num, + struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0272e49135d0cf0521a62c619185eadf2b418666..729f48e6b20b21564bfdbc4d958fd0bc327f740d 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -267,7 +267,7 @@ struct bcma_device { u8 core_unit; u32 addr; - u32 addr1; + u32 addr_s[8]; u32 wrap; void __iomem *io_addr; @@ -323,6 +323,8 @@ struct bcma_bus { struct pci_dev *host_pci; /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ struct sdio_func *host_sdio; + /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ + struct platform_device *host_pdev; }; struct bcma_chipinfo chipinfo; @@ -332,10 +334,10 @@ struct bcma_bus { struct bcma_device *mapped_core; struct list_head cores; u8 nr_cores; - u8 init_done:1; u8 num; struct bcma_drv_cc drv_cc; + struct bcma_drv_cc_b drv_cc_b; struct bcma_drv_pci drv_pci[2]; struct bcma_drv_pcie2 drv_pcie2; struct bcma_drv_mips drv_mips; diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 63d105cd14a33fc60048c28c9c3b07ef4f4136ef..db6fa217f98bf3f1276ac659c61e50099da739e4 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -644,6 +644,12 @@ struct bcma_drv_cc { #endif }; +struct bcma_drv_cc_b { + struct bcma_device *core; + u8 setup_done:1; + void __iomem *mii; +}; + /* Register access */ #define bcma_cc_read32(cc, offset) \ bcma_read32((cc)->core, offset) @@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid); extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); +void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value); + #endif /* LINUX_BCMA_DRIVER_CC_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index 917dcd7965e7fb9b148029b2675cf1ee25f93671..e64ae7bf80a11f1eb6cd360225cbbc94eecd361b 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -39,6 +39,11 @@ #define BCMA_RESET_CTL_RESET 0x0001 #define BCMA_RESET_ST 0x0804 +#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001 +#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002 + /* BCMA PCI config space registers. 
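A hedged sketch of how a map backend is expected to plug into the map interface above; the example_map_* callbacks are hypothetical placeholders implemented elsewhere, and BPF_MAP_TYPE_UNSPEC merely stands in for a real map type id from the uapi header:

#include <linux/bpf.h>
#include <linux/init.h>

static struct bpf_map_ops example_map_ops = {
	/* callable from the bpf() syscall */
	.map_alloc		= example_map_alloc,
	.map_free		= example_map_free,
	.map_get_next_key	= example_map_get_next_key,
	/* callable from the syscall and from eBPF programs */
	.map_lookup_elem	= example_map_lookup_elem,
	.map_update_elem	= example_map_update_elem,
	.map_delete_elem	= example_map_delete_elem,
};

static struct bpf_map_type_list example_map_type = {
	.ops	= &example_map_ops,
	.type	= BPF_MAP_TYPE_UNSPEC,	/* stand-in type id */
};

static int __init example_map_register(void)
{
	/* adds this backend to the list consulted when a map is created */
	bpf_register_map_type(&example_map_type);
	return 0;
}
late_initcall(example_map_register);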
*/ #define BCMA_PCI_PMCSR 0x44 #define BCMA_PCI_PE 0x100 diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 4203c5593b9f99f638eda35ad25ca161365c249b..f24d245f83948157092816175d872f360356ad48 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -10,6 +10,7 @@ struct bcma_soc { }; int __init bcma_host_soc_register(struct bcma_soc *soc); +int __init bcma_host_soc_init(struct bcma_soc *soc); int bcma_bus_register(struct bcma_bus *bus); diff --git a/include/linux/bpf.h b/include/linux/bpf.h new file mode 100644 index 0000000000000000000000000000000000000000..3cf91754a957e5f509bf5f97050a559cab048878 --- /dev/null +++ b/include/linux/bpf.h @@ -0,0 +1,136 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _LINUX_BPF_H +#define _LINUX_BPF_H 1 + +#include +#include +#include + +struct bpf_map; + +/* map is generic key/value storage optionally accesible by eBPF programs */ +struct bpf_map_ops { + /* funcs callable from userspace (via syscall) */ + struct bpf_map *(*map_alloc)(union bpf_attr *attr); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + + /* funcs callable from userspace and from eBPF programs */ + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_delete_elem)(struct bpf_map *map, void *key); +}; + +struct bpf_map { + atomic_t refcnt; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + struct bpf_map_ops *ops; + struct work_struct work; +}; + +struct bpf_map_type_list { + struct list_head list_node; + struct bpf_map_ops *ops; + enum bpf_map_type type; +}; + +void bpf_register_map_type(struct bpf_map_type_list *tl); +void bpf_map_put(struct bpf_map *map); +struct bpf_map *bpf_map_get(struct fd f); + +/* function argument constraints */ +enum bpf_arg_type { + ARG_ANYTHING = 0, /* any argument is ok */ + + /* the following constraints used to prototype + * bpf_map_lookup/update/delete_elem() functions + */ + ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ + ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ +}; + +/* type of values returned from helper functions */ +enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ +}; + +/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL + * instructions after verifying + */ +struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; + enum bpf_return_type ret_type; + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; +}; + +/* bpf_context is intentionally 
undefined structure. Pointer to bpf_context is + * the first argument to eBPF programs. + * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' + */ +struct bpf_context; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2 +}; + +struct bpf_verifier_ops { + /* return eBPF function prototype for verification */ + const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); + + /* return true if 'size' wide access at offset 'off' within bpf_context + * with 'type' (read or write) is allowed + */ + bool (*is_valid_access)(int off, int size, enum bpf_access_type type); +}; + +struct bpf_prog_type_list { + struct list_head list_node; + struct bpf_verifier_ops *ops; + enum bpf_prog_type type; +}; + +void bpf_register_prog_type(struct bpf_prog_type_list *tl); + +struct bpf_prog; + +struct bpf_prog_aux { + atomic_t refcnt; + bool is_gpl_compatible; + enum bpf_prog_type prog_type; + struct bpf_verifier_ops *ops; + struct bpf_map **used_maps; + u32 used_map_cnt; + struct bpf_prog *prog; + struct work_struct work; +}; + +void bpf_prog_put(struct bpf_prog *prog); +struct bpf_prog *bpf_prog_get(u32 ufd); +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); + +#endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 61219b9b34458862c9be600652006d8ac84b8705..7ccd928cc1f29b71afc7b7cf5cf1ea6a855cd70f 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -13,7 +13,11 @@ #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 +#define PHY_ID_BCM7425 0x03625e60 +#define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7445 0x600d8510 @@ -21,9 +25,9 @@ #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 #define PHY_BCM_OUI_3 0x03625c00 -#define PHY_BCM_OUI_4 0x600d0000 +#define PHY_BCM_OUI_4 0x600d8400 #define PHY_BCM_OUI_5 0x03625e00 - +#define PHY_BCM_OUI_6 0xae025000 #define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 #define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 @@ -38,7 +42,8 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 /* Broadcom BCM7xxx specific workarounds */ -#define PHY_BRCM_100MBPS_WAR 0x00010000 +#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) +#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) #define PHY_BCM_FLAGS_VALID 0x80000000 /* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ @@ -92,4 +97,130 @@ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +/* + * Broadcom LED source encodings. These are used in BCM5461, BCM5481, + * BCM5482, and possibly some others. + */ +#define BCM_LED_SRC_LINKSPD1 0x0 +#define BCM_LED_SRC_LINKSPD2 0x1 +#define BCM_LED_SRC_XMITLED 0x2 +#define BCM_LED_SRC_ACTIVITYLED 0x3 +#define BCM_LED_SRC_FDXLED 0x4 +#define BCM_LED_SRC_SLAVE 0x5 +#define BCM_LED_SRC_INTR 0x6 +#define BCM_LED_SRC_QUALITY 0x7 +#define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_MULTICOLOR1 0xa +#define BCM_LED_SRC_OPENSHORT 0xb +#define BCM_LED_SRC_OFF 0xe /* Tied high */ +#define BCM_LED_SRC_ON 0xf /* Tied low */ + + +/* + * BCM5482: Shadow registers + * Shadow values go into bits [14:10] of register 0x1c to select a shadow + * register to access. 
+ */ +/* 00101: Spare Control Register 3 */ +#define BCM54XX_SHD_SCR3 0x05 +#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 +#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 +#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 + +/* 01010: Auto Power-Down */ +#define BCM54XX_SHD_APD 0x0a +#define BCM54XX_SHD_APD_EN 0x0020 + +#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ + /* LED3 / ~LINKSPD[2] selector */ +#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) + /* LED1 / ~LINKSPD[1] selector */ +#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) +#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ +#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ +#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ +#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ + + +/* + * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) + */ +#define MII_BCM54XX_EXP_AADJ1CH0 0x001f +#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 +#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 +#define MII_BCM54XX_EXP_AADJ1CH3 0x601f +#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_BCM54XX_EXP_EXP08 0x0F08 +#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 +#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP75 0x0f75 +#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c +#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 +#define MII_BCM54XX_EXP_EXP96 0x0f96 +#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 +#define MII_BCM54XX_EXP_EXP97 0x0f97 +#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c + +/* + * BCM5482: Secondary SerDes registers + */ +#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ +#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ +#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ +#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ +#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ + + +/*****************************************************************************/ +/* Fast Ethernet Transceiver definitions. */ +/*****************************************************************************/ + +#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ +#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ +#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ +#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ +#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ +#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ + +#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ +#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ + + +/*** Shadow register definitions ***/ + +#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ +#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ + +#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ +#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 +#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 + +#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ +#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ + +/* + * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T + * 0x1c shadow registers. 
+ */ +static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) +{ + phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); + return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); +} + +static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, + u16 val) +{ + return phy_write(phydev, MII_BCM54XX_SHD, + MII_BCM54XX_SHD_WRITE | + MII_BCM54XX_SHD_VAL(shadow) | + MII_BCM54XX_SHD_DATA(val)); +} + +#define BRCM_CL45VEN_EEE_CONTROL 0x803d +#define LPI_FEATURE_EN 0x8000 +#define LPI_FEATURE_EN_DIG1000X 0x4000 + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/com20020.h b/include/linux/com20020.h index 5dcfb944b6ce11591f0623543ff48bb9e03be60d..85898995b234edc244b589415fd2c3391d8cd62d 100644 --- a/include/linux/com20020.h +++ b/include/linux/com20020.h @@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops; #define BUS_ALIGN 1 #endif +#define PLX_PCI_MAX_CARDS 2 + +struct com20020_pci_channel_map { + u32 bar; + u32 offset; + u32 size; /* 0x00 - auto, e.g. length of entire bar */ +}; + +struct com20020_pci_card_info { + const char *name; + int devcount; + + struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; + + unsigned int flags; +}; + +struct com20020_priv { + struct com20020_pci_card_info *ci; + struct list_head list_dev; +}; + +struct com20020_dev { + struct list_head list; + struct net_device *dev; + + struct com20020_priv *pci_priv; + int index; +}; #define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ #define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h deleted file mode 100644 index 362bf19d6cf194a8ae40f84632debc34aac218f0..0000000000000000000000000000000000000000 --- a/include/linux/cycx_x25.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef _CYCX_X25_H -#define _CYCX_X25_H -/* -* cycx_x25.h Cyclom X.25 firmware API definitions. -* -* Author: Arnaldo Carvalho de Melo -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* 2000/04/02 acme dprintk and cycx_debug -* 1999/01/03 acme judicious use of data types -* 1999/01/02 acme #define X25_ACK_N3 0x4411 -* 1998/12/28 acme cleanup: lot'o'things removed -* commands listed, -* TX25Cmd & TX25Config structs -* typedef'ed -*/ -#ifndef PACKED -#define PACKED __attribute__((packed)) -#endif - -/* X.25 shared memory layout. */ -#define X25_MBOX_OFFS 0x300 /* general mailbox block */ -#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */ - -/* Debug */ -#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a) - -extern unsigned int cycx_debug; - -/* Data Structures */ -/* X.25 Command Block. */ -struct cycx_x25_cmd { - u16 command; - u16 link; /* values: 0 or 1 */ - u16 len; /* values: 0 thru 0x205 (517) */ - u32 buf; -} PACKED; - -/* Defines for the 'command' field. 
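As a usage note for the 0x1c shadow helpers above, a short sketch of the read-modify-write pattern a PHY driver would use, here enabling Auto Power-Down (the function name is illustrative):

#include <linux/brcmphy.h>
#include <linux/phy.h>

static int example_enable_apd(struct phy_device *phydev)
{
	/* fetch the current Auto Power-Down shadow value ... */
	int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

	/* ... and write it back with the enable bit set */
	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
				    val | BCM54XX_SHD_APD_EN);
}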
*/ -#define X25_CONNECT_REQUEST 0x4401 -#define X25_CONNECT_RESPONSE 0x4402 -#define X25_DISCONNECT_REQUEST 0x4403 -#define X25_DISCONNECT_RESPONSE 0x4404 -#define X25_DATA_REQUEST 0x4405 -#define X25_ACK_TO_VC 0x4406 -#define X25_INTERRUPT_RESPONSE 0x4407 -#define X25_CONFIG 0x4408 -#define X25_CONNECT_INDICATION 0x4409 -#define X25_CONNECT_CONFIRM 0x440A -#define X25_DISCONNECT_INDICATION 0x440B -#define X25_DISCONNECT_CONFIRM 0x440C -#define X25_DATA_INDICATION 0x440E -#define X25_INTERRUPT_INDICATION 0x440F -#define X25_ACK_FROM_VC 0x4410 -#define X25_ACK_N3 0x4411 -#define X25_CONNECT_COLLISION 0x4413 -#define X25_N3WIN 0x4414 -#define X25_LINE_ON 0x4415 -#define X25_LINE_OFF 0x4416 -#define X25_RESET_REQUEST 0x4417 -#define X25_LOG 0x4500 -#define X25_STATISTIC 0x4600 -#define X25_TRACE 0x4700 -#define X25_N2TRACEXC 0x4702 -#define X25_N3TRACEXC 0x4703 - -/** - * struct cycx_x25_config - cyclom2x x25 firmware configuration - * @link - link number - * @speed - line speed - * @clock - internal/external - * @n2 - # of level 2 retransm.(values: 1 thru FF) - * @n2win - level 2 window (values: 1 thru 7) - * @n3win - level 3 window (values: 1 thru 7) - * @nvc - # of logical channels (values: 1 thru 64) - * @pktlen - level 3 packet length - log base 2 of size - * @locaddr - my address - * @remaddr - remote address - * @t1 - time, in seconds - * @t2 - time, in seconds - * @t21 - time, in seconds - * @npvc - # of permanent virt. circuits (1 thru nvc) - * @t23 - time, in seconds - * @flags - see dosx25.doc, in portuguese, for details - */ -struct cycx_x25_config { - u8 link; - u8 speed; - u8 clock; - u8 n2; - u8 n2win; - u8 n3win; - u8 nvc; - u8 pktlen; - u8 locaddr; - u8 remaddr; - u16 t1; - u16 t2; - u8 t21; - u8 npvc; - u8 t23; - u8 flags; -} PACKED; - -struct cycx_x25_stats { - u16 rx_crc_errors; - u16 rx_over_errors; - u16 n2_tx_frames; - u16 n2_rx_frames; - u16 tx_timeouts; - u16 rx_timeouts; - u16 n3_tx_packets; - u16 n3_rx_packets; - u16 tx_aborts; - u16 rx_aborts; -} PACKED; -#endif /* _CYCX_X25_H */ diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 5621547d631bf47db8056b64e79bb09bd7018ad5..a4be70398ce1b2cb192a7b3bed9477439b2e3c57 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count) { BUG_ON(count > DQL_MAX_OBJECT); - dql->num_queued += count; dql->last_obj_cnt = count; + + /* We want to force a write first, so that cpu do not attempt + * to get cache line containing last_obj_cnt, num_queued, adj_limit + * in Shared state, but directly does a Request For Ownership + * It is only a hint, we use barrier() only. + */ + barrier(); + + dql->num_queued += count; } /* Returns how many objects can be queued, < 0 indicates over limit. */ static inline int dql_avail(const struct dql *dql) { - return dql->adj_limit - dql->num_queued; + return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); } /* Record number of completed objects and recalculate the limit. 
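The barrier() comment above covers the producer side of DQL; for context, a hedged sketch of the usual byte-queue-limits pattern that feeds dql_queued()/dql_completed() from a driver (driver structure and names are illustrative):

#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	unsigned int len = skb->len;

	/* ...hand the skb to the hardware ring here... */

	netdev_tx_sent_queue(txq, len);		/* ends up in dql_queued() */
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	/* dql_completed() under the hood; may restart a stopped queue */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}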
*/ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 9c5529dc6d0776b83888a3e77c0268e00e8f6585..733980fce8e30d6ddd2b442d407a3fac877a759a 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,6 +29,7 @@ #include #ifdef __KERNEL__ +u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e658229fee39c17ea8673eadcf7b49cfdce508e1..c1a2d60dfb82769c42b65dc2adc324517ed7e79c 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -257,6 +257,10 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, + const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, + const struct ethtool_tunable *, const void *); }; diff --git a/include/linux/filter.h b/include/linux/filter.h index a5227ab8ccb17ebd4cf9ed7a55f80c352a2d6fb3..ca95abd2bed130ac3c76161e7ae273e59c48c51b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -4,58 +4,24 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ +#include + #include #include #include +#include +#include #include -#include -/* Internally used and optimized filter representation with extended - * instruction set based on top of classic BPF. - */ +#include -/* instruction classes */ -#define BPF_ALU64 0x07 /* alu mode in double word width */ - -/* ld/ldx fields */ -#define BPF_DW 0x18 /* double word */ -#define BPF_XADD 0xc0 /* exclusive add */ - -/* alu/jmp fields */ -#define BPF_MOV 0xb0 /* mov reg to reg */ -#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ - -/* change endianness of a register */ -#define BPF_END 0xd0 /* flags for endianness conversion: */ -#define BPF_TO_LE 0x00 /* convert to little-endian */ -#define BPF_TO_BE 0x08 /* convert to big-endian */ -#define BPF_FROM_LE BPF_TO_LE -#define BPF_FROM_BE BPF_TO_BE - -#define BPF_JNE 0x50 /* jump != */ -#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ -#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ -#define BPF_CALL 0x80 /* function call */ -#define BPF_EXIT 0x90 /* function return */ - -/* Register numbers */ -enum { - BPF_REG_0 = 0, - BPF_REG_1, - BPF_REG_2, - BPF_REG_3, - BPF_REG_4, - BPF_REG_5, - BPF_REG_6, - BPF_REG_7, - BPF_REG_8, - BPF_REG_9, - BPF_REG_10, - __MAX_BPF_REG, -}; +#include +#include -/* BPF has 10 general purpose 64-bit registers and stack frame. */ -#define MAX_BPF_REG __MAX_BPF_REG +struct sk_buff; +struct sock; +struct seccomp_data; +struct bpf_prog_aux; /* ArgX, context and stack frame pointer register positions. 
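For the get_tunable/set_tunable hooks added above, a hedged driver-side sketch; ETHTOOL_RX_COPYBREAK and the id field of struct ethtool_tunable are assumed from the matching uapi change and are not part of this hunk:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct example_priv {
	u32 rx_copybreak;
};

static int example_get_tunable(struct net_device *dev,
			       const struct ethtool_tunable *tuna, void *data)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:	/* assumed tunable id */
		*(u32 *)data = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_tunable	= example_get_tunable,
};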
Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -161,6 +127,30 @@ enum { .off = 0, \ .imm = IMM }) +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +#define BPF_PSEUDO_MAP_FD 1 + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ @@ -299,14 +289,6 @@ enum { #define SK_RUN_FILTER(filter, ctx) \ (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) -struct bpf_insn { - __u8 code; /* opcode */ - __u8 dst_reg:4; /* dest register */ - __u8 src_reg:4; /* source register */ - __s16 off; /* signed offset */ - __s32 imm; /* signed immediate constant */ -}; - #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -320,20 +302,23 @@ struct sock_fprog_kern { struct sock_filter *filter; }; -struct sk_buff; -struct sock; -struct seccomp_data; +struct bpf_binary_header { + unsigned int pages; + u8 image[]; +}; struct bpf_prog { - u32 jited:1, /* Is our filter JIT'ed? */ - len:31; /* Number of filter blocks */ + u16 pages; /* Number of allocated pages */ + bool jited; /* Is our filter JIT'ed? */ + u32 len; /* Number of filter blocks */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ + struct bpf_prog_aux *aux; /* Auxiliary fields */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); + /* Instructions for interpreter */ union { struct sock_filter insns[0]; struct bpf_insn insnsi[0]; - struct work_struct work; }; }; @@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) +#ifdef CONFIG_DEBUG_SET_MODULE_RONX +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ + set_memory_ro((unsigned long)fp, fp->pages); +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ + set_memory_rw((unsigned long)fp, fp->pages); +} +#else +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ +} +#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + int sk_filter(struct sock *sk, struct sk_buff *skb); void bpf_prog_select_runtime(struct bpf_prog *fp); @@ -361,6 +366,17 @@ void bpf_prog_free(struct bpf_prog *fp); int bpf_convert_filter(struct sock_filter *prog, int len, struct bpf_insn *new_prog, int *new_len); +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); +void __bpf_prog_free(struct bpf_prog *fp); + +static inline void bpf_prog_unlock_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_ro(fp); + __bpf_prog_free(fp); +} + int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); @@ -377,6 +393,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 
__bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct bpf_prog *fp); +#ifdef CONFIG_BPF_JIT +typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +void bpf_jit_binary_free(struct bpf_binary_header *hdr); + +void bpf_jit_compile(struct bpf_prog *fp); +void bpf_jit_free(struct bpf_prog *fp); + +static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", + flen, proglen, pass, image); + if (image) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); +} +#else +static inline void bpf_jit_compile(struct bpf_prog *fp) +{ +} + +static inline void bpf_jit_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_free(fp); +} +#endif /* CONFIG_BPF_JIT */ + #define BPF_ANC BIT(15) static inline u16 bpf_anc_helper(const struct sock_filter *ftest) @@ -424,36 +472,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, return bpf_internal_load_pointer_neg_helper(skb, k, size); } -#ifdef CONFIG_BPF_JIT -#include -#include -#include - -void bpf_jit_compile(struct bpf_prog *fp); -void bpf_jit_free(struct bpf_prog *fp); - -static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, - u32 pass, void *image) -{ - pr_err("flen=%u proglen=%u pass=%u image=%pK\n", - flen, proglen, pass, image); - if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, - 16, 1, image, proglen, false); -} -#else -#include - -static inline void bpf_jit_compile(struct bpf_prog *fp) -{ -} - -static inline void bpf_jit_free(struct bpf_prog *fp) -{ - kfree(fp); -} -#endif /* CONFIG_BPF_JIT */ - static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index efb05961bdd8fbebbde2d2f534f486defe1ca095..77d783f71527f713baa142611dce48a7217346fe 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -139,7 +139,6 @@ struct fs_platform_info { int rx_ring, tx_ring; /* number of buffers on rx */ __u8 macaddr[ETH_ALEN]; /* mac address */ int rx_copybreak; /* limit we copy small frames */ - int use_napi; /* use NAPI */ int napi_weight; /* NAPI weight */ int use_rmii; /* use RMII mode */ diff --git a/include/linux/i82593.h b/include/linux/i82593.h deleted file mode 100644 index afac5c7a323d35c77f2f6aa9d31d24184b1500df..0000000000000000000000000000000000000000 --- a/include/linux/i82593.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Definitions for Intel 82593 CSMA/CD Core LAN Controller - * The definitions are taken from the 1992 users manual with Intel - * order number 297125-001. - * - * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp - * - * Copyright 1994, Anders Klemets - * - * HISTORY - * i82593.h,v - * Revision 1.4 2005/11/4 09:15:00 baroniunas - * Modified copyright with permission of author as follows: - * - * "If I82539.H is the only file with my copyright statement - * that is included in the Source Forge project, then you have - * my approval to change the copyright statement to be a GPL - * license, in the way you proposed on October 10." 
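To illustrate the BPF_LD_IMM64/BPF_LD_MAP_FD macros introduced above, a hedged sketch of building a trivial instruction sequence that references a map by its process-local file descriptor (map_fd is assumed to come from a prior map-create call; the other insn macros are the existing ones in this header):

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/string.h>

static size_t example_fill(struct bpf_insn *dst, int map_fd)
{
	struct bpf_insn insns[] = {
		/* rewritten to a real map pointer by the program loader,
		 * keyed off src_reg == BPF_PSEUDO_MAP_FD; note that this
		 * macro expands to two insn slots */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

	memcpy(dst, insns, sizeof(insns));
	return ARRAY_SIZE(insns);
}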
- * - * Revision 1.1 1996/07/17 15:23:12 root - * Initial revision - * - * Revision 1.3 1995/04/05 15:13:58 adj - * Initial alpha release - * - * Revision 1.2 1994/06/16 23:57:31 klemets - * Mirrored all the fields in the configuration block. - * - * Revision 1.1 1994/06/02 20:25:34 klemets - * Initial revision - * - * - */ -#ifndef _I82593_H -#define _I82593_H - -/* Intel 82593 CSMA/CD Core LAN Controller */ - -/* Port 0 Command Register definitions */ - -/* Execution operations */ -#define OP0_NOP 0 /* CHNL = 0 */ -#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */ -#define OP0_IA_SETUP 1 -#define OP0_CONFIGURE 2 -#define OP0_MC_SETUP 3 -#define OP0_TRANSMIT 4 -#define OP0_TDR 5 -#define OP0_DUMP 6 -#define OP0_DIAGNOSE 7 -#define OP0_TRANSMIT_NO_CRC 9 -#define OP0_RETRANSMIT 12 -#define OP0_ABORT 13 -/* Reception operations */ -#define OP0_RCV_ENABLE 8 -#define OP0_RCV_DISABLE 10 -#define OP0_STOP_RCV 11 -/* Status pointer control operations */ -#define OP0_FIX_PTR 15 /* CHNL = 1 */ -#define OP0_RLS_PTR 15 /* CHNL = 0 */ -#define OP0_RESET 14 - -#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */ -#define CR0_STATUS_0 0x00 -#define CR0_STATUS_1 0x20 -#define CR0_STATUS_2 0x40 -#define CR0_STATUS_3 0x60 -#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */ - -/* Port 0 Status Register definitions */ - -#define SR0_NO_RESULT 0 /* dummy */ -#define SR0_EVENT_MASK 0x0f -#define SR0_IA_SETUP_DONE 1 -#define SR0_CONFIGURE_DONE 2 -#define SR0_MC_SETUP_DONE 3 -#define SR0_TRANSMIT_DONE 4 -#define SR0_TDR_DONE 5 -#define SR0_DUMP_DONE 6 -#define SR0_DIAGNOSE_PASSED 7 -#define SR0_TRANSMIT_NO_CRC_DONE 9 -#define SR0_RETRANSMIT_DONE 12 -#define SR0_EXECUTION_ABORTED 13 -#define SR0_END_OF_FRAME 8 -#define SR0_RECEPTION_ABORTED 10 -#define SR0_DIAGNOSE_FAILED 15 -#define SR0_STOP_REG_HIT 11 - -#define SR0_CHNL (1 << 4) -#define SR0_EXECUTION (1 << 5) -#define SR0_RECEPTION (1 << 6) -#define SR0_INTERRUPT (1 << 7) -#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION) - -#define SR3_EXEC_STATE_MASK 0x03 -#define SR3_EXEC_IDLE 0 -#define SR3_TX_ABORT_IN_PROGRESS 1 -#define SR3_EXEC_ACTIVE 2 -#define SR3_ABORT_IN_PROGRESS 3 -#define SR3_EXEC_CHNL (1 << 2) -#define SR3_STP_ON_NO_RSRC (1 << 3) -#define SR3_RCVING_NO_RSRC (1 << 4) -#define SR3_RCV_STATE_MASK 0x60 -#define SR3_RCV_IDLE 0x00 -#define SR3_RCV_READY 0x20 -#define SR3_RCV_ACTIVE 0x40 -#define SR3_RCV_STOP_IN_PROG 0x60 -#define SR3_RCV_CHNL (1 << 7) - -/* Port 1 Command Register definitions */ - -#define OP1_NOP 0 -#define OP1_SWIT_TO_PORT_0 1 -#define OP1_INT_DISABLE 2 -#define OP1_INT_ENABLE 3 -#define OP1_SET_TS 5 -#define OP1_RST_TS 7 -#define OP1_POWER_DOWN 8 -#define OP1_RESET_RING_MNGMT 11 -#define OP1_RESET 14 -#define OP1_SEL_RST 15 - -#define CR1_STATUS_4 0x00 -#define CR1_STATUS_5 0x20 -#define CR1_STATUS_6 0x40 -#define CR1_STOP_REG_UPDATE (1 << 7) - -/* Receive frame status bits */ - -#define RX_RCLD (1 << 0) -#define RX_IA_MATCH (1 << 1) -#define RX_NO_AD_MATCH (1 << 2) -#define RX_NO_SFD (1 << 3) -#define RX_SRT_FRM (1 << 7) -#define RX_OVRRUN (1 << 8) -#define RX_ALG_ERR (1 << 10) -#define RX_CRC_ERR (1 << 11) -#define RX_LEN_ERR (1 << 12) -#define RX_RCV_OK (1 << 13) -#define RX_TYP_LEN (1 << 15) - -/* Transmit status bits */ - -#define TX_NCOL_MASK 0x0f -#define TX_FRTL (1 << 4) -#define TX_MAX_COL (1 << 5) -#define TX_HRT_BEAT (1 << 6) -#define TX_DEFER (1 << 7) -#define TX_UND_RUN (1 << 8) -#define TX_LOST_CTS (1 << 9) -#define TX_LOST_CRS (1 << 10) -#define TX_LTCOL (1 << 11) -#define TX_OK (1 << 13) -#define TX_COLL 
(1 << 15) - -struct i82593_conf_block { - u_char fifo_limit : 4, - forgnesi : 1, - fifo_32 : 1, - d6mod : 1, - throttle_enb : 1; - u_char throttle : 6, - cntrxint : 1, - contin : 1; - u_char addr_len : 3, - acloc : 1, - preamb_len : 2, - loopback : 2; - u_char lin_prio : 3, - tbofstop : 1, - exp_prio : 3, - bof_met : 1; - u_char : 4, - ifrm_spc : 4; - u_char : 5, - slottim_low : 3; - u_char slottim_hi : 3, - : 1, - max_retr : 4; - u_char prmisc : 1, - bc_dis : 1, - : 1, - crs_1 : 1, - nocrc_ins : 1, - crc_1632 : 1, - : 1, - crs_cdt : 1; - u_char cs_filter : 3, - crs_src : 1, - cd_filter : 3, - : 1; - u_char : 2, - min_fr_len : 6; - u_char lng_typ : 1, - lng_fld : 1, - rxcrc_xf : 1, - artx : 1, - sarec : 1, - tx_jabber : 1, /* why is this called max_len in the manual? */ - hash_1 : 1, - lbpkpol : 1; - u_char : 6, - fdx : 1, - : 1; - u_char dummy_6 : 6, /* supposed to be ones */ - mult_ia : 1, - dis_bof : 1; - u_char dummy_1 : 1, /* supposed to be one */ - tx_ifs_retrig : 2, - mc_all : 1, - rcv_mon : 2, - frag_acpt : 1, - tstrttrs : 1; - u_char fretx : 1, - runt_eop : 1, - hw_sw_pin : 1, - big_endn : 1, - syncrqs : 1, - sttlen : 1, - tx_eop : 1, - rx_eop : 1; - u_char rbuf_size : 5, - rcvstop : 1, - : 2; -}; - -#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */ - -#endif /* _I82593_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 63ab3873c5ed00ac6fa36e73892d4db709c13bf0..b1be39c76931b5084ee4bebab2c237021d31e109 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -6,6 +6,7 @@ * Copyright (c) 2002-2003, Jouni Malinen * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu + * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -165,8 +166,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_MESH_ID_LEN 32 +#define IEEE80211_FIRST_TSPEC_TSID 8 #define IEEE80211_NUM_TIDS 16 +/* number of user priorities 802.11 uses */ +#define IEEE80211_NUM_UPS 8 + #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ #define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 @@ -838,6 +843,16 @@ enum ieee80211_vht_opmode_bits { #define WLAN_SA_QUERY_TR_ID_LEN 2 +/** + * struct ieee80211_tpc_report_ie + * + * This structure refers to "TPC Report element" + */ +struct ieee80211_tpc_report_ie { + u8 tx_power; + u8 link_margin; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +988,13 @@ struct ieee80211_mgmt { u8 action_code; u8 operating_mode; } __packed vht_opmode_notif; + struct { + u8 action_code; + u8 dialog_token; + u8 tpc_elem_id; + u8 tpc_elem_length; + struct ieee80211_tpc_report_ie tpc; + } __packed tpc_report; } u; } __packed action; } u; @@ -1806,7 +1828,8 @@ enum ieee80211_eid { WLAN_EID_DMG_TSPEC = 146, WLAN_EID_DMG_AT = 147, WLAN_EID_DMG_CAP = 148, - /* 149-150 reserved for Cisco */ + /* 149 reserved for Cisco */ + WLAN_EID_CISCO_VENDOR_SPECIFIC = 150, WLAN_EID_DMG_OPERATION = 151, WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, WLAN_EID_DMG_BEAM_REFINEMENT = 153, @@ -1865,6 +1888,7 @@ enum ieee80211_category { WLAN_CATEGORY_DLS = 2, WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_RADIO_MEASUREMENT = 5, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -2378,4 +2402,51 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, #define 
TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) +/** + * ieee80211_action_contains_tpc - checks if the frame contains TPC element + * @skb: the skb containing the frame, length will be checked + * + * This function checks if it's either TPC report action frame or Link + * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 + * and 8.5.7.5 accordingly. + */ +static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + + if (skb->len < IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.tpc_report)) + return false; + + /* + * TPC report - check that: + * category = 0 (Spectrum Management) or 5 (Radio Measurement) + * spectrum management action = 3 (TPC/Link Measurement report) + * TPC report EID = 35 + * TPC report element length = 2 + * + * The spectrum management's tpc_report struct is used here both for + * parsing tpc_report and radio measurement's link measurement report + * frame, since the relevant part is identical in both frames. + */ + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT && + mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT) + return false; + + /* both spectrum mgmt and link measurement have same action code */ + if (mgmt->u.action.u.tpc_report.action_code != + WLAN_ACTION_SPCT_TPC_RPRT) + return false; + + if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT || + mgmt->u.action.u.tpc_report.tpc_elem_length != + sizeof(struct ieee80211_tpc_report_ie)) + return false; + + return true; +} + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 6b2c7cf352a5582aff86da52f4feac7ae7d208e4..6f6929ea8a0cb5e364845a31911d4abcc2a22741 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -60,6 +60,7 @@ struct macvlan_dev { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f47550d75f85fddd53186c92f0e1383c8f693b90..2c677afeea4782c96b79d0d8ede4846d87783b99 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -39,6 +39,7 @@ static inline struct igmpv3_query * extern int sysctl_igmp_max_memberships; extern int sysctl_igmp_max_msf; +extern int sysctl_igmp_qrv; struct ip_sf_socklist { unsigned int sl_max; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a5b7d7cfcedfaac0fe3f3ca4aabfd1dae91b2b53..37e4404d0227f8c3420bb9e14d8d943b5d734730 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -38,6 +38,7 @@ #include #include #include +#include #include @@ -184,19 +185,24 @@ enum { MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, + MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 }; enum { MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, - MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 + MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1, + MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2, + MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3 }; enum { - MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 + MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 }; enum { - MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 + MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 }; @@ 
-577,7 +583,7 @@ struct mlx4_uar { }; struct mlx4_bf { - unsigned long offset; + unsigned int offset; int buf_size; struct mlx4_uar *uar; void __iomem *reg; @@ -701,6 +707,7 @@ struct mlx4_dev { u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; struct mlx4_vf_dev *dev_vfs; + int nvfs[MLX4_MAX_PORTS + 1]; }; struct mlx4_eqe { @@ -1279,7 +1286,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { - return reset_devices; + return is_kdump_kernel(); } #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 334947151dfc4651385ef6dd4ad21bbf0e42259a..1d67fd32e71c4bd6f1622e5465dfbd2ba3e95b99 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -44,6 +44,50 @@ #error Host endianness not defined #endif +/* helper macros */ +#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) +#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) +#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld))) +#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) +#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) +#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) +#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) +#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) +#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) + +#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) +#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) +#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) +#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) + +/* insert a value to a struct */ +#define MLX5_SET(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + +#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ +__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ +__mlx5_mask(typ, fld)) + +#define MLX5_GET_PR(typ, p, fld) ({ \ + u32 ___t = MLX5_GET(typ, p, fld); \ + pr_debug(#fld " = 0x%x\n", ___t); \ + ___t; \ +}) + +#define MLX5_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ + *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ +} while (0) + +#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -70,6 +114,11 @@ enum { MLX5_INLINE_SEG = 0x80000000, }; +enum { + MLX5_MIN_PKEY_TABLE_SIZE = 128, + MLX5_MAX_LOG_PKEY_TABLE = 5, +}; + enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, @@ -184,10 +233,10 @@ enum { MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, 
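A minimal sketch of how the MLX5_SET()/MLX5_GET() accessors above are meant to be used against an *_bits layout; the struct and field names below are illustrative only (real layouts belong in mlx5_ifc.h), with each u8 array element standing for a single bit:

#include <linux/kernel.h>
#include <linux/mlx5/device.h>

struct mlx5_ifc_example_bits {
	u8 field_a[0x10];
	u8 reserved_0[0x10];

	u8 reserved_1[0x8];
	u8 field_b[0x18];
};

static void example_ifc_usage(void)
{
	u32 buf[MLX5_ST_SZ_DW(example)] = {0};

	MLX5_SET(example, buf, field_a, 0x1234);
	MLX5_SET(example, buf, field_b, 0xabcdef);

	/* values round-trip regardless of host endianness */
	WARN_ON(MLX5_GET(example, buf, field_a) != 0x1234);
	WARN_ON(MLX5_GET(example, buf, field_b) != 0xabcdef);
}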
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, - MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; @@ -243,10 +292,14 @@ enum { }; enum { - MLX5_CAP_OFF_DCT = 41, MLX5_CAP_OFF_CMDIF_CSUM = 46, }; +enum { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -274,101 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out { u8 vsd_psid[16]; }; -struct mlx5_hca_cap { - u8 rsvd1[16]; - u8 log_max_srq_sz; - u8 log_max_qp_sz; - u8 rsvd2; - u8 log_max_qp; - u8 log_max_strq_sz; - u8 log_max_srqs; - u8 rsvd4[2]; - u8 rsvd5; - u8 log_max_cq_sz; - u8 rsvd6; - u8 log_max_cq; - u8 log_max_eq_sz; - u8 log_max_mkey; - u8 rsvd7; - u8 log_max_eq; - u8 max_indirection; - u8 log_max_mrw_sz; - u8 log_max_bsf_list_sz; - u8 log_max_klm_list_sz; - u8 rsvd_8_0; - u8 log_max_ra_req_dc; - u8 rsvd_8_1; - u8 log_max_ra_res_dc; - u8 rsvd9; - u8 log_max_ra_req_qp; - u8 rsvd10; - u8 log_max_ra_res_qp; - u8 rsvd11[4]; - __be16 max_qp_count; - __be16 rsvd12; - u8 rsvd13; - u8 local_ca_ack_delay; - u8 rsvd14; - u8 num_ports; - u8 log_max_msg; - u8 rsvd15[3]; - __be16 stat_rate_support; - u8 rsvd16[2]; - __be64 flags; - u8 rsvd17; - u8 uar_sz; - u8 rsvd18; - u8 log_pg_sz; - __be16 bf_log_bf_reg_size; - u8 rsvd19[4]; - __be16 max_desc_sz_sq; - u8 rsvd20[2]; - __be16 max_desc_sz_rq; - u8 rsvd21[2]; - __be16 max_desc_sz_sq_dc; - __be32 max_qp_mcg; - u8 rsvd22[3]; - u8 log_max_mcg; - u8 rsvd23; - u8 log_max_pd; - u8 rsvd24; - u8 log_max_xrcd; - u8 rsvd25[42]; - __be16 log_uar_page_sz; - u8 rsvd26[28]; - u8 log_max_atomic_size_qp; - u8 rsvd27[2]; - u8 log_max_atomic_size_dc; - u8 rsvd28[76]; -}; - - -struct mlx5_cmd_query_hca_cap_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - - -struct mlx5_cmd_query_hca_cap_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; - struct mlx5_hca_cap hca_cap; -}; - - -struct mlx5_cmd_set_hca_cap_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; - struct mlx5_hca_cap hca_cap; -}; - - -struct mlx5_cmd_set_hca_cap_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; -}; - - struct mlx5_cmd_init_hca_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[2]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b88e9b46d957d2750c4df3a992b926e9bbfd3253..246310dc8bef9a8f8604941ee40353868ebd3965 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -44,6 +44,7 @@ #include #include +#include enum { MLX5_BOARD_ID_LEN = 64, @@ -98,81 +99,6 @@ enum { MLX5_ATOMIC_MODE_256B = 8 << 16, }; -enum { - MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, - MLX5_CMD_OP_QUERY_ADAPTER = 0x101, - MLX5_CMD_OP_INIT_HCA = 0x102, - MLX5_CMD_OP_TEARDOWN_HCA = 0x103, - MLX5_CMD_OP_ENABLE_HCA = 0x104, - MLX5_CMD_OP_DISABLE_HCA = 0x105, - MLX5_CMD_OP_QUERY_PAGES = 0x107, - MLX5_CMD_OP_MANAGE_PAGES = 0x108, - MLX5_CMD_OP_SET_HCA_CAP = 0x109, - - MLX5_CMD_OP_CREATE_MKEY = 0x200, - MLX5_CMD_OP_QUERY_MKEY = 0x201, - MLX5_CMD_OP_DESTROY_MKEY = 0x202, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, - - MLX5_CMD_OP_CREATE_EQ = 0x301, - MLX5_CMD_OP_DESTROY_EQ = 0x302, - MLX5_CMD_OP_QUERY_EQ = 0x303, - - MLX5_CMD_OP_CREATE_CQ = 0x400, - MLX5_CMD_OP_DESTROY_CQ = 0x401, - MLX5_CMD_OP_QUERY_CQ = 0x402, - MLX5_CMD_OP_MODIFY_CQ = 0x403, - - MLX5_CMD_OP_CREATE_QP = 0x500, - MLX5_CMD_OP_DESTROY_QP = 0x501, - MLX5_CMD_OP_RST2INIT_QP = 0x502, - MLX5_CMD_OP_INIT2RTR_QP = 0x503, - MLX5_CMD_OP_RTR2RTS_QP = 0x504, - MLX5_CMD_OP_RTS2RTS_QP = 0x505, - MLX5_CMD_OP_SQERR2RTS_QP = 0x506, - MLX5_CMD_OP_2ERR_QP = 0x507, - 
MLX5_CMD_OP_RTS2SQD_QP = 0x508, - MLX5_CMD_OP_SQD2RTS_QP = 0x509, - MLX5_CMD_OP_2RST_QP = 0x50a, - MLX5_CMD_OP_QUERY_QP = 0x50b, - MLX5_CMD_OP_CONF_SQP = 0x50c, - MLX5_CMD_OP_MAD_IFC = 0x50d, - MLX5_CMD_OP_INIT2INIT_QP = 0x50e, - MLX5_CMD_OP_SUSPEND_QP = 0x50f, - MLX5_CMD_OP_UNSUSPEND_QP = 0x510, - MLX5_CMD_OP_SQD2SQD_QP = 0x511, - MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, - MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, - MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, - - MLX5_CMD_OP_CREATE_PSV = 0x600, - MLX5_CMD_OP_DESTROY_PSV = 0x601, - MLX5_CMD_OP_QUERY_PSV = 0x602, - MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, - MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, - - MLX5_CMD_OP_CREATE_SRQ = 0x700, - MLX5_CMD_OP_DESTROY_SRQ = 0x701, - MLX5_CMD_OP_QUERY_SRQ = 0x702, - MLX5_CMD_OP_ARM_RQ = 0x703, - MLX5_CMD_OP_RESIZE_SRQ = 0x704, - - MLX5_CMD_OP_ALLOC_PD = 0x800, - MLX5_CMD_OP_DEALLOC_PD = 0x801, - MLX5_CMD_OP_ALLOC_UAR = 0x802, - MLX5_CMD_OP_DEALLOC_UAR = 0x803, - - MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, - - - MLX5_CMD_OP_ALLOC_XRCD = 0x80e, - MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, - - MLX5_CMD_OP_ACCESS_REG = 0x805, - MLX5_CMD_OP_MAX = 0x810, -}; - enum { MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, @@ -335,23 +261,30 @@ struct mlx5_port_caps { int pkey_table_len; }; -struct mlx5_caps { +struct mlx5_general_caps { u8 log_max_eq; u8 log_max_cq; u8 log_max_qp; u8 log_max_mkey; u8 log_max_pd; u8 log_max_srq; + u8 log_max_strq; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_size; + u8 log_max_klm_list_size; u32 max_cqes; int max_wqes; + u32 max_eqes; + u32 max_indirection; int max_sq_desc_sz; int max_rq_desc_sz; + int max_dc_sq_desc_sz; u64 flags; u16 stat_rate_support; int log_max_msg; int num_ports; - int max_ra_res_qp; - int max_ra_req_qp; + u8 log_max_ra_res_qp; + u8 log_max_ra_req_qp; int max_srq_wqes; int bf_reg_size; int bf_regs_per_page; @@ -363,6 +296,19 @@ struct mlx5_caps { u8 log_max_mcg; u32 max_qp_mcg; int min_page_sz; + int pd_cap; + u32 max_qp_counters; + u32 pkey_table_size; + u8 log_max_ra_req_dc; + u8 log_max_ra_res_dc; + u32 uar_sz; + u8 min_log_pg_sz; + u8 log_max_xrcd; + u16 log_uar_page_sz; +}; + +struct mlx5_caps { + struct mlx5_general_caps gen; }; struct mlx5_cmd_mailbox { @@ -429,6 +375,16 @@ struct mlx5_core_mr { u32 pd; }; +enum mlx5_res_type { + MLX5_RES_QP, +}; + +struct mlx5_core_rsc_common { + enum mlx5_res_type res; + atomic_t refcount; + struct completion free; +}; + struct mlx5_core_srq { u32 srqn; int max; @@ -695,6 +651,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); +int mlx5_cmd_status_to_err_v2(void *ptr); +int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, + u16 opmod); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, @@ -751,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); -void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev 
*dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); @@ -788,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); static inline u32 mlx5_mkey_to_idx(u32 mkey) { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h new file mode 100644 index 0000000000000000000000000000000000000000..5f48b8f592c51a9aece4baa2755af9757b9976fa --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc.h @@ -0,0 +1,349 @@ +/* + * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_IFC_H +#define MLX5_IFC_H + +enum { + MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, + MLX5_CMD_OP_QUERY_ADAPTER = 0x101, + MLX5_CMD_OP_INIT_HCA = 0x102, + MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_ENABLE_HCA = 0x104, + MLX5_CMD_OP_DISABLE_HCA = 0x105, + MLX5_CMD_OP_QUERY_PAGES = 0x107, + MLX5_CMD_OP_MANAGE_PAGES = 0x108, + MLX5_CMD_OP_SET_HCA_CAP = 0x109, + MLX5_CMD_OP_CREATE_MKEY = 0x200, + MLX5_CMD_OP_QUERY_MKEY = 0x201, + MLX5_CMD_OP_DESTROY_MKEY = 0x202, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, + MLX5_CMD_OP_CREATE_EQ = 0x301, + MLX5_CMD_OP_DESTROY_EQ = 0x302, + MLX5_CMD_OP_QUERY_EQ = 0x303, + MLX5_CMD_OP_GEN_EQE = 0x304, + MLX5_CMD_OP_CREATE_CQ = 0x400, + MLX5_CMD_OP_DESTROY_CQ = 0x401, + MLX5_CMD_OP_QUERY_CQ = 0x402, + MLX5_CMD_OP_MODIFY_CQ = 0x403, + MLX5_CMD_OP_CREATE_QP = 0x500, + MLX5_CMD_OP_DESTROY_QP = 0x501, + MLX5_CMD_OP_RST2INIT_QP = 0x502, + MLX5_CMD_OP_INIT2RTR_QP = 0x503, + MLX5_CMD_OP_RTR2RTS_QP = 0x504, + MLX5_CMD_OP_RTS2RTS_QP = 0x505, + MLX5_CMD_OP_SQERR2RTS_QP = 0x506, + MLX5_CMD_OP_2ERR_QP = 0x507, + MLX5_CMD_OP_2RST_QP = 0x50a, + MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_INIT2INIT_QP = 0x50e, + MLX5_CMD_OP_CREATE_PSV = 0x600, + MLX5_CMD_OP_DESTROY_PSV = 0x601, + MLX5_CMD_OP_CREATE_SRQ = 0x700, + MLX5_CMD_OP_DESTROY_SRQ = 0x701, + MLX5_CMD_OP_QUERY_SRQ = 0x702, + MLX5_CMD_OP_ARM_RQ = 0x703, + MLX5_CMD_OP_RESIZE_SRQ = 0x704, + MLX5_CMD_OP_CREATE_DCT = 0x710, + MLX5_CMD_OP_DESTROY_DCT = 0x711, + MLX5_CMD_OP_DRAIN_DCT = 0x712, + MLX5_CMD_OP_QUERY_DCT = 0x713, + MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, + MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, + MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, + MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, + MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, + MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, + MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, + MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, + MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_ALLOC_PD = 0x800, + MLX5_CMD_OP_DEALLOC_PD = 0x801, + MLX5_CMD_OP_ALLOC_UAR = 0x802, + MLX5_CMD_OP_DEALLOC_UAR = 0x803, + MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, + MLX5_CMD_OP_ACCESS_REG = 0x805, + MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, + MLX5_CMD_OP_MAD_IFC = 0x50d, + MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, + MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, + MLX5_CMD_OP_NOP = 0x80d, + MLX5_CMD_OP_ALLOC_XRCD = 0x80e, + MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, + MLX5_CMD_OP_SET_BURST_SIZE = 0x812, + MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, + MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, + MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, + MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, + MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, + MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, + MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, + MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, + MLX5_CMD_OP_CREATE_TIR = 0x900, + MLX5_CMD_OP_MODIFY_TIR = 0x901, + MLX5_CMD_OP_DESTROY_TIR = 0x902, + MLX5_CMD_OP_QUERY_TIR = 0x903, + MLX5_CMD_OP_CREATE_TIS = 0x912, + MLX5_CMD_OP_MODIFY_TIS = 0x913, + MLX5_CMD_OP_DESTROY_TIS = 0x914, + MLX5_CMD_OP_QUERY_TIS = 0x915, + MLX5_CMD_OP_CREATE_SQ = 0x904, + MLX5_CMD_OP_MODIFY_SQ = 0x905, + MLX5_CMD_OP_DESTROY_SQ = 0x906, + MLX5_CMD_OP_QUERY_SQ = 0x907, + MLX5_CMD_OP_CREATE_RQ = 0x908, + MLX5_CMD_OP_MODIFY_RQ = 0x909, + MLX5_CMD_OP_DESTROY_RQ = 0x90a, + 
MLX5_CMD_OP_QUERY_RQ = 0x90b, + MLX5_CMD_OP_CREATE_RMP = 0x90c, + MLX5_CMD_OP_MODIFY_RMP = 0x90d, + MLX5_CMD_OP_DESTROY_RMP = 0x90e, + MLX5_CMD_OP_QUERY_RMP = 0x90f, + MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, + MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, + MLX5_CMD_OP_MAX = 0x911 +}; + +struct mlx5_ifc_cmd_hca_cap_bits { + u8 reserved_0[0x80]; + + u8 log_max_srq_sz[0x8]; + u8 log_max_qp_sz[0x8]; + u8 reserved_1[0xb]; + u8 log_max_qp[0x5]; + + u8 log_max_strq_sz[0x8]; + u8 reserved_2[0x3]; + u8 log_max_srqs[0x5]; + u8 reserved_3[0x10]; + + u8 reserved_4[0x8]; + u8 log_max_cq_sz[0x8]; + u8 reserved_5[0xb]; + u8 log_max_cq[0x5]; + + u8 log_max_eq_sz[0x8]; + u8 reserved_6[0x2]; + u8 log_max_mkey[0x6]; + u8 reserved_7[0xc]; + u8 log_max_eq[0x4]; + + u8 max_indirection[0x8]; + u8 reserved_8[0x1]; + u8 log_max_mrw_sz[0x7]; + u8 reserved_9[0x2]; + u8 log_max_bsf_list_size[0x6]; + u8 reserved_10[0x2]; + u8 log_max_klm_list_size[0x6]; + + u8 reserved_11[0xa]; + u8 log_max_ra_req_dc[0x6]; + u8 reserved_12[0xa]; + u8 log_max_ra_res_dc[0x6]; + + u8 reserved_13[0xa]; + u8 log_max_ra_req_qp[0x6]; + u8 reserved_14[0xa]; + u8 log_max_ra_res_qp[0x6]; + + u8 pad_cap[0x1]; + u8 cc_query_allowed[0x1]; + u8 cc_modify_allowed[0x1]; + u8 reserved_15[0x1d]; + + u8 reserved_16[0x6]; + u8 max_qp_cnt[0xa]; + u8 pkey_table_size[0x10]; + + u8 eswitch_owner[0x1]; + u8 reserved_17[0xa]; + u8 local_ca_ack_delay[0x5]; + u8 reserved_18[0x8]; + u8 num_ports[0x8]; + + u8 reserved_19[0x3]; + u8 log_max_msg[0x5]; + u8 reserved_20[0x18]; + + u8 stat_rate_support[0x10]; + u8 reserved_21[0x10]; + + u8 reserved_22[0x10]; + u8 cmdif_checksum[0x2]; + u8 sigerr_cqe[0x1]; + u8 reserved_23[0x1]; + u8 wq_signature[0x1]; + u8 sctr_data_cqe[0x1]; + u8 reserved_24[0x1]; + u8 sho[0x1]; + u8 tph[0x1]; + u8 rf[0x1]; + u8 dc[0x1]; + u8 reserved_25[0x2]; + u8 roce[0x1]; + u8 atomic[0x1]; + u8 rsz_srq[0x1]; + + u8 cq_oi[0x1]; + u8 cq_resize[0x1]; + u8 cq_moderation[0x1]; + u8 sniffer_rule_flow[0x1]; + u8 sniffer_rule_vport[0x1]; + u8 sniffer_rule_phy[0x1]; + u8 reserved_26[0x1]; + u8 pg[0x1]; + u8 block_lb_mc[0x1]; + u8 reserved_27[0x3]; + u8 cd[0x1]; + u8 reserved_28[0x1]; + u8 apm[0x1]; + u8 reserved_29[0x7]; + u8 qkv[0x1]; + u8 pkv[0x1]; + u8 reserved_30[0x4]; + u8 xrc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_31[0xa]; + u8 uar_sz[0x6]; + u8 reserved_32[0x8]; + u8 log_pg_sz[0x8]; + + u8 bf[0x1]; + u8 reserved_33[0xa]; + u8 log_bf_reg_size[0x5]; + u8 reserved_34[0x10]; + + u8 reserved_35[0x10]; + u8 max_wqe_sz_sq[0x10]; + + u8 reserved_36[0x10]; + u8 max_wqe_sz_rq[0x10]; + + u8 reserved_37[0x10]; + u8 max_wqe_sz_sq_dc[0x10]; + + u8 reserved_38[0x7]; + u8 max_qp_mcg[0x19]; + + u8 reserved_39[0x18]; + u8 log_max_mcg[0x8]; + + u8 reserved_40[0xb]; + u8 log_max_pd[0x5]; + u8 reserved_41[0xb]; + u8 log_max_xrcd[0x5]; + + u8 reserved_42[0x20]; + + u8 reserved_43[0x3]; + u8 log_max_rq[0x5]; + u8 reserved_44[0x3]; + u8 log_max_sq[0x5]; + u8 reserved_45[0x3]; + u8 log_max_tir[0x5]; + u8 reserved_46[0x3]; + u8 log_max_tis[0x5]; + + u8 reserved_47[0x13]; + u8 log_max_rq_per_tir[0x5]; + u8 reserved_48[0x3]; + u8 log_max_tis_per_sq[0x5]; + + u8 reserved_49[0xe0]; + + u8 reserved_50[0x10]; + u8 log_uar_page_sz[0x10]; + + u8 reserved_51[0x100]; + + u8 reserved_52[0x1f]; + u8 cqe_zip[0x1]; + + u8 cqe_zip_timeout[0x10]; + u8 cqe_zip_max_num[0x10]; + + u8 reserved_53[0x220]; +}; + +struct mlx5_ifc_set_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct 
mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; +}; + +struct mlx5_ifc_query_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 capability_struct[256][0x8]; +}; + +struct mlx5_ifc_set_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +#endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 9709b30e2d690da604336a48f96d3757bb7d25c5..7c4c0f1f58054c3f4c9ad96f396eec079c07eb9c 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg { }; struct mlx5_core_qp { + struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); int qpn; - atomic_t refcount; - struct completion free; struct mlx5_rsc_debug *dbg; int pid; }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c8e388e5fcccfa604d087ce33ccee5ace528e598..838407aea70503003e09c06fe17845f9e1c9d656 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -543,7 +544,7 @@ struct netdev_queue { * read mostly part */ struct net_device *dev; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; struct Qdisc *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; @@ -1206,6 +1207,7 @@ enum netdev_priv_flags { IFF_SUPP_NOFCS = 1<<19, IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, + IFF_XMIT_DST_RELEASE_PERM = 1<<22, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1230,6 +1232,7 @@ enum netdev_priv_flags { #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN +#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM /** * struct net_device - The DEVICE structure. 
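
The mlx5_ifc_cmd_hca_cap_bits layout earlier in this hunk encodes each field's width in bits as the length of a u8 array member. A decoding sketch, assuming (the patch does not state it) the usual mlx5 convention that fields are packed most-significant-bit first into big-endian 32-bit words:

    #include <asm/byteorder.h>

    /* Hedged sketch: extract a field from a capability mailbox laid out as
     * big-endian dwords. Assumes width < 32 and that the field does not
     * straddle a 32-bit word, which holds for the named fields above.
     */
    static u32 ifc_get_field(const __be32 *mailbox, unsigned int bit_offset,
                             unsigned int width)
    {
            u32 dword = be32_to_cpu(mailbox[bit_offset / 32]);
            unsigned int shift = 32 - (bit_offset % 32) - width;

            return (dword >> shift) & ((1U << width) - 1);
    }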
@@ -1416,6 +1419,8 @@ enum netdev_priv_flags { * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO + * @gso_min_segs: Minimum number of segments that can be passed to the + * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device @@ -1666,7 +1671,7 @@ struct net_device { unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; - + u16 gso_min_segs; #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif @@ -1747,6 +1752,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, return &dev->_tx[index]; } +static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, + const struct sk_buff *skb) +{ + return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +} + static inline void netdev_for_each_tx_queue(struct net_device *dev, void (*f)(struct net_device *, struct netdev_queue *, @@ -1781,24 +1792,13 @@ void dev_net_set(struct net_device *dev, struct net *net) #endif } -static inline bool netdev_uses_dsa_tags(struct net_device *dev) +static inline bool netdev_uses_dsa(struct net_device *dev) { -#ifdef CONFIG_NET_DSA_TAG_DSA - if (dev->dsa_ptr != NULL) - return dsa_uses_dsa_tags(dev->dsa_ptr); -#endif - - return 0; -} - -static inline bool netdev_uses_trailer_tags(struct net_device *dev) -{ -#ifdef CONFIG_NET_DSA_TAG_TRAILER +#if IS_ENABLED(CONFIG_NET_DSA) if (dev->dsa_ptr != NULL) - return dsa_uses_trailer_tags(dev->dsa_ptr); + return dsa_uses_tagged_protocol(dev->dsa_ptr); #endif - - return 0; + return false; } /** @@ -1879,11 +1879,20 @@ struct napi_gro_cb { /* jiffies when first packet was created/queued */ unsigned long age; - /* Used in ipv6_gro_receive() */ + /* Used in ipv6_gro_receive() and foo-over-udp */ u16 proto; /* Used in udp_gro_receive */ - u16 udp_mark; + u8 udp_mark:1; + + /* GRO checksum is valid */ + u8 csum_valid:1; + + /* Number of checksums via CHECKSUM_UNNECESSARY */ + u8 csum_cnt:3; + + /* Used in foo-over-udp, set in udp[46]_gro_receive */ + u8 is_ipv6:1; /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -1910,7 +1919,6 @@ struct packet_type { struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); - int (*gso_send_check)(struct sk_buff *skb); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); @@ -1924,6 +1932,7 @@ struct packet_offload { struct udp_offload { __be16 port; + u8 ipproto; struct offload_callbacks callbacks; }; @@ -1982,6 +1991,7 @@ struct pcpu_sw_netstats { #define NETDEV_CHANGEUPPER 0x0015 #define NETDEV_RESEND_IGMP 0x0016 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ +#define NETDEV_CHANGEINFODATA 0x0018 int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2074,8 +2084,8 @@ void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); -struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, - unsigned short mask); +struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct 
net_device *__dev_get_by_name(struct net *net, const char *name); @@ -2153,11 +2163,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb) static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (NAPI_GRO_CB(skb)->csum_valid) NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(start, len, 0)); } +/* GRO checksum functions. These are logical equivalents of the normal + * checksum functions (in skbuff.h) except that they operate on the GRO + * offsets and fields in sk_buff. + */ + +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb); + +static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + return (skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + (!zero_okay || check)); +} + +static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, + __wsum psum) +{ + if (NAPI_GRO_CB(skb)->csum_valid && + !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) + return 0; + + NAPI_GRO_CB(skb)->csum = psum; + + return __skb_gro_checksum_complete(skb); +} + +static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->csum_cnt > 0) { + /* Consume a checksum from CHECKSUM_UNNECESSARY */ + NAPI_GRO_CB(skb)->csum_cnt--; + } else { + /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we + * verified a new top level checksum or an encapsulated one + * during GRO. This saves work if we fallback to normal path. + */ + __skb_incr_checksum_unnecessary(skb); + } +} + +#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ + compute_pseudo) \ +({ \ + __sum16 __ret = 0; \ + if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ + __ret = __skb_gro_checksum_validate_complete(skb, \ + compute_pseudo(skb, proto)); \ + if (__ret) \ + __skb_mark_checksum_bad(skb); \ + else \ + skb_gro_incr_csum_unnecessary(skb); \ + __ret; \ +}) + +#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) + +#define skb_gro_checksum_validate_zero_check(skb, proto, check, \ + compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) + +#define skb_gro_checksum_simple_validate(skb) \ + __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) + +static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid); +} + +static inline void __skb_gro_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + NAPI_GRO_CB(skb)->csum = ~pseudo; + NAPI_GRO_CB(skb)->csum_valid = 1; +} + +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_gro_checksum_convert_check(skb)) \ + __skb_gro_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -2261,12 +2357,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); void __netif_schedule(struct Qdisc *q); - -static inline void netif_schedule_queue(struct netdev_queue *txq) -{ - if (!(txq->state & QUEUE_STATE_ANY_XOFF)) - __netif_schedule(txq->qdisc); -} +void netif_schedule_queue(struct netdev_queue *txq); static 
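
The GRO checksum helpers above are the receive-offload counterparts of the validation helpers in skbuff.h. A hedged sketch of how a protocol's gro_receive callback might use the simple form; the handler name is illustrative and real callers also perform protocol-specific matching:

    #include <linux/netdevice.h>

    static struct sk_buff **example_gro_receive(struct sk_buff **head,
                                                struct sk_buff *skb)
    {
            /* Consumes one CHECKSUM_UNNECESSARY level if available,
             * otherwise falls back to a complete checksum computed over
             * the GRO offsets.
             */
            if (skb_gro_checksum_simple_validate(skb))
                    goto flush;

            /* ... protocol-specific lookup against 'head' goes here ... */
            return NULL;

    flush:
            NAPI_GRO_CB(skb)->flush = 1;
            return NULL;
    }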
inline void netif_tx_schedule_all(struct net_device *dev) { @@ -2302,11 +2393,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev) } } -static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) -{ - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) - __netif_schedule(dev_queue->qdisc); -} +void netif_tx_wake_queue(struct netdev_queue *dev_queue); /** * netif_wake_queue - restart transmit @@ -2394,6 +2481,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; } +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the cpu. + */ +static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the cpu. + */ +static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} + static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, unsigned int bytes) { @@ -2578,19 +2693,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev, return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -/** - * netif_wake_subqueue - allow sending packets on subqueue - * @dev: network device - * @queue_index: sub queue index - * - * Resume individual transmit queue of a device with multiple transmit queues. - */ -static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) - __netif_schedule(txq->qdisc); -} +void netif_wake_subqueue(struct net_device *dev, u16 queue_index); #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -2754,8 +2857,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); @@ -3357,6 +3461,27 @@ int __init dev_proc_init(void); #define dev_proc_init() 0 #endif +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + skb->xmit_more = more ? 
1 : 0; + return ops->ndo_start_xmit(skb, dev); +} + +static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int rc; + + rc = __netdev_start_xmit(ops, skb, dev, more); + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + + return rc; +} + int netdev_class_create_file_ns(struct class_attribute *class_attr, const void *ns); void netdev_class_remove_file_ns(struct class_attribute *class_attr, @@ -3494,6 +3619,12 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } +/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); +} + extern struct pernet_operations __net_initdata loopback_net_ops; /* Logging, debugging and troubleshooting/diagnostic helpers. */ @@ -3523,22 +3654,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev) } __printf(3, 4) -int netdev_printk(const char *level, const struct net_device *dev, - const char *format, ...); +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); __printf(2, 3) -int netdev_emerg(const struct net_device *dev, const char *format, ...); +void netdev_emerg(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_alert(const struct net_device *dev, const char *format, ...); +void netdev_alert(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_crit(const struct net_device *dev, const char *format, ...); +void netdev_crit(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_err(const struct net_device *dev, const char *format, ...); +void netdev_err(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_warn(const struct net_device *dev, const char *format, ...); +void netdev_warn(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_notice(const struct net_device *dev, const char *format, ...); +void netdev_notice(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_info(const struct net_device *dev, const char *format, ...); +void netdev_info(const struct net_device *dev, const char *format, ...); #define MODULE_ALIAS_NETDEV(device) \ MODULE_ALIAS("netdev-" device) @@ -3556,7 +3687,6 @@ do { \ ({ \ if (0) \ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ - 0; \ }) #endif diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 96afc29184bee810a1ec550933cfb15998f42efb..f1606fa6132d562b03ba11757393e1aeb4aa6e06 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -57,6 +57,8 @@ enum ip_set_extension { IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), IPSET_EXT_BIT_COMMENT = 2, IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT), + IPSET_EXT_BIT_SKBINFO = 3, + IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO), /* Mark set with an extension which needs to call destroy */ IPSET_EXT_BIT_DESTROY = 7, IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY), @@ -65,12 +67,14 @@ enum ip_set_extension { #define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) #define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) #define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT) +#define 
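
The skb->xmit_more flag set by __netdev_start_xmit() and the BQL prefetch hints above are both aimed at driver transmit paths. A hedged sketch of where a driver might use them; the doorbell helper is a placeholder, not part of this patch:

    #include <linux/netdevice.h>

    static void example_ring_doorbell(struct net_device *dev)
    {
            /* Placeholder for the driver's MMIO doorbell write. */
    }

    static netdev_tx_t example_ndo_start_xmit(struct sk_buff *skb,
                                              struct net_device *dev)
    {
            struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

            netdev_txq_bql_enqueue_prefetchw(txq);  /* warm dql.num_queued */

            /* ... build and post the TX descriptor here ... */

            netdev_tx_sent_queue(txq, skb->len);

            /* Kick the hardware only when no further skbs are pending. */
            if (!skb->xmit_more || netif_xmit_stopped(txq))
                    example_ring_doorbell(dev);

            return NETDEV_TX_OK;
    }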
SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO) #define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD) /* Extension id, in size order */ enum ip_set_ext_id { IPSET_EXT_ID_COUNTER = 0, IPSET_EXT_ID_TIMEOUT, + IPSET_EXT_ID_SKBINFO, IPSET_EXT_ID_COMMENT, IPSET_EXT_ID_MAX, }; @@ -92,6 +96,10 @@ struct ip_set_ext { u64 packets; u64 bytes; u32 timeout; + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; char *comment; }; @@ -104,6 +112,13 @@ struct ip_set_comment { char *str; }; +struct ip_set_skbinfo { + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; +}; + struct ip_set; #define ext_timeout(e, s) \ @@ -112,7 +127,8 @@ struct ip_set; (struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) #define ext_comment(e, s) \ (struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) - +#define ext_skbinfo(e, s) \ +(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]) typedef int (*ipset_adtfn)(struct ip_set *set, void *value, const struct ip_set_ext *ext, @@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) cadt_flags |= IPSET_FLAG_WITH_COUNTERS; if (SET_WITH_COMMENT(set)) cadt_flags |= IPSET_FLAG_WITH_COMMENT; + if (SET_WITH_SKBINFO(set)) + cadt_flags |= IPSET_FLAG_WITH_SKBINFO; if (SET_WITH_FORCEADD(set)) cadt_flags |= IPSET_FLAG_WITH_FORCEADD; @@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter, } } +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbmark = skbinfo->skbmark; + mext->skbmarkmask = skbinfo->skbmarkmask; + mext->skbprio = skbinfo->skbprio; + mext->skbqueue = skbinfo->skbqueue; +} +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask))) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); + +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + skbinfo->skbmark = ext->skbmark; + skbinfo->skbmarkmask = ext->skbmarkmask; + skbinfo->skbprio = ext->skbprio; + skbinfo->skbqueue = ext->skbqueue; +} + static inline bool ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) { @@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, if (SET_WITH_COMMENT(set) && ip_set_put_comment(skb, ext_comment(e, set))) return -EMSGSIZE; + if (SET_WITH_SKBINFO(set) && + ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) + return -EMSGSIZE; return 0; } diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h index 68c2aea897f593664dd9d0ab7131b1defd0a9265..fe2622a001516f382b71246dd7d9b68ec4f8b094 100644 --- a/include/linux/netfilter/ipset/ip_set_list.h +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -6,5 +6,6 @@ #define IP_SET_LIST_DEFAULT_SIZE 8 #define IP_SET_LIST_MIN_SIZE 4 +#define IP_SET_LIST_MAX_SIZE 65536 #endif /* __IP_SET_LIST_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 8ab1c278b66da77229647e08ef8e0d948d4e6122..c755e4971fa3cc89e3000b669e6f3ba557f555b8 
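
The new skbinfo extension follows the same pattern as the counter and comment extensions. A hedged sketch of how a set type's add path could record it for an element 'e'; the element layout is set-type specific and illustrative here:

    #include <linux/netfilter/ipset/ip_set.h>

    static void example_store_skbinfo(struct ip_set *set, void *e,
                                      const struct ip_set_ext *ext)
    {
            if (SET_WITH_SKBINFO(set))
                    ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
    }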
100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -15,7 +15,7 @@ enum nf_br_hook_priorities { NF_BR_PRI_LAST = INT_MAX, }; -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #define BRNF_PKT_TYPE 0x01 #define BRNF_BRIDGED_DNAT 0x02 @@ -24,16 +24,6 @@ enum nf_br_hook_priorities { #define BRNF_8021Q 0x10 #define BRNF_PPPoE 0x20 -/* Only used in br_forward.c */ -int nf_bridge_copy_header(struct sk_buff *skb); -static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) -{ - if (skb->nf_bridge && - skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) - return nf_bridge_copy_header(skb); - return 0; -} - static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) { switch (skb->protocol) { @@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) } } +static inline void nf_bridge_update_protocol(struct sk_buff *skb) +{ + if (skb->nf_bridge->mask & BRNF_8021Q) + skb->protocol = htons(ETH_P_8021Q); + else if (skb->nf_bridge->mask & BRNF_PPPoE) + skb->protocol = htons(ETH_P_PPP_SES); +} + +/* Fill in the header for fragmented IP packets handled by + * the IPv4 connection tracking code. + * + * Only used in br_forward.c + */ +static inline int nf_bridge_copy_header(struct sk_buff *skb) +{ + int err; + unsigned int header_size; + + nf_bridge_update_protocol(skb); + header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); + err = skb_cow_head(skb, header_size); + if (err) + return err; + + skb_copy_to_linear_data_offset(skb, -header_size, + skb->nf_bridge->data, header_size); + __skb_push(skb, nf_bridge_encap_header_len(skb)); + return 0; +} + +static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) +{ + if (skb->nf_bridge && + skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) + return nf_bridge_copy_header(skb); + return 0; +} + static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h deleted file mode 100644 index 4269de99e320afae47c4e96428636f07286668a6..0000000000000000000000000000000000000000 --- a/include/linux/phonedev.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __LINUX_PHONEDEV_H -#define __LINUX_PHONEDEV_H - -#include - -#ifdef __KERNEL__ - -#include - -struct phone_device { - struct phone_device *next; - const struct file_operations *f_op; - int (*open) (struct phone_device *, struct file *); - int board; /* Device private index */ - int minor; -}; - -extern int phonedev_init(void); -#define PHONE_MAJOR 100 -extern int phone_register_device(struct phone_device *, int unit); -#define PHONE_UNIT_ANY -1 -extern void phone_unregister_device(struct phone_device *); - -#endif -#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index ed39956b5613f98b95a5edd94bfb3f422abe71c7..d090cfcaa167fe33a4fc0e63562219a813f37c28 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -597,6 +597,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); } +/** + * phy_read_mmd_indirect - reads data from the MMD registers + * @phydev: The PHY device bus + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * + * Description: it reads data from the MMD registers (clause 22 to access to + * clause 45) of the specified phy address. 
+ */ +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr); + /** * phy_read - Convenience function for reading a given PHY register * @phydev: the phy_device struct @@ -668,6 +681,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } +/** + * phy_write_mmd_indirect - writes data to the MMD registers + * @phydev: The PHY device + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * @data: data to write in the MMD register + * + * Description: Write data from the MMD registers of the specified + * phy address. + */ +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr, u32 data); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index ae612acebb53d2f0bf121226afde877cb4d32f91..f2ca1b459377803094005029cc765ef1f3a9abd6 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -14,34 +14,35 @@ struct device_node; #ifdef CONFIG_FIXED_PHY extern int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status); -extern int fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np); +extern struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np); extern void fixed_phy_del(int phy_addr); +extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) { return -ENODEV; } -static inline int fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np) +static inline struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np) { - return -ENODEV; + return ERR_PTR(-ENODEV); } static inline int fixed_phy_del(int phy_addr) { return -ENODEV; } -#endif /* CONFIG_FIXED_PHY */ - -/* - * This function issued only by fixed_phy-aware drivers, no need - * protect it with #ifdef - */ -extern int fixed_phy_set_link_update(struct phy_device *phydev, +static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, - struct fixed_phy_status *)); + struct fixed_phy_status *)) +{ + return -ENODEV; +} +#endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/random.h b/include/linux/random.h index 57fbbffd77a0406fe57ce2abb5c2a24d819c45f9..b05856e16b75be8b7dea5015a5a6ff6afcb021eb 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,7 @@ unsigned int get_random_int(void); unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); u32 prandom_u32(void); -void prandom_bytes(void *buf, int nbytes); +void prandom_bytes(void *buf, size_t nbytes); void prandom_seed(u32 seed); void prandom_reseed_late(void); @@ -35,7 +35,7 @@ struct rnd_state { }; u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) diff --git a/include/linux/rhashtable.h 
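
fixed_phy_register() now returns the phy_device itself instead of an int, so callers switch from errno checks to IS_ERR(). A hedged sketch of the updated calling convention; the wrapper name is illustrative:

    #include <linux/err.h>
    #include <linux/phy.h>
    #include <linux/phy_fixed.h>

    static struct phy_device *example_add_fixed_link(struct device_node *np)
    {
            struct fixed_phy_status status = {
                    .link   = 1,
                    .speed  = 1000,
                    .duplex = 1,
            };
            struct phy_device *phydev;

            phydev = fixed_phy_register(PHY_POLL, &status, np);
            if (IS_ERR(phydev))
                    return NULL;    /* previously: a negative errno */

            return phydev;
    }

The returned device can then be passed to fixed_phy_set_link_update(), which now also has a stub when CONFIG_FIXED_PHY is disabled.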
b/include/linux/rhashtable.h index 36826c0166c5f5af0d3a8e7601944f0e93946e09..fb298e9d6d3a8e75c280162f21e04c5112eebeac 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -44,6 +44,7 @@ struct rhashtable; * @head_offset: Offset of rhash_head in struct to be hashed * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding + * @min_shift: Minimum number of shifts while shrinking * @hashfn: Function to hash key * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand @@ -57,6 +58,7 @@ struct rhashtable_params { size_t head_offset; u32 hash_rnd; size_t max_shift; + size_t min_shift; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; bool (*grow_decision)(const struct rhashtable *ht, diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 167bae7bdfa45cc255cdf9c22b986c1fffe8f9d5..6cacbce1a06c54700de7017a008163cb0d4ee9b6 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -46,6 +46,16 @@ static inline int lockdep_rtnl_is_held(void) #define rcu_dereference_rtnl(p) \ rcu_dereference_check(p, lockdep_rtnl_is_held()) +/** + * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereference + * + * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh() + * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh() + */ +#define rcu_dereference_bh_rtnl(p) \ + rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) + /** * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a17ba0881afb241b9055c96ff35ff8578ad00903..3ab0749d6875c5032b12d3188b8420ac97797c8e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -46,11 +46,29 @@ * * The hardware you're dealing with doesn't calculate the full checksum * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums - * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will - * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still - * undefined in this case though. It is a bad option, but, unfortunately, - * nowadays most vendors do this. Apparently with the secret goal to sell - * you new devices, when you will add new protocol to your host, f.e. IPv6 8) + * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY + * if their checksums are okay. skb->csum is still undefined in this case + * though. It is a bad option, but, unfortunately, nowadays most vendors do + * this. Apparently with the secret goal to sell you new devices, when you + * will add new protocol to your host, f.e. IPv6 8) + * + * CHECKSUM_UNNECESSARY is applicable to following protocols: + * TCP: IPv6 and IPv4. + * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a + * zero UDP checksum for either IPv4 or IPv6, the networking stack + * may perform further validation in this case. + * GRE: only if the checksum is present in the header. + * SCTP: indicates the CRC in SCTP header has been validated. + * + * skb->csum_level indicates the number of consecutive checksums found in + * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. 
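
A minimal sketch of the new rcu_dereference_bh_rtnl() helper added above, which accepts either rcu_read_lock_bh() or the RTNL as the protecting context; the pointer name is illustrative:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static struct net_device __rcu *example_dev;

    /* Legal from softirq context under rcu_read_lock_bh() or from
     * configuration paths that hold the RTNL.
     */
    static struct net_device *example_get_dev(void)
    {
            return rcu_dereference_bh_rtnl(example_dev);
    }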
+ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet + * and a device is able to verify the checksums for UDP (possibly zero), + * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * two. If the device were only able to verify the UDP checksum and not + * GRE, either because it doesn't support GRE checksum of because GRE + * checksum is bad, skb->csum_level would be set to zero (TCP checksum is + * not considered in this case). * * CHECKSUM_COMPLETE: * @@ -111,6 +129,9 @@ #define CHECKSUM_COMPLETE 2 #define CHECKSUM_PARTIAL 3 +/* Maximum value in skb->csum_level */ +#define SKB_MAX_CSUM_LEVEL 3 + #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) #define SKB_WITH_OVERHEAD(X) \ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -134,7 +155,7 @@ struct nf_conntrack { }; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { atomic_t use; unsigned int mask; @@ -317,9 +338,10 @@ struct skb_shared_info { enum { - SKB_FCLONE_UNAVAILABLE, - SKB_FCLONE_ORIG, - SKB_FCLONE_CLONE, + SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ + SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ + SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ + SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -451,6 +473,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices + * @xmit_more: More SKBs are pending for this queue * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -459,8 +482,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS - * @dma_cookie: a cookie to one of several possible DMA operations - * done by skb DMA functions * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -504,82 +525,97 @@ struct sk_buff { char cb[48] __aligned(8); unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); #ifdef CONFIG_XFRM struct sec_path *sp; +#endif +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct nf_conntrack *nfct; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + struct nf_bridge_info *nf_bridge; #endif unsigned int len, data_len; __u16 mac_len, hdr_len; - union { - __wsum csum; - struct { - __u16 csum_start; - __u16 csum_offset; - }; - }; - __u32 priority; + + /* Following fields are _not_ copied in __copy_skb_header() + * Note that queue_mapping is here mostly to fill a hole. 
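
Following the csum_level rules documented above, a hedged sketch of the driver-side marking for a tunnelled frame whose hardware verified both the outer UDP and the inner TCP checksum (device specifics omitted):

    #include <linux/skbuff.h>

    static void example_mark_stacked_csums(struct sk_buff *skb)
    {
            skb->ip_summed  = CHECKSUM_UNNECESSARY;
            skb->csum_level = 1;    /* "number of checksums minus one" */
    }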
+ */ kmemcheck_bitfield_begin(flags1); - __u8 ignore_df:1, - cloned:1, - ip_summed:2, + __u16 queue_mapping; + __u8 cloned:1, nohdr:1, - nfctinfo:3; - __u8 pkt_type:3, fclone:2, - ipvs_property:1, peeked:1, - nf_trace:1; + head_frag:1, + xmit_more:1; + /* one bit hole */ kmemcheck_bitfield_end(flags1); - __be16 protocol; - - void (*destructor)(struct sk_buff *skb); -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - struct nf_conntrack *nfct; -#endif -#ifdef CONFIG_BRIDGE_NETFILTER - struct nf_bridge_info *nf_bridge; -#endif - - int skb_iif; - - __u32 hash; - __be16 vlan_proto; - __u16 vlan_tci; + /* fields enclosed in headers_start/headers_end are copied + * using a single memcpy() in __copy_skb_header() + */ + __u32 headers_start[0]; -#ifdef CONFIG_NET_SCHED - __u16 tc_index; /* traffic control index */ -#ifdef CONFIG_NET_CLS_ACT - __u16 tc_verd; /* traffic control verdict */ -#endif +/* if you move pkt_type around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 #endif +#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) - __u16 queue_mapping; - kmemcheck_bitfield_begin(flags2); -#ifdef CONFIG_IPV6_NDISC_NODETYPE - __u8 ndisc_nodetype:2; -#endif + __u8 __pkt_type_offset[0]; + __u8 pkt_type:3; __u8 pfmemalloc:1; + __u8 ignore_df:1; + __u8 nfctinfo:3; + + __u8 nf_trace:1; + __u8 ip_summed:2; __u8 ooo_okay:1; __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; + __u8 no_fcs:1; - __u8 head_frag:1; - /* Encapsulation protocol and NIC drivers should use - * this flag to indicate to each other if the skb contains - * encapsulated packet or not and maybe use the inner packet - * headers if needed - */ + /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 csum_complete_sw:1; - /* 2/4 bit hole (depending on ndisc_nodetype presence) */ - kmemcheck_bitfield_end(flags2); + __u8 csum_level:2; + __u8 csum_bad:1; + +#ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; +#endif + __u8 ipvs_property:1; + __u8 inner_protocol_type:1; + /* 4 or 6 bit hole */ + +#ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +#ifdef CONFIG_NET_CLS_ACT + __u16 tc_verd; /* traffic control verdict */ +#endif +#endif + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int napi_id; #endif @@ -592,13 +628,22 @@ struct sk_buff { __u32 reserved_tailroom; }; - __be16 inner_protocol; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; + + __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; + + __u32 headers_end[0]; + /* These elements must be at the end, see alloc_skb() for details. 
*/ sk_buff_data_t tail; sk_buff_data_t end; @@ -730,6 +775,37 @@ static inline struct sk_buff *alloc_skb(unsigned int size, return __alloc_skb(size, priority, 0, NUMA_NO_NODE); } +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask); + +/* Layout of fast clones : [skb1][skb2][fclone_ref] */ +struct sk_buff_fclones { + struct sk_buff skb1; + + struct sk_buff skb2; + + atomic_t fclone_ref; +}; + +/** + * skb_fclone_busy - check if fclone is busy + * @skb: buffer + * + * Returns true is skb is a fast clone, and its clone is not freed. + */ +static inline bool skb_fclone_busy(const struct sk_buff *skb) +{ + const struct sk_buff_fclones *fclones; + + fclones = container_of(skb, struct sk_buff_fclones, skb1); + + return skb->fclone == SKB_FCLONE_ORIG && + fclones->skb2.fclone == SKB_FCLONE_CLONE; +} + static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { @@ -1038,6 +1114,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb) * Drop a reference to the header part of the buffer. This is done * by acquiring a payload reference. You must not read from the header * part of skb->data after this. + * Note : Check if you can use __skb_header_release() instead. */ static inline void skb_header_release(struct sk_buff *skb) { @@ -1046,6 +1123,20 @@ static inline void skb_header_release(struct sk_buff *skb) atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); } +/** + * __skb_header_release - release reference to header + * @skb: buffer to operate on + * + * Variant of skb_header_release() assuming skb is private to caller. + * We can avoid one atomic operation. + */ +static inline void __skb_header_release(struct sk_buff *skb) +{ + skb->nohdr = 1; + atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); +} + + /** * skb_shared - is the buffer shared * @skb: buffer to check @@ -1671,6 +1762,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +#define ENCAP_TYPE_ETHER 0 +#define ENCAP_TYPE_IPPROTO 1 + +static inline void skb_set_inner_protocol(struct sk_buff *skb, + __be16 protocol) +{ + skb->inner_protocol = protocol; + skb->inner_protocol_type = ENCAP_TYPE_ETHER; +} + +static inline void skb_set_inner_ipproto(struct sk_buff *skb, + __u8 ipproto) +{ + skb->inner_ipproto = ipproto; + skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; +} + static inline void skb_reset_inner_headers(struct sk_buff *skb) { skb->inner_mac_header = skb->mac_header; @@ -1856,18 +1964,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) return pskb_may_pull(skb, skb_network_offset(skb) + len); } -static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb) -{ - /* Only continue with checksum unnecessary if device indicated - * it is valid across encapsulation (skb->encapsulation was set). - */ - if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation) - skb->ip_summed = CHECKSUM_NONE; - - skb->encapsulation = 0; - skb->csum_valid = 0; -} - /* * CPUs often take a performance hit when accessing unaligned memory * locations. 
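
skb_set_inner_ipproto() above gives encapsulating transmit paths a way to record which inner protocol follows the tunnel header when there is no inner Ethernet type. A hedged sketch; IPPROTO_IPIP is chosen purely for illustration:

    #include <linux/in.h>
    #include <linux/skbuff.h>

    static void example_prepare_encap(struct sk_buff *skb)
    {
            skb_set_inner_ipproto(skb, IPPROTO_IPIP);
            skb->encapsulation = 1;
    }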
The actual performance hit varies, it can be small if the @@ -2563,20 +2659,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); -static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, - int len, void *buffer) +static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) { - int hlen = skb_headlen(skb); - if (hlen - offset >= len) - return skb->data + offset; + return data + offset; - if (skb_copy_bits(skb, offset, buffer, len) < 0) + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) return NULL; return buffer; } +static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + return __skb_header_pointer(skb, offset, len, skb->data, + skb_headlen(skb), buffer); +} + /** * skb_needs_linearize - check if we need to linearize a given skb * depending on the given device features. @@ -2667,6 +2769,8 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +struct sk_buff *skb_clone_sk(struct sk_buff *skb); + #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING void skb_clone_tx_timestamp(struct sk_buff *skb); @@ -2782,6 +2886,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 0 : __skb_checksum_complete(skb); } +static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level == 0) + skb->ip_summed = CHECKSUM_NONE; + else + skb->csum_level--; + } +} + +static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level < SKB_MAX_CSUM_LEVEL) + skb->csum_level++; + } else if (skb->ip_summed == CHECKSUM_NONE) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + } +} + +static inline void __skb_mark_checksum_bad(struct sk_buff *skb) +{ + /* Mark current checksum as bad (typically called from GRO + * path). In the case that ip_summed is CHECKSUM_NONE + * this must be the first checksum encountered in the packet. + * When ip_summed is CHECKSUM_UNNECESSARY, this is the first + * checksum after the last one validated. For UDP, a zero + * checksum can not be marked as bad. + */ + + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_bad = 1; +} + /* Check if we need to perform checksum complete validation. 
* * Returns true if checksum complete is needed, false otherwise @@ -2793,6 +2933,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, { if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { skb->csum_valid = 1; + __skb_decr_checksum_unnecessary(skb); return false; } @@ -2822,6 +2963,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } + } else if (skb->csum_bad) { + /* ip_summed == CHECKSUM_NONE in this case */ + return 1; } skb->csum = psum; @@ -2879,6 +3023,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) #define skb_checksum_simple_validate(skb) \ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) +static inline bool __skb_checksum_convert_check(struct sk_buff *skb) +{ + return (skb->ip_summed == CHECKSUM_NONE && + skb->csum_valid && !skb->csum_bad); +} + +static inline void __skb_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + skb->csum = ~pseudo; + skb->ip_summed = CHECKSUM_COMPLETE; +} + +#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_checksum_convert_check(skb)) \ + __skb_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) @@ -2892,7 +3056,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) atomic_inc(&nfct->use); } #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) { if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) @@ -2910,7 +3074,7 @@ static inline void nf_reset(struct sk_buff *skb) nf_conntrack_put(skb->nfct); skb->nfct = NULL; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); skb->nf_bridge = NULL; #endif @@ -2924,19 +3088,22 @@ static inline void nf_reset_trace(struct sk_buff *skb) } /* Note: This doesn't put any conntrack and bridge info in dst. 
*/ -static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, + bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) dst->nfct = src->nfct; nf_conntrack_get(src->nfct); - dst->nfctinfo = src->nfctinfo; + if (copy) + dst->nfctinfo = src->nfctinfo; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) dst->nf_bridge = src->nf_bridge; nf_bridge_get(src->nf_bridge); #endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) - dst->nf_trace = src->nf_trace; + if (copy) + dst->nf_trace = src->nf_trace; #endif } @@ -2945,10 +3112,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(dst->nfct); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(dst->nf_bridge); #endif - __nf_copy(dst, src); + __nf_copy(dst, src, true); } #ifdef CONFIG_NETWORK_SECMARK @@ -3133,7 +3300,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); int skb_checksum_setup(struct sk_buff *skb, bool recalculate); -u32 __skb_get_poff(const struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen); /** * skb_head_is_locked - Determine if the skb->head is locked down diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0f86d85a9ce44cc771bb7118eb7b4cbc28ae905d..bda9b81357ccf13ae68f2c951e9d38d86896cbad 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -65,6 +65,7 @@ struct old_linux_dirent; struct perf_event_attr; struct file_handle; struct sigaltstack; +union bpf_attr; #include #include @@ -875,5 +876,5 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, const char __user *uargs); asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); - +asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ac82c5ea955bd160e0bde9057ca3885dfc16ad87..c2dee7deefa8cb32af530d20e5aa32a61b10ce68 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -268,7 +268,7 @@ struct tcp_sock { u32 retrans_stamp; /* Timestamp of the last retransmit, * also used in SYN-SENT to remember stamp of * the first SYN. */ - u32 undo_marker; /* tracking retrans started here. */ + u32 undo_marker; /* snd_una upon a new recovery episode. */ int undo_retrans; /* number of undoable retransmissions. */ u32 total_retrans; /* Total retransmits for entire connection */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08bbf377ff5819ebd02683806b0c83..ee3277593222cf314f9b030eec6ec09d0c38c4e4 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,7 +49,11 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ + no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ + convert_csum:1;/* On receive, convert checksum + * unnecessary to checksum complete + * if possible. + */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } +static inline void udp_set_convert_csum(struct sock *sk, bool val) +{ + udp_sk(sk)->convert_csum = val; +} + +static inline bool udp_get_convert_csum(struct sock *sk) +{ + return udp_sk(sk)->convert_csum; +} + #define udp_portaddr_for_each_entry(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index ec51e673b4b67bf762cd3e606709f34069ae1a55..d13573bb879e931628eb6b4b002f32ba11386b23 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -202,7 +202,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); void ipv6_sock_ac_close(struct sock *sk); -int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); void ipv6_ac_destroy_dev(struct inet6_dev *idev); bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, diff --git a/include/net/ah.h b/include/net/ah.h index ca95b98969ddf21860590e8252f7ad8a53f690b4..4e2dfa474a7e6e8deaeb56ec2a9115345141ac58 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -3,9 +3,6 @@ #include -/* This is the maximum truncated ICV length that we know of. */ -#define MAX_AH_AUTH_LEN 64 - struct crypto_ahash; struct ah_data { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 373000de610d4d3077f289a2f7a1967910ef09e3..58695ffeb13814786227f7b04d3a4930a8d98bdd 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -120,9 +120,9 @@ struct bt_voice { #define BT_RCVMTU 13 __printf(1, 2) -int bt_info(const char *fmt, ...); +void bt_info(const char *fmt, ...); __printf(1, 2) -int bt_err(const char *fmt, ...); +void bt_err(const char *fmt, ...); #define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) #define BT_ERR(fmt, ...) 
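
The new convert_csum socket flag above pairs with skb_checksum_try_convert() from the skbuff.h hunk earlier in this patch. A hedged sketch of a UDP receive path honouring it; inet_compute_pseudo() is assumed to be the IPv4 pseudo-header helper the UDP code already provides and is not part of this patch:

    #include <linux/udp.h>
    #include <net/udp.h>

    static void example_udp_maybe_convert(struct sock *sk,
                                          struct sk_buff *skb,
                                          const struct udphdr *uh)
    {
            if (udp_get_convert_csum(sk) && !skb_csum_unnecessary(skb))
                    skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
                                             inet_compute_pseudo);
    }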
bt_err(fmt "\n", ##__VA_ARGS__) @@ -284,6 +284,7 @@ struct hci_req_ctrl { struct bt_skb_cb { __u8 pkt_type; __u8 incoming; + __u16 opcode; __u16 expect; __u8 force_active; struct l2cap_chan *chan; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 3f8547f1c6f8c019cc33677fe8053c9ebf608fe2..6e8f249673089badfb3e512341478171ada9d174 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -385,6 +385,7 @@ enum { #define HCI_ERROR_AUTH_FAILURE 0x05 #define HCI_ERROR_MEMORY_EXCEEDED 0x07 #define HCI_ERROR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d #define HCI_ERROR_REJ_BAD_ADDR 0x0f #define HCI_ERROR_REMOTE_USER_TERM 0x13 #define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6f884e6c731e41daccdf4119cb1f0f8a4ee9335f..37ff1aef0845e9c2ef0335b95c87b93b65e9c025 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -302,7 +302,7 @@ struct hci_dev { __u32 req_status; __u32 req_result; - struct crypto_blkcipher *tfm_aes; + void *smp_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -539,7 +539,6 @@ enum { HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, HCI_CONN_SCO_SETUP_PEND, - HCI_CONN_LE_SMP_PEND, HCI_CONN_MGMT_CONNECTED, HCI_CONN_SSP_ENABLED, HCI_CONN_SC_ENABLED, @@ -553,6 +552,7 @@ enum { HCI_CONN_FIPS, HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, + HCI_CONN_DROP, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -702,7 +702,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, return NULL; } -void hci_disconnect(struct hci_conn *conn, __u8 reason); +int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); @@ -756,9 +756,10 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status); * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 
*/ -static inline void hci_conn_get(struct hci_conn *conn) +static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) { get_device(&conn->dev); + return conn; } static inline void hci_conn_put(struct hci_conn *conn) @@ -790,7 +791,7 @@ static inline void hci_conn_drop(struct hci_conn *conn) if (!conn->out) timeo *= 2; } else { - timeo = msecs_to_jiffies(10); + timeo = 0; } break; @@ -799,7 +800,7 @@ static inline void hci_conn_drop(struct hci_conn *conn) break; default: - timeo = msecs_to_jiffies(10); + timeo = 0; break; } @@ -925,7 +926,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); -int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); void hci_init_sysfs(struct hci_dev *hdev); @@ -970,6 +970,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) +#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ + !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1258,6 +1261,8 @@ bool hci_req_pending(struct hci_dev *hdev); void hci_req_add_le_scan_disable(struct hci_request *req); void hci_req_add_le_passive_scan(struct hci_request *req); +void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req); + struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout); struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, @@ -1336,8 +1341,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered); -void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status); +void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); @@ -1353,6 +1357,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); +bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 8df15ad0d43fadc17f82aed2cb2b9115ff6a425f..ead99f032f7ab381a0559cf5f4ee23600632d479 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -633,10 +633,11 @@ struct l2cap_conn { struct sk_buff_head pending_rx; struct work_struct pending_rx_work; + struct work_struct id_addr_update_work; + __u8 disc_reason; - struct delayed_work security_timer; - struct smp_chan *smp_chan; + struct l2cap_chan *smp; struct list_head chan_l; struct mutex chan_lock; @@ -708,6 +709,8 @@ enum { FLAG_EFS_ENABLE, 
FLAG_DEFER_SETUP, FLAG_LE_CONN_REQ_SENT, + FLAG_PENDING_SECURITY, + FLAG_HOLD_HCI_CONN, }; enum { @@ -837,18 +840,43 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan return NULL; } +static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + return -ENOSYS; +} + +static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + return ERR_PTR(-ENOSYS); +} + static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) { } +static inline void l2cap_chan_no_close(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan, + int state, int err) +{ +} + static inline void l2cap_chan_no_defer(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_resume(struct l2cap_chan *chan) { } @@ -911,14 +939,13 @@ int l2cap_ertm_init(struct l2cap_chan *chan); void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); void l2cap_chan_del(struct l2cap_chan *chan, int err); -void l2cap_conn_update_id_addr(struct hci_conn *hcon); void l2cap_send_conn_req(struct l2cap_chan *chan); void l2cap_move_start(struct l2cap_chan *chan); void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, u8 status); void __l2cap_physical_cfm(struct l2cap_chan *chan, int result); -void l2cap_conn_get(struct l2cap_conn *conn); +struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn); void l2cap_conn_put(struct l2cap_conn *conn); int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0a080c4de2754d0b9fc64ecf54e8dc4bcb477cf3..a2ddcf2398fdadf43bcb5ccc12891aba20f0c946 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4,6 +4,7 @@ * 802.11 device and configuration interface * * Copyright 2006-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -663,6 +664,7 @@ struct cfg80211_acl_data { * @crypto: crypto settings * @privacy: the BSS uses privacy * @auth_type: Authentication type (algorithm) + * @smps_mode: SMPS mode * @inactivity_timeout: time in seconds to determine station's inactivity. 
* @p2p_ctwindow: P2P CT Window * @p2p_opp_ps: P2P opportunistic PS @@ -681,6 +683,7 @@ struct cfg80211_ap_settings { struct cfg80211_crypto_settings crypto; bool privacy; enum nl80211_auth_type auth_type; + enum nl80211_smps_mode smps_mode; int inactivity_timeout; u8 p2p_ctwindow; bool p2p_opp_ps; @@ -1503,12 +1506,14 @@ enum cfg80211_signal_type { * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing * @len: length of the IEs + * @from_beacon: these IEs are known to come from a beacon * @data: IE data */ struct cfg80211_bss_ies { u64 tsf; struct rcu_head rcu_head; int len; + bool from_beacon; u8 data[]; }; @@ -1605,10 +1610,12 @@ struct cfg80211_auth_request { * * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) * @ASSOC_REQ_DISABLE_VHT: Disable VHT + * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), ASSOC_REQ_DISABLE_VHT = BIT(1), + ASSOC_REQ_USE_RRM = BIT(2), }; /** @@ -1800,6 +1807,7 @@ struct cfg80211_connect_params { * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed + * @WIPHY_PARAM_DYN_ACK: dynack has been enabled */ enum wiphy_params_flags { WIPHY_PARAM_RETRY_SHORT = 1 << 0, @@ -1807,6 +1815,7 @@ enum wiphy_params_flags { WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, + WIPHY_PARAM_DYN_ACK = 1 << 5, }; /* @@ -1973,14 +1982,12 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key - * @kck: key confirmation key - * @replay_ctr: replay counter + * @kek: key encryption key (NL80211_KEK_LEN bytes) + * @kck: key confirmation key (NL80211_KCK_LEN bytes) + * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) */ struct cfg80211_gtk_rekey_data { - u8 kek[NL80211_KEK_LEN]; - u8 kck[NL80211_KCK_LEN]; - u8 replay_ctr[NL80211_REPLAY_CTR_LEN]; + const u8 *kek, *kck, *replay_ctr; }; /** @@ -2313,6 +2320,17 @@ struct cfg80211_qos_map { * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the * given interface This is used e.g. for dynamic HT 20/40 MHz channel width * changes during the lifetime of the BSS. + * + * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device + * with the given parameters; action frame exchange has been handled by + * userspace so this just has to modify the TX path to take the TS into + * account. + * If the admitted time is 0 just validate the parameters to make sure + * the session can be created at all; it is valid to just always return + * success for that but that may result in inefficient behaviour (handshake + * with the peer followed by immediate teardown when the addition is later + * rejected) + * @del_tx_ts: remove an existing TX TS */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2553,6 +2571,12 @@ struct cfg80211_ops { int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef); + + int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer, u8 user_prio, + u16 admitted_time); + int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer); }; /* @@ -2599,9 +2623,13 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. 
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). + * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM + * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS + * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it + * needs to be able to handle Block-Ack agreements and other things. */ enum wiphy_flags { - /* use hole at 0 */ + WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0), /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -3765,11 +3793,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, } /** - * cfg80211_inform_bss - inform cfg80211 of a new BSS + * enum cfg80211_bss_frame_type - frame type that the BSS data came from + * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is + * from a beacon or probe response + * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon + * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response + */ +enum cfg80211_bss_frame_type { + CFG80211_BSS_FTYPE_UNKNOWN, + CFG80211_BSS_FTYPE_BEACON, + CFG80211_BSS_FTYPE_PRESP, +}; + +/** + * cfg80211_inform_bss_width - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @rx_channel: The channel the frame was received on * @scan_width: width of the control channel + * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer @@ -3789,6 +3831,7 @@ struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp); @@ -3796,12 +3839,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy, static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { return cfg80211_inform_bss_width(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, + NL80211_BSS_CHAN_WIDTH_20, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, signal, gfp); @@ -3902,6 +3946,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); * moves to cfg80211 in this call * @buf: authentication frame (header + body) * @len: length of the frame data + * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a. * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@ -3910,7 +3955,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); */ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *buf, size_t len); + const u8 *buf, size_t len, + int uapsd_queues); /** * cfg80211_assoc_timeout - notification of timed out association @@ -4412,7 +4458,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags - * @gfp: context flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. 
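Together with the add_tx_ts/del_tx_ts operations above, the new WIPHY_FLAG_SUPPORTS_WMM_ADMISSION flag lets a driver expose WMM-TSPEC admission control to userspace. A rough sketch of how a driver might wire this up; all example_* names are hypothetical and not part of the patch:

    #include <net/cfg80211.h>

    /* validate (admitted_time == 0) or install a TX traffic stream */
    static int example_add_tx_ts(struct wiphy *wiphy, struct net_device *dev,
                                 u8 tsid, const u8 *peer, u8 user_prio,
                                 u16 admitted_time)
    {
            if (tsid > 7)           /* only TID/TSID 0-7 are supported */
                    return -EINVAL;
            /* program the admitted time into the TX path here */
            return 0;
    }

    static int example_del_tx_ts(struct wiphy *wiphy, struct net_device *dev,
                                 u8 tsid, const u8 *peer)
    {
            /* tear the session down in the TX path here */
            return 0;
    }

    static void example_wiphy_init(struct wiphy *wiphy)
    {
            wiphy->flags |= WIPHY_FLAG_SUPPORTS_WMM_ADMISSION;
            /* and point cfg80211_ops .add_tx_ts/.del_tx_ts at the
             * functions above
             */
    }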
@@ -4423,7 +4468,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, u32 flags, gfp_t gfp); + const u8 *buf, size_t len, u32 flags); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame diff --git a/include/net/checksum.h b/include/net/checksum.h index 87cb1903640d63ccfed890c957294af46a93cf24..6465bae80a4f8ee451d175b3dc8022340d418709 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { - __be32 diff[] = { ~from, to }; - - *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); + *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to)); } /* Implements RFC 1624 (Incremental Internet Checksum) diff --git a/include/net/codel.h b/include/net/codel.h index fe0eab32ce76dd42845e8bc466c8bade5f429157..aeee28081245c9215f10badd611f58ba0124fcd0 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t; static inline codel_time_t codel_get_time(void) { - u64 ns = ktime_to_ns(ktime_get()); + u64 ns = ktime_get_ns(); return ns >> CODEL_SHIFT; } diff --git a/include/net/dsa.h b/include/net/dsa.h index 6efce384451e56f16d8aab0847a6a7be4e94e0a0..58ad8c6492dba56360b9cad1c81d4061526721c9 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -15,6 +15,17 @@ #include #include #include +#include +#include +#include + +enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_DSA, + DSA_TAG_PROTO_TRAILER, + DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_BRCM, +}; #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 @@ -23,9 +34,15 @@ struct dsa_chip_data { /* * How to access the switch configuration registers. */ - struct device *mii_bus; + struct device *host_dev; int sw_addr; + /* Device tree node pointer for this specific switch chip + * used during switch setup in case additional properties + * and resources needs to be used + */ + struct device_node *of_node; + /* * The names of the switch's ports. Use "cpu" to * designate the switch port that the cpu is connected to, @@ -34,6 +51,7 @@ struct dsa_chip_data { * or any other string to indicate this is a physical port. */ char *port_names[DSA_MAX_PORTS]; + struct device_node *port_dn[DSA_MAX_PORTS]; /* * An array (with nr_chips elements) of which element [a] @@ -59,6 +77,8 @@ struct dsa_platform_data { struct dsa_chip_data *chip; }; +struct packet_type; + struct dsa_switch_tree { /* * Configuration data for the platform device that owns @@ -71,7 +91,11 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; - __be16 tag_protocol; + int (*rcv)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev); + enum dsa_tag_protocol tag_protocol; /* * The switch and port to which the CPU is attached. @@ -110,15 +134,16 @@ struct dsa_switch { struct dsa_switch_driver *drv; /* - * Reference to mii bus to use. + * Reference to host device to use. */ - struct mii_bus *master_mii_bus; + struct device *master_dev; /* * Slave mii_bus and devices for the individual ports. 
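The csum_replace4() rewrite in the checksum.h hunk above applies the RFC 1624 incremental update directly, subtracting the old 32-bit word from the unfolded checksum and adding the new one, instead of feeding a temporary two-word buffer to csum_partial(); the folded result is identical. A typical caller pattern, sketched for illustration only (example_rewrite_saddr() is hypothetical):

    #include <linux/ip.h>
    #include <net/checksum.h>

    /* keep the IPv4 header checksum consistent with an in-place
     * source address rewrite
     */
    static void example_rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
    {
            csum_replace4(&iph->check, iph->saddr, new_saddr);
            iph->saddr = new_saddr;
    }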
*/ u32 dsa_port_mask; u32 phys_port_mask; + u32 phys_mii_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[DSA_MAX_PORTS]; }; @@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) struct dsa_switch_driver { struct list_head list; - __be16 tag_protocol; + enum dsa_tag_protocol tag_protocol; int priv_size; /* * Probing and setup. */ - char *(*probe)(struct mii_bus *bus, int sw_addr); + char *(*probe)(struct device *host_dev, int sw_addr); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); + u32 (*get_phy_flags)(struct dsa_switch *ds, int port); /* * Access to the switch's PHY registers. @@ -169,6 +195,14 @@ struct dsa_switch_driver { */ void (*poll_link)(struct dsa_switch *ds); + /* + * Link state adjustment (called from libphy) + */ + void (*adjust_link)(struct dsa_switch *ds, int port, + struct phy_device *phydev); + void (*fixed_link_update)(struct dsa_switch *ds, int port, + struct fixed_phy_status *st); + /* * ethtool hardware statistics. */ @@ -176,31 +210,50 @@ struct dsa_switch_driver { void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); int (*get_sset_count)(struct dsa_switch *ds); + + /* + * ethtool Wake-on-LAN + */ + void (*get_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + int (*set_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + + /* + * Suspend and resume + */ + int (*suspend)(struct dsa_switch *ds); + int (*resume)(struct dsa_switch *ds); + + /* + * Port enable/disable + */ + int (*port_enable)(struct dsa_switch *ds, int port, + struct phy_device *phy); + void (*port_disable)(struct dsa_switch *ds, int port, + struct phy_device *phy); + + /* + * EEE settings + */ + int (*set_eee)(struct dsa_switch *ds, int port, + struct phy_device *phydev, + struct ethtool_eee *e); + int (*get_eee)(struct dsa_switch *ds, int port, + struct ethtool_eee *e); }; void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline void *ds_to_priv(struct dsa_switch *ds) { return (void *)(ds + 1); } -/* - * The original DSA tag format and some other tag formats have no - * ethertype, which means that we need to add a little hack to the - * networking receive path to make sure that received frames get - * the right ->protocol assigned to them when one of those tag - * formats is in use.
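The enlarged dsa_switch_driver above now carries ethtool Wake-on-LAN, suspend/resume, per-port enable/disable and EEE hooks next to the existing PHY accessors. A skeleton of how a driver might populate a couple of the new operations; every example_* name is a hypothetical placeholder, not taken from the patch:

    #include <net/dsa.h>

    static int example_port_enable(struct dsa_switch *ds, int port,
                                   struct phy_device *phy)
    {
            /* power up / ungate the per-port logic on ifup */
            return 0;
    }

    static void example_port_disable(struct dsa_switch *ds, int port,
                                     struct phy_device *phy)
    {
            /* quiesce the port on ifdown or suspend */
    }

    static struct dsa_switch_driver example_driver = {
            .tag_protocol   = DSA_TAG_PROTO_BRCM,
            .port_enable    = example_port_enable,
            .port_disable   = example_port_disable,
            /* .probe, .setup, .set_addr, ... as before */
    };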
- */ -static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) -{ - return !!(dst->tag_protocol == htons(ETH_P_DSA)); -} - -static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) +static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { - return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); + return dst->rcv != NULL; } - #endif diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 6667a054763adfad3407d59d0f6810fb13f56a14..7ee2df083542365e9d317fa1dbc2bbcfbbe34aa6 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -27,7 +27,19 @@ struct flow_keys { u8 ip_proto; }; -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); -__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto); +bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, + void *data, __be16 proto, int nhoff, int hlen); +static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +{ + return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0); +} +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); +static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); +} u32 flow_hash_from_keys(struct flow_keys *keys); +unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len, + __be16 protocol); #endif diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index ea4271dceff0da2df04497e424e49c18b9a6d6f5..cbafa3768d48d1e1cae3e068e86f9f0a01813c9e 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -6,6 +6,11 @@ #include #include +struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; + struct u64_stats_sync syncp; +}; + struct gnet_dump { spinlock_t * lock; struct sk_buff * skb; @@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d); int gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); +void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); int gnet_stats_copy_rate_est(struct gnet_dump *d, const struct gnet_stats_basic_packed *b, struct gnet_stats_rate_est64 *r); -int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q); +int gnet_stats_copy_queue(struct gnet_dump *d, + struct gnet_stats_queue __percpu *cpu_q, + struct gnet_stats_queue *q, __u32 qlen); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_finish_copy(struct gnet_dump *d); int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est64 *rate_est); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, diff --git a/include/net/geneve.h b/include/net/geneve.h new file mode 100644 index 0000000000000000000000000000000000000000..112132cf8e2e878c0e4629c88e2065eadfda0f4f --- /dev/null +++ 
b/include/net/geneve.h @@ -0,0 +1,97 @@ +#ifndef __NET_GENEVE_H +#define __NET_GENEVE_H 1 + +#ifdef CONFIG_INET +#include +#endif + + +/* Geneve Header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |Ver| Opt Len |O|C| Rsvd. | Protocol Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Virtual Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Variable Length Options | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Option Header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Option Class | Type |R|R|R| Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Variable Option Data | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + +struct geneve_opt { + __be16 opt_class; + u8 type; +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 length:5; + u8 r3:1; + u8 r2:1; + u8 r1:1; +#else + u8 r1:1; + u8 r2:1; + u8 r3:1; + u8 length:5; +#endif + u8 opt_data[]; +}; + +#define GENEVE_CRIT_OPT_TYPE (1 << 7) + +struct genevehdr { +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 opt_len:6; + u8 ver:2; + u8 rsvd1:6; + u8 critical:1; + u8 oam:1; +#else + u8 ver:2; + u8 opt_len:6; + u8 oam:1; + u8 critical:1; + u8 rsvd1:6; +#endif + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + struct geneve_opt options[]; +}; + +#ifdef CONFIG_INET +struct geneve_sock; + +typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb); + +struct geneve_sock { + struct hlist_node hlist; + geneve_rcv_t *rcv; + void *rcv_data; + struct work_struct del_work; + struct socket *sock; + struct rcu_head rcu; + atomic_t refcnt; + struct udp_offload udp_offloads; +}; + +#define GENEVE_VER 0 +#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) + +struct geneve_sock *geneve_sock_add(struct net *net, __be16 port, + geneve_rcv_t *rcv, void *data, + bool no_share, bool ipv6); + +void geneve_sock_release(struct geneve_sock *vs); + +int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, + __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, + __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, + bool xnet); +#endif /*ifdef CONFIG_INET */ + +#endif /*ifdef__NET_GENEVE_H */ diff --git a/include/net/gue.h b/include/net/gue.h new file mode 100644 index 0000000000000000000000000000000000000000..b6c332788084b8457ac07193354fed96822beba0 --- /dev/null +++ b/include/net/gue.h @@ -0,0 +1,23 @@ +#ifndef __NET_GUE_H +#define __NET_GUE_H + +struct guehdr { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hlen:4, + version:4; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u8 version:4, + hlen:4; +#else +#error "Please fix " +#endif + __u8 next_hdr; + __u16 flags; + }; + __u32 word; + }; +}; + +#endif diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index d07b1a64b4e721958b9234aab1a94ab3ad40d63d..55a8d4056cc9123ee0422e49abf6bd4f98845045 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -35,7 +35,6 @@ enum { INET6_IFADDR_STATE_DAD, INET6_IFADDR_STATE_POSTDAD, INET6_IFADDR_STATE_ERRDAD, - INET6_IFADDR_STATE_UP, INET6_IFADDR_STATE_DEAD, }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5fbe6568c3cff7b025d4957cc28df4b77f051dc6..848e85cb5c6128ecfe101e386657ce355f507b5c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -242,6 
+242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, #endif } +static inline unsigned long +inet_csk_rto_backoff(const struct inet_connection_sock *icsk, + unsigned long max_when) +{ + u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; + + return (unsigned long)min_t(u64, when, max_when); +} + struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); struct request_sock *inet_csk_search_req(const struct sock *sk, diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 01d590ee5e7eb5250eaef9e0880153c257b706bd..80479abddf73cc181899e1115a572727f578633d 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -61,7 +61,6 @@ struct inet_peer { struct inet_peer_base { struct inet_peer __rcu *root; seqlock_t lock; - u32 flush_seq; int total; }; diff --git a/include/net/ip.h b/include/net/ip.h index db4a771b9ef3a6893bb2ae5d5f8692bfec5c571d..0bb620702929e7ad3b48f7aa40e5c73df3638141 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, - __be32 saddr, const struct ip_reply_arg *arg, +void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, + const struct ip_options *sopt, + __be32 daddr, __be32 saddr, + const struct ip_reply_arg *arg, unsigned int len); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) @@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif -extern int sysctl_ip_nonlocal_bind; - /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; @@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk) sk->sk_txhash = flow_hash_from_keys(&keys); } +static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct iphdr *iph = skb_gro_network_header(skb); + + return csum_tcpudp_nofold(iph->saddr, iph->daddr, + skb_gro_len(skb), proto, 0); +} + /* * Map a multicast IP onto multicast MAC for type ethernet. 
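inet_csk_rto_backoff() above performs the RTO shift in 64 bits before clamping, so a large icsk_backoff cannot wrap the 32-bit jiffies value; an RTO of a few hundred jiffies shifted by a backoff above 20 would overflow 32 bits, but here it is simply clamped to max_when. A sketch of the intended caller pattern (example_rearm_probe_timer() is hypothetical; TCP_RTO_MAX and ICSK_TIME_PROBE0 are existing constants):

    #include <net/inet_connection_sock.h>
    #include <net/tcp.h>

    /* arm the zero-window probe timer with the clamped, backed-off RTO */
    static void example_rearm_probe_timer(struct sock *sk)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);
            unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

            inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
    }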
*/ @@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb); void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); -int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); + +int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, + const struct ip_options *sopt); +static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) +{ + return __ip_options_echo(dopt, skb, &IPCB(skb)->opt); +} + void ip_options_fragment(struct sk_buff *skb); int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); @@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, u32 info); +bool icmp_global_allow(void); +extern int sysctl_icmp_msgs_per_sec; +extern int sysctl_icmp_msgs_burst; + #ifdef CONFIG_PROC_FS int ip_misc_proc_init(void); #endif diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 55236cb711745fc76ef4c897bcf70e957b34611c..1a49b73f7f6e372381f9d1f3b7cffdc5649794c6 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0)); } +static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct ipv6hdr *iph = skb_gro_network_header(skb); + + return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, + skb_gro_len(skb), proto, 0)); +} + static __inline__ __sum16 tcp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index cf485f9aa56369fec2d47d82ca0cb5741ffca30e..8eea35d32a75b3927316fa38304faf63632bf8dc 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -64,7 +64,7 @@ struct fib6_node { __u16 fn_bit; /* bit key */ __u16 fn_flags; - __u32 fn_sernum; + int fn_sernum; struct rt6_info *rr_ptr; }; @@ -202,15 +202,25 @@ static inline void ip6_rt_put(struct rt6_info *rt) dst_release(&rt->dst); } -struct fib6_walker_t { +enum fib6_walk_state { +#ifdef CONFIG_IPV6_SUBTREES + FWS_S, +#endif + FWS_L, + FWS_R, + FWS_C, + FWS_U +}; + +struct fib6_walker { struct list_head lh; struct fib6_node *root, *node; struct rt6_info *leaf; - unsigned char state; - unsigned char prune; + enum fib6_walk_state state; + bool prune; unsigned int skip; unsigned int count; - int (*func)(struct fib6_walker_t *); + int (*func)(struct fib6_walker *); void *args; }; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9922093f575e20fa2e53a903a68da7773469c600..dc9d2a27c3158115b927b3f4b93e5bd98799d749 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -65,7 +65,8 @@ struct fnhe_hash_bucket { struct fib_nh_exception __rcu *chain; }; -#define FNHE_HASH_SIZE 2048 +#define FNHE_HASH_SHIFT 11 +#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT) #define FNHE_RECLAIM_DEPTH 5 struct fib_nh { @@ -87,7 +88,7 @@ struct fib_nh { int nh_saddr_genid; struct rtable __rcu * __percpu *nh_pcpu_rth_output; struct rtable __rcu *nh_rth_input; - struct fnhe_hash_bucket *nh_exceptions; + struct fnhe_hash_bucket __rcu *nh_exceptions; }; /* diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 8dd8cab88b873cda0d8de9c936fefc0850ef0703..5bc6edeb7143398e41a5e7a185228d135534a5c8 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #if 
IS_ENABLED(CONFIG_IPV6) @@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm { }; #endif +struct ip_tunnel_encap { + __u16 type; + __u16 flags; + __be16 sport; + __be16 dport; +}; + struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry __rcu *next; __be32 addr; @@ -56,13 +64,18 @@ struct ip_tunnel { /* These four fields used only by GRE */ __u32 i_seqno; /* The last seen seqno */ __u32 o_seqno; /* The last output seqno */ - int hlen; /* Precalculated header length */ + int tun_hlen; /* Precalculated header length */ int mlink; struct ip_tunnel_dst __percpu *dst_cache; struct ip_tunnel_parm parms; + int encap_hlen; /* Encap header length (FOU,GUE) */ + struct ip_tunnel_encap encap; + + int hlen; /* tun_hlen + encap_hlen */ + /* for SIT */ #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd_parm ip6rd; @@ -73,15 +86,18 @@ struct ip_tunnel { struct gro_cells gro_cells; }; -#define TUNNEL_CSUM __cpu_to_be16(0x01) -#define TUNNEL_ROUTING __cpu_to_be16(0x02) -#define TUNNEL_KEY __cpu_to_be16(0x04) -#define TUNNEL_SEQ __cpu_to_be16(0x08) -#define TUNNEL_STRICT __cpu_to_be16(0x10) -#define TUNNEL_REC __cpu_to_be16(0x20) -#define TUNNEL_VERSION __cpu_to_be16(0x40) -#define TUNNEL_NO_KEY __cpu_to_be16(0x80) +#define TUNNEL_CSUM __cpu_to_be16(0x01) +#define TUNNEL_ROUTING __cpu_to_be16(0x02) +#define TUNNEL_KEY __cpu_to_be16(0x04) +#define TUNNEL_SEQ __cpu_to_be16(0x08) +#define TUNNEL_STRICT __cpu_to_be16(0x10) +#define TUNNEL_REC __cpu_to_be16(0x20) +#define TUNNEL_VERSION __cpu_to_be16(0x40) +#define TUNNEL_NO_KEY __cpu_to_be16(0x80) #define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) +#define TUNNEL_OAM __cpu_to_be16(0x0200) +#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) +#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800) struct tnl_ptk_info { __be16 flags; @@ -114,6 +130,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, @@ -131,6 +149,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); void ip_tunnel_dst_reset_all(struct ip_tunnel *t); +int ip_tunnel_encap_setup(struct ip_tunnel *t, + struct ip_tunnel_encap *ipencap); /* Extract dsfield from inner protocol */ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 624a8a54806d4c877cabcbefec9dd975e1204922..615b20b585452111a25085890d8fa875657dbe76 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1,6 +1,5 @@ -/* - * IP Virtual Server - * data structure and functionality definitions +/* IP Virtual Server + * data structure and functionality definitions */ #ifndef _NET_IP_VS_H @@ -12,7 +11,7 @@ #include /* for struct list_head */ #include /* for struct rwlock_t */ -#include /* for struct atomic_t */ +#include /* for struct atomic_t */ #include #include #include @@ -30,15 +29,13 @@ #endif #include /* Netw namespace */ -/* - * Generic access of ipvs struct - */ +/* Generic access of ipvs struct */ static inline struct netns_ipvs *net_ipvs(struct net* net) { return net->ipvs; } -/* - * Get net ptr from skb in traffic 
cases + +/* Get net ptr from skb in traffic cases * use skb_sknet when call is from userland (ioctl or netlink) */ static inline struct net *skb_net(const struct sk_buff *skb) @@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb) return &init_net; #endif } -/* - * This one needed for single_open_net since net is stored directly in + +/* This one needed for single_open_net since net is stored directly in * private not as a struct i.e. seq_file_net can't be used. */ static inline struct net *seq_file_single_net(struct seq_file *seq) @@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size; struct ip_vs_iphdr { __u32 len; /* IPv4 simply where L4 starts - IPv6 where L4 Transport Header starts */ + * IPv6 where L4 Transport Header starts */ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ __s16 protocol; __s32 flags; @@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, #define LeaveFunction(level) do {} while (0) #endif - -/* - * The port number of FTP service (in network order). - */ +/* The port number of FTP service (in network order). */ #define FTPPORT cpu_to_be16(21) #define FTPDATA cpu_to_be16(20) -/* - * TCP State Values - */ +/* TCP State Values */ enum { IP_VS_TCP_S_NONE = 0, IP_VS_TCP_S_ESTABLISHED, @@ -329,25 +321,19 @@ enum { IP_VS_TCP_S_LAST }; -/* - * UDP State Values - */ +/* UDP State Values */ enum { IP_VS_UDP_S_NORMAL, IP_VS_UDP_S_LAST, }; -/* - * ICMP State Values - */ +/* ICMP State Values */ enum { IP_VS_ICMP_S_NORMAL, IP_VS_ICMP_S_LAST, }; -/* - * SCTP State Values - */ +/* SCTP State Values */ enum ip_vs_sctp_states { IP_VS_SCTP_S_NONE, IP_VS_SCTP_S_INIT1, @@ -366,21 +352,18 @@ enum ip_vs_sctp_states { IP_VS_SCTP_S_LAST }; -/* - * Delta sequence info structure - * Each ip_vs_conn has 2 (output AND input seq. changes). - * Only used in the VS/NAT. +/* Delta sequence info structure + * Each ip_vs_conn has 2 (output AND input seq. changes). + * Only used in the VS/NAT. 
*/ struct ip_vs_seq { __u32 init_seq; /* Add delta from this seq */ __u32 delta; /* Delta in sequence numbers */ __u32 previous_delta; /* Delta in sequence numbers - before last resized pkt */ + * before last resized pkt */ }; -/* - * counters per cpu - */ +/* counters per cpu */ struct ip_vs_counters { __u32 conns; /* connections scheduled */ __u32 inpkts; /* incoming packets */ @@ -388,17 +371,13 @@ struct ip_vs_counters { __u64 inbytes; /* incoming bytes */ __u64 outbytes; /* outgoing bytes */ }; -/* - * Stats per cpu - */ +/* Stats per cpu */ struct ip_vs_cpu_stats { struct ip_vs_counters ustats; struct u64_stats_sync syncp; }; -/* - * IPVS statistics objects - */ +/* IPVS statistics objects */ struct ip_vs_estimator { struct list_head list; @@ -491,9 +470,7 @@ struct ip_vs_protocol { void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); }; -/* - * protocol data per netns - */ +/* protocol data per netns */ struct ip_vs_proto_data { struct ip_vs_proto_data *next; struct ip_vs_protocol *pp; @@ -520,9 +497,7 @@ struct ip_vs_conn_param { __u8 pe_data_len; }; -/* - * IP_VS structure allocated for each dynamically scheduled connection - */ +/* IP_VS structure allocated for each dynamically scheduled connection */ struct ip_vs_conn { struct hlist_node c_list; /* hashed list heads */ /* Protocol, addresses and port numbers */ @@ -535,6 +510,7 @@ struct ip_vs_conn { union nf_inet_addr daddr; /* destination address */ volatile __u32 flags; /* status flags */ __u16 protocol; /* Which protocol (TCP/UDP) */ + __u16 daf; /* Address family of the dest */ #ifdef CONFIG_NET_NS struct net *net; /* Name space */ #endif @@ -560,17 +536,18 @@ struct ip_vs_conn { struct ip_vs_dest *dest; /* real server */ atomic_t in_pkts; /* incoming packet counter */ - /* packet transmitter for different forwarding methods. If it - mangles the packet, it must return NF_DROP or better NF_STOLEN, - otherwise this must be changed to a sk_buff **. - NF_ACCEPT can be returned when destination is local. + /* Packet transmitter for different forwarding methods. If it + * mangles the packet, it must return NF_DROP or better NF_STOLEN, + * otherwise this must be changed to a sk_buff **. + * NF_ACCEPT can be returned when destination is local. */ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); /* Note: we can group the following members into a structure, - in order to save more space, and the following members are - only used in VS/NAT anyway */ + * in order to save more space, and the following members are + * only used in VS/NAT anyway + */ struct ip_vs_app *app; /* bound ip_vs_app object */ void *app_data; /* Application private data */ struct ip_vs_seq in_seq; /* incoming seq. struct */ @@ -583,9 +560,7 @@ struct ip_vs_conn { struct rcu_head rcu_head; }; -/* - * To save some memory in conn table when name space is disabled. - */ +/* To save some memory in conn table when name space is disabled. */ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) { #ifdef CONFIG_NET_NS @@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) return &init_net; #endif } + static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) { #ifdef CONFIG_NET_NS @@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, #endif } -/* - * Extended internal versions of struct ip_vs_service_user and - * ip_vs_dest_user for IPv6 support. 
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user + * for IPv6 support. * - * We need these to conveniently pass around service and destination - * options, but unfortunately, we also need to keep the old definitions to - * maintain userspace backwards compatibility for the setsockopt interface. + * We need these to conveniently pass around service and destination + * options, but unfortunately, we also need to keep the old definitions to + * maintain userspace backwards compatibility for the setsockopt interface. */ struct ip_vs_service_user_kern { /* virtual service addresses */ @@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern { /* thresholds for active connections */ u32 u_threshold; /* upper threshold */ u32 l_threshold; /* lower threshold */ + + /* Address family of addr */ + u16 af; }; /* - * The information about the virtual service offered to the net - * and the forwarding entries + * The information about the virtual service offered to the net and the + * forwarding entries. */ struct ip_vs_service { struct hlist_node s_list; /* for normal service table */ @@ -693,9 +671,8 @@ struct ip_vs_dest_dst { struct rcu_head rcu_head; }; -/* - * The real server destination forwarding entry - * with ip address, port number, and so on. +/* The real server destination forwarding entry with ip address, port number, + * and so on. */ struct ip_vs_dest { struct list_head n_list; /* for the dests in the service */ @@ -734,10 +711,7 @@ struct ip_vs_dest { unsigned int in_rs_table:1; /* we are in rs_table */ }; - -/* - * The scheduler object - */ +/* The scheduler object */ struct ip_vs_scheduler { struct list_head n_list; /* d-linked list head */ char *name; /* scheduler name */ @@ -777,9 +751,7 @@ struct ip_vs_pe { int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); }; -/* - * The application module object (a.k.a. app incarnation) - */ +/* The application module object (a.k.a. app incarnation) */ struct ip_vs_app { struct list_head a_list; /* member in app list */ int type; /* IP_VS_APP_TYPE_xxx */ @@ -795,16 +767,14 @@ struct ip_vs_app { atomic_t usecnt; /* usage counter */ struct rcu_head rcu_head; - /* - * output hook: Process packet in inout direction, diff set for TCP. + /* output hook: Process packet in inout direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff); - /* - * input hook: Process packet in outin direction, diff set for TCP. + /* input hook: Process packet in outin direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ @@ -863,9 +833,7 @@ struct ipvs_master_sync_state { struct netns_ipvs { int gen; /* Generation */ int enable; /* enable like nf_hooks do */ - /* - * Hash table: for real service lookups - */ + /* Hash table: for real service lookups */ #define IP_VS_RTAB_BITS 4 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) @@ -899,7 +867,7 @@ struct netns_ipvs { struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; #endif /* ip_vs_conn */ - atomic_t conn_count; /* connection counter */ + atomic_t conn_count; /* connection counter */ /* ip_vs_ctl */ struct ip_vs_stats tot_stats; /* Statistics & est. 
*/ @@ -986,6 +954,10 @@ struct netns_ipvs { char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; /* net name space ptr */ struct net *net; /* Needed by timer routines */ + /* Number of heterogeneous destinations, needed becaus heterogeneous + * are not supported when synchronization is enabled. + */ + unsigned int mixed_address_family_dests; }; #define DEFAULT_SYNC_THRESHOLD 3 @@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) #endif -/* - * IPVS core functions - * (from ip_vs_core.c) +/* IPVS core functions + * (from ip_vs_core.c) */ const char *ip_vs_proto_name(unsigned int proto); void ip_vs_init_hash_table(struct list_head *table, int rows); @@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows); #define IP_VS_APP_TYPE_FTP 1 -/* - * ip_vs_conn handling functions - * (from ip_vs_conn.c) +/* ip_vs_conn handling functions + * (from ip_vs_conn.c) */ - enum { IP_VS_DIR_INPUT = 0, IP_VS_DIR_OUTPUT, @@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) void ip_vs_conn_put(struct ip_vs_conn *cp); void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); -struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, +struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark); @@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) atomic_inc(&ctl_cp->n_control); } -/* - * IPVS netns init & cleanup functions - */ +/* IPVS netns init & cleanup functions */ int ip_vs_estimator_net_init(struct net *net); int ip_vs_control_net_init(struct net *net); int ip_vs_protocol_net_init(struct net *net); @@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net); void ip_vs_sync_net_cleanup(struct net *net); void ip_vs_service_net_cleanup(struct net *net); -/* - * IPVS application functions - * (from ip_vs_app.c) +/* IPVS application functions + * (from ip_vs_app.c) */ #define IP_VS_APP_MAX_PORTS 8 struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); @@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe); struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); -/* - * Use a #define to avoid all of module.h just for these trivial ops - */ +/* Use a #define to avoid all of module.h just for these trivial ops */ #define ip_vs_pe_get(pe) \ if (pe && pe->module) \ __module_get(pe->module); @@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); if (pe && pe->module) \ module_put(pe->module); -/* - * IPVS protocol functions (from ip_vs_proto.c) - */ +/* IPVS protocol functions (from ip_vs_proto.c) */ int ip_vs_protocol_init(void); void ip_vs_protocol_cleanup(void); void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); @@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp; extern struct ip_vs_protocol ip_vs_protocol_ah; extern struct ip_vs_protocol ip_vs_protocol_sctp; -/* - * Registering/unregistering scheduler functions - * (from ip_vs_sched.c) +/* Registering/unregistering scheduler functions + * (from ip_vs_sched.c) */ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); @@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, void ip_vs_scheduler_err(struct ip_vs_service *svc, const char 
*msg); - -/* - * IPVS control data and functions (from ip_vs_ctl.c) - */ +/* IPVS control data and functions (from ip_vs_ctl.c) */ extern struct ip_vs_stats ip_vs_stats; extern int sysctl_ip_vs_sync_ver; @@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void); int ip_vs_control_init(void); void ip_vs_control_cleanup(void); struct ip_vs_dest * -ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, - __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, +ip_vs_find_dest(struct net *net, int svc_af, int dest_af, + const union nf_inet_addr *daddr, __be16 dport, + const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags); void ip_vs_try_bind_dest(struct ip_vs_conn *cp); @@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) kfree(dest); } -/* - * IPVS sync daemon data and function prototypes - * (from ip_vs_sync.c) +/* IPVS sync daemon data and function prototypes + * (from ip_vs_sync.c) */ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); int stop_sync_thread(struct net *net, int state); void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); -/* - * IPVS rate estimator prototypes (from ip_vs_est.c) - */ +/* IPVS rate estimator prototypes (from ip_vs_est.c) */ void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); void ip_vs_read_estimator(struct ip_vs_stats_user *dst, struct ip_vs_stats *stats); -/* - * Various IPVS packet transmitters (from ip_vs_xmit.c) - */ +/* Various IPVS packet transmitters (from ip_vs_xmit.c) */ int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, @@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, #endif #ifdef CONFIG_SYSCTL -/* - * This is a simple mechanism to ignore packets when - * we are loaded. Just set ip_vs_drop_rate to 'n' and - * we start to drop 1/rate of the packets +/* This is a simple mechanism to ignore packets when + * we are loaded. 
Just set ip_vs_drop_rate to 'n' and + * we start to drop 1/rate of the packets */ - static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { if (!ipvs->drop_rate) @@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif -/* - * ip_vs_fwd_tag returns the forwarding tag of the connection - */ +/* ip_vs_fwd_tag returns the forwarding tag of the connection */ #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) @@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) return csum_partial(diff, sizeof(diff), oldsum); } -/* - * Forget current conntrack (unconfirmed) and attach notrack entry - */ +/* Forget current conntrack (unconfirmed) and attach notrack entry */ static inline void ip_vs_notrack(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb) } #ifdef CONFIG_IP_VS_NFCT -/* - * Netfilter connection tracking - * (from ip_vs_nfct.c) +/* Netfilter connection tracking + * (from ip_vs_nfct.c) */ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) { @@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb) static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) { } -/* CONFIG_IP_VS_NFCT */ -#endif +#endif /* CONFIG_IP_VS_NFCT */ static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { - /* - * We think the overhead of processing active connections is 256 + /* We think the overhead of processing active connections is 256 * times higher than that of inactive connections in average. (This * 256 times might not be accurate, we will change it later) We * use the following formula to estimate the overhead now: diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a2db816e8461cff706f23c725cf69372ab8bfcaa..97f472012438b10a0bcbff9eef6c553d7357ad35 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -121,6 +121,7 @@ struct frag_hdr { /* sysctls */ extern int sysctl_mld_max_msf; +extern int sysctl_mld_qrv; #define _DEVINC(net, statname, modifier, idev, field) \ ({ \ @@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); -bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, + const struct inet6_skb_parm *opt); static inline bool ipv6_accept_ra(struct inet6_dev *idev) { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index dae2e24616e16bf3051af3dd31414f91f4220af9..0ad1f47d2dc77ee472656fbbfce1afb08d1e3a6f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4,6 +4,7 @@ * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1226,7 +1227,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this - * particular key. + * particular key. 
Setting this flag does not necessarily mean that SKBs + * will have sufficient tailroom for ICV or MIC. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. @@ -1238,7 +1240,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver * if space should be prepared for the IV, but the IV * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does + * not necessarily mean that SKBs will have sufficient tailroom for ICV or + * MIC. * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received * management frames. The flag can help drivers that have a hardware * crypto implementation that doesn't deal with management frames @@ -1405,7 +1409,7 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities - * @wme: indicates whether the STA supports WME. Only valid during AP-mode. + * @wme: indicates whether the STA supports QoS/WME. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid @@ -1533,16 +1537,6 @@ struct ieee80211_tx_control { * @IEEE80211_HW_MFP_CAPABLE: * Hardware supports management frame protection (MFP, IEEE 802.11w). * - * @IEEE80211_HW_SUPPORTS_STATIC_SMPS: - * Hardware supports static spatial multiplexing powersave, - * ie. can turn off all but one chain even on HT connections - * that should be using more chains. - * - * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS: - * Hardware supports dynamic spatial multiplexing powersave, - * ie. can turn off all but one chain and then wake the rest - * up as required after, for example, rts/cts handshake. - * * @IEEE80211_HW_SUPPORTS_UAPSD: * Hardware supports Unscheduled Automatic Power Save Delivery * (U-APSD) in managed mode. The mode is configured with @@ -1606,6 +1600,9 @@ struct ieee80211_tx_control { * is not enabled the default action is to disconnect when getting the * CSA frame. * + * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload + * or tailroom of TX skbs without copying them first. + * * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands * in one command, mac80211 doesn't have to run separate scans per band. 
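A driver that never touches the payload or tailroom of a TX skb without copying it first can advertise the new IEEE80211_HW_SUPPORTS_CLONED_SKBS capability described above, letting mac80211 pass cloned skbs through without defensive copies. A minimal sketch (example_setup_hw() is hypothetical, not part of the patch):

    #include <net/mac80211.h>

    static void example_setup_hw(struct ieee80211_hw *hw)
    {
            /* we copy any skb we need to modify, so cloned TX skbs
             * may be handed to us directly
             */
            hw->flags |= IEEE80211_HW_SUPPORTS_CLONED_SKBS;
    }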
*/ @@ -1625,8 +1622,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, - IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, + /* free slots */ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, @@ -1639,7 +1635,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, - /* bit 29 unused */ + IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29, IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30, }; @@ -2666,7 +2662,9 @@ enum ieee80211_roc_type { * * @set_coverage_class: Set slot time for given coverage class as specified * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout - * accordingly. This callback is not required and may sleep. + * accordingly; coverage class equals to -1 to enable ACK timeout + * estimation algorithm (dynack). To disable dynack set valid value for + * coverage class. This callback is not required and may sleep. * * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may * be %NULL. The callback can sleep. @@ -2950,7 +2948,7 @@ struct ieee80211_ops { int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); - void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); + void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class); #ifdef CONFIG_NL80211_TESTMODE int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len); diff --git a/include/net/mld.h b/include/net/mld.h index faa1d161bf24d35244f13a9f0a6fae612eb79670..01d751303498ccdd08fd63d16134fb6a8fd64744 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -88,12 +88,15 @@ struct mld2_query { #define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) #define MLDV2_QQIC_MAN(value) ((value) & 0x0f) +#define MLD_EXP_MIN_LIMIT 32768UL +#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1) + static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) { /* RFC3810, 5.1.3. 
Maximum Response Code */ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); - if (mc_mrc < 32768) { + if (mc_mrc < MLD_EXP_MIN_LIMIT) { ret = mc_mrc; } else { unsigned long mc_man, mc_exp; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 47f425464f847fd827719ac5da99cf2749824a14..f60558d0254ca1a482ab4eb2924ecbaf8d6c169c 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -373,7 +373,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) return 0; } -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq, hh_alen; diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h new file mode 100644 index 0000000000000000000000000000000000000000..2aa6048a55c1e099ee14caff579ac184e3643bc3 --- /dev/null +++ b/include/net/netfilter/br_netfilter.h @@ -0,0 +1,6 @@ +#ifndef _BR_NETFILTER_H_ +#define _BR_NETFILTER_H_ + +void br_netfilter_enable(void); + +#endif /* _BR_NETFILTER_H_ */ diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h new file mode 100644 index 0000000000000000000000000000000000000000..a9c001c646da25c3b1f71993ed1c3a0d799950f9 --- /dev/null +++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h @@ -0,0 +1,14 @@ +#ifndef _NF_NAT_MASQUERADE_IPV4_H_ +#define _NF_NAT_MASQUERADE_IPV4_H_ + +#include + +unsigned int +nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, + const struct nf_nat_range *range, + const struct net_device *out); + +void nf_nat_masquerade_ipv4_register_notifier(void); +void nf_nat_masquerade_ipv4_unregister_notifier(void); + +#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 931fbf812171afa37c53f258213a77759cd43edb..e8427193c777b884428aa1712ff18f910ab8d24a 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -1,128 +1,13 @@ #ifndef _IPV4_NF_REJECT_H #define _IPV4_NF_REJECT_H -#include -#include -#include -#include +#include static inline void nf_send_unreach(struct sk_buff *skb_in, int code) { icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); } -/* Send RST reply */ -static void nf_send_reset(struct sk_buff *oldskb, int hook) -{ - struct sk_buff *nskb; - const struct iphdr *oiph; - struct iphdr *niph; - const struct tcphdr *oth; - struct tcphdr _otcph, *tcph; - - /* IP header checks: fragment. */ - if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) - return; - - oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), - sizeof(_otcph), &_otcph); - if (oth == NULL) - return; - - /* No RST for RST. 
*/ - if (oth->rst) - return; - - if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) - return; - - /* Check checksum */ - if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) - return; - oiph = ip_hdr(oldskb); - - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + - LL_MAX_HEADER, GFP_ATOMIC); - if (!nskb) - return; - - skb_reserve(nskb, LL_MAX_HEADER); - - skb_reset_network_header(nskb); - niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); - niph->version = 4; - niph->ihl = sizeof(struct iphdr) / 4; - niph->tos = 0; - niph->id = 0; - niph->frag_off = htons(IP_DF); - niph->protocol = IPPROTO_TCP; - niph->check = 0; - niph->saddr = oiph->daddr; - niph->daddr = oiph->saddr; - - skb_reset_transport_header(nskb); - tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); - memset(tcph, 0, sizeof(*tcph)); - tcph->source = oth->dest; - tcph->dest = oth->source; - tcph->doff = sizeof(struct tcphdr) / 4; - - if (oth->ack) - tcph->seq = oth->ack_seq; - else { - tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + - oldskb->len - ip_hdrlen(oldskb) - - (oth->doff << 2)); - tcph->ack = 1; - } - - tcph->rst = 1; - tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, - niph->daddr, 0); - nskb->ip_summed = CHECKSUM_PARTIAL; - nskb->csum_start = (unsigned char *)tcph - nskb->head; - nskb->csum_offset = offsetof(struct tcphdr, check); - - /* ip_route_me_harder expects skb->dst to be set */ - skb_dst_set_noref(nskb, skb_dst(oldskb)); - - nskb->protocol = htons(ETH_P_IP); - if (ip_route_me_harder(nskb, RTN_UNSPEC)) - goto free_nskb; - - niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); - - /* "Never happens" */ - if (nskb->len > dst_mtu(skb_dst(nskb))) - goto free_nskb; - - nf_ct_attach(nskb, oldskb); - -#ifdef CONFIG_BRIDGE_NETFILTER - /* If we use ip_local_out for bridged traffic, the MAC source on - * the RST will be ours, instead of the destination's. This confuses - * some routers/firewalls, and they drop the packet. So we need to - * build the eth header using the original destination's MAC as the - * source, and send the RST packet directly. 
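/*
 * Editor's aside -- illustrative sketch, not part of the patch. The inline
 * RST generator being deleted here (the removal continues below) becomes a
 * plain declaration backed by a shared module, so reject-style callers keep
 * the same two-argument interface; roughly (the wrapper name and its
 * parameters are assumptions for the example):
 */
#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/netfilter/ipv4/nf_reject.h>

static void example_reject_ipv4(struct sk_buff *skb, int hooknum,
				bool with_tcp_reset)
{
	if (with_tcp_reset)
		nf_send_reset(skb, hooknum);	/* now exported from a module */
	else
		nf_send_unreach(skb, ICMP_PORT_UNREACH);
}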
- */ - if (oldskb->nf_bridge) { - struct ethhdr *oeth = eth_hdr(oldskb); - nskb->dev = oldskb->nf_bridge->physindev; - niph->tot_len = htons(nskb->len); - ip_send_check(niph); - if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), - oeth->h_source, oeth->h_dest, nskb->len) < 0) - goto free_nskb; - dev_queue_xmit(nskb); - } else -#endif - ip_local_out(nskb); - - return; - - free_nskb: - kfree_skb(nskb); -} - +void nf_send_reset(struct sk_buff *oldskb, int hook); #endif /* _IPV4_NF_REJECT_H */ diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h new file mode 100644 index 0000000000000000000000000000000000000000..0a13396cd390fa35b9132ca8c8caf3da3767dd84 --- /dev/null +++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h @@ -0,0 +1,10 @@ +#ifndef _NF_NAT_MASQUERADE_IPV6_H_ +#define _NF_NAT_MASQUERADE_IPV6_H_ + +unsigned int +nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + const struct net_device *out); +void nf_nat_masquerade_ipv6_register_notifier(void); +void nf_nat_masquerade_ipv6_unregister_notifier(void); + +#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 710d17ed70b4c0ad6f2d28db779e12d976d10387..7a10cfcd8e33fd73e47e14877f31b1d5baf65b1f 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -147,7 +147,7 @@ static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) nf_ct_attach(nskb, oldskb); -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* If we use ip6_local_out for bridged traffic, the MAC source on * the RST will be ours, instead of the destination's. This confuses * some routers/firewalls, and they drop the packet. 
So we need to diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index a71dd333ac6869fdce096dce3a26be133e4d4aae..344b1ab19220c34ec14b99b112ef3f5699a846f4 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -32,10 +32,8 @@ struct nf_conn_nat { struct hlist_node bysource; struct nf_conn *ct; union nf_conntrack_nat_help help; -#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \ - defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE) +#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \ + IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6) int masq_index; #endif }; @@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum, struct nf_conn_nat *nat, const struct net_device *out) { -#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE) +#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \ + IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6) return nat->masq_index && hooknum == NF_INET_POST_ROUTING && CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL && nat->masq_index != out->ifindex; diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index 5a2919b2e09af396540348bad01f12b7deab5ef9..340c013795a49c9fcd91469ccb04a2be88f332fe 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto); int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum); + +unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, unsigned int hdrlen); +unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned 
int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + #endif /* _NF_NAT_L3PROTO_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c4d86198d3d6542088ed6db8d4daed2adf191806..3d7292392fac91ca806b12945fd9eb190c21a031 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops); * @dtype: data type (verdict or numeric type defined by userspace) * @size: maximum set size * @nelems: number of elements + * @policy: set parameterization (see enum nft_set_policies) * @ops: set ops * @flags: set flags * @klen: key length @@ -255,6 +256,7 @@ struct nft_set { u32 dtype; u32 size; u32 nelems; + u16 policy; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; u16 flags; diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h new file mode 100644 index 0000000000000000000000000000000000000000..c72729f954f41e2168d5a095db5cc1ad39f38878 --- /dev/null +++ b/include/net/netfilter/nft_masq.h @@ -0,0 +1,16 @@ +#ifndef _NFT_MASQ_H_ +#define _NFT_MASQ_H_ + +struct nft_masq { + u32 flags; +}; + +extern const struct nla_policy nft_masq_policy[]; + +int nft_masq_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr); + +#endif /* _NFT_MASQ_H_ */ diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h index 36b0da2d55bb7db470991e873d02b0fbf400d2ac..60fa1530006b45bc229dbf46bfe93ff690bc7b42 100644 --- a/include/net/netfilter/nft_reject.h +++ b/include/net/netfilter/nft_reject.h @@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx, int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr); -void nft_reject_ipv4_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt); - -void nft_reject_ipv6_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt); +int nft_reject_icmp_code(u8 code); +int nft_reject_icmpv6_code(u8 code); #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index aec5e12f9f19f1a6c506e47f60cc3056d7ce2a3d..24945cefc4fde6bfaf9c4560080c91b2e3b12d0d 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -76,6 +76,7 @@ struct netns_ipv4 { int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_nonlocal_bind; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index eade27adecf3678ed2d1568adf34badd38ed88f9..69ae41f2098c159548a9e6ee6b432eabfa369785 100644 --- a/include/net/netns/ipv6.h +++ 
b/include/net/netns/ipv6.h @@ -76,7 +76,7 @@ struct netns_ipv6 { #endif #endif atomic_t dev_addr_genid; - atomic_t rt_genid; + atomic_t fib6_sernum; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 3492434baf88e9dffb3da763de75db36480d9eb9..9da798256f0e4dff710273ce96fa108d862a9457 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -13,6 +13,19 @@ struct ctl_table_header; struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; + u8 dbits4; + u8 sbits4; + u8 dbits6; + u8 sbits6; +}; + +struct xfrm_policy_hthresh { + struct work_struct work; + seqlock_t lock; + u8 lbits4; + u8 rbits4; + u8 lbits6; + u8 rbits6; }; struct netns_xfrm { @@ -41,6 +54,7 @@ struct netns_xfrm { struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; + struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index fbfa4e471abb7f62cf31508da0f5ac3e2686fec9..9eca9ae2280c57245a4d40b3a17164a98859ccf0 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -2,6 +2,7 @@ * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * + * Copyright (C) 2014 Marvell International Ltd. * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias @@ -65,19 +66,18 @@ #define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 #define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 #define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 -#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 +#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06 #define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 #define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 #define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 -#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86 /* NCI RF Technologies */ #define NCI_NFC_RF_TECHNOLOGY_A 0x00 #define NCI_NFC_RF_TECHNOLOGY_B 0x01 #define NCI_NFC_RF_TECHNOLOGY_F 0x02 -#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 +#define NCI_NFC_RF_TECHNOLOGY_V 0x03 /* NCI Bit Rates */ #define NCI_NFC_BIT_RATE_106 0x00 @@ -87,6 +87,7 @@ #define NCI_NFC_BIT_RATE_1695 0x04 #define NCI_NFC_BIT_RATE_3390 0x05 #define NCI_NFC_BIT_RATE_6780 0x06 +#define NCI_NFC_BIT_RATE_26 0x20 /* NCI RF Protocols */ #define NCI_RF_PROTOCOL_UNKNOWN 0x00 @@ -95,6 +96,7 @@ #define NCI_RF_PROTOCOL_T3T 0x03 #define NCI_RF_PROTOCOL_ISO_DEP 0x04 #define NCI_RF_PROTOCOL_NFC_DEP 0x05 +#define NCI_RF_PROTOCOL_T5T 0x06 /* NCI RF Interfaces */ #define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 @@ -328,6 +330,12 @@ struct rf_tech_specific_params_nfcf_poll { __u8 sensf_res[18]; /* 16 or 18 Bytes */ } __packed; +struct rf_tech_specific_params_nfcv_poll { + __u8 res_flags; + __u8 dsfid; + __u8 uid[8]; /* 8 Bytes */ +} __packed; + struct nci_rf_discover_ntf { __u8 rf_discovery_id; __u8 rf_protocol; @@ -338,6 +346,7 @@ struct nci_rf_discover_ntf { struct rf_tech_specific_params_nfca_poll nfca_poll; struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; + struct rf_tech_specific_params_nfcv_poll nfcv_poll; } rf_tech_specific_params; __u8 ntf_type; @@ -372,6 +381,7 @@ struct nci_rf_intf_activated_ntf { struct rf_tech_specific_params_nfca_poll nfca_poll; struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; + struct rf_tech_specific_params_nfcv_poll nfcv_poll; } 
rf_tech_specific_params; __u8 data_exch_rf_tech_and_mode; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 1f9a0f5272fe308735ba3ab2d0c892a2884165c7..75d10e625c4969b0ddc806e5588815347ad9e73d 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -64,10 +64,11 @@ enum nci_state { struct nci_dev; struct nci_ops { - int (*open)(struct nci_dev *ndev); - int (*close)(struct nci_dev *ndev); - int (*send)(struct nci_dev *ndev, struct sk_buff *skb); - int (*setup)(struct nci_dev *ndev); + int (*open)(struct nci_dev *ndev); + int (*close)(struct nci_dev *ndev); + int (*send)(struct nci_dev *ndev, struct sk_buff *skb); + int (*setup)(struct nci_dev *ndev); + __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6da46dcf1049789f492cefd9472d0df84d4db91d..bc49967e1a68b933f0a7cd5a66a9f9395f76be00 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) { - unsigned long old_cl; - - old_cl = *clp; - *clp = cl; - return old_cl; + return xchg(clp, cl); } static inline unsigned long @@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr); -void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); +void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); @@ -170,6 +166,7 @@ struct tcf_ematch { unsigned int datalen; u16 matchid; u16 flags; + struct net *net; }; static inline int tcf_em_is_container(struct tcf_ematch *em) @@ -233,12 +230,11 @@ struct tcf_ematch_tree { struct tcf_ematch_ops { int kind; int datalen; - int (*change)(struct tcf_proto *, void *, + int (*change)(struct net *net, void *, int, struct tcf_ematch *); int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); - void (*destroy)(struct tcf_proto *, - struct tcf_ematch *); + void (*destroy)(struct tcf_ematch *); int (*dump)(struct sk_buff *, struct tcf_ematch *); struct module *owner; struct list_head link; @@ -248,7 +244,7 @@ int tcf_em_register(struct tcf_ematch_ops *); void tcf_em_unregister(struct tcf_ematch_ops *); int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, struct tcf_ematch_tree *); -void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); +void tcf_em_tree_destroy(struct tcf_ematch_tree *); int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, struct tcf_pkt_info *); @@ -305,7 +301,7 @@ struct tcf_ematch_tree { }; #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0) -#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0) +#define tcf_em_tree_destroy(t) do { (void)(t); } while(0) #define tcf_em_tree_dump(skb, t, tlv) (0) #define tcf_em_tree_change(tp, dst, src) do { } while(0) #define tcf_em_tree_match(skb, t, info) ((void)(info), 1) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index ec030cd7661693d3812035d350e242f9b271edc4..27a33833ff4a942a2dde57140a6d67d520ffd7b1 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -50,7 +50,7 
@@ typedef long psched_tdiff_t; static inline psched_time_t psched_get_time(void) { - return PSCHED_NS2TICKS(ktime_to_ns(ktime_get())); + return PSCHED_NS2TICKS(ktime_get_ns()); } static inline psched_tdiff_t @@ -65,12 +65,12 @@ struct qdisc_watchdog { }; void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); -void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); +void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle); static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) { - qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires)); + qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true); } void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); @@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab); void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, - spinlock_t *root_lock); + spinlock_t *root_lock, bool validate); void __qdisc_run(struct Qdisc *q); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 620e086c0cbeca36ab268abdaad9839d45f8a58d..d17ed6fb2f7043d84011fbf471b06e10bdf06db4 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include @@ -58,6 +60,7 @@ struct Qdisc { * multiqueue device. */ #define TCQ_F_WARN_NONWC (1 << 16) +#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; @@ -83,9 +86,15 @@ struct Qdisc { */ unsigned long state; struct sk_buff_head q; - struct gnet_stats_basic_packed bstats; + union { + struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + } __packed; unsigned int __state; - struct gnet_stats_queue qstats; + union { + struct gnet_stats_queue qstats; + struct gnet_stats_queue __percpu *cpu_qstats; + } __packed; struct rcu_head rcu_head; int padded; atomic_t refcnt; @@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc) qdisc->__state &= ~__QDISC___STATE_RUNNING; } +static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) +{ + return qdisc->flags & TCQ_F_ONETXQUEUE; +} + +static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq) +{ +#ifdef CONFIG_BQL + /* Non-BQL migrated drivers will return 0, too. */ + return dql_avail(&txq->dql); +#else + return 0; +#endif +} + static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) { return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? 
true : false; @@ -143,7 +167,7 @@ struct Qdisc_class_ops { void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); + struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); @@ -212,8 +236,8 @@ struct tcf_proto_ops { struct tcf_proto { /* Fast access part */ - struct tcf_proto *next; - void *root; + struct tcf_proto __rcu *next; + void __rcu *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); @@ -225,6 +249,7 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; + struct rcu_head rcu; }; struct qdisc_skb_cb { @@ -260,7 +285,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc) static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) { - return qdisc->dev_queue->qdisc; + struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); + + return q; } static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) @@ -377,7 +404,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); void tcf_destroy(struct tcf_proto *tp); -void tcf_destroy_chain(struct tcf_proto **fl); +void tcf_destroy_chain(struct tcf_proto __rcu **fl); /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) @@ -385,7 +412,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) struct Qdisc *qdisc; for (; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); qdisc_reset(qdisc); @@ -403,13 +430,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev) static inline bool qdisc_all_tx_empty(const struct net_device *dev) { unsigned int i; + + rcu_read_lock(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - const struct Qdisc *q = txq->qdisc; + const struct Qdisc *q = rcu_dereference(txq->qdisc); - if (q->q.qlen) + if (q->q.qlen) { + rcu_read_unlock(); return false; + } } + rcu_read_unlock(); return true; } @@ -417,9 +449,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev) static inline bool qdisc_tx_changing(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != txq->qdisc_sleeping) + if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) return true; } return false; @@ -429,9 +462,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev) static inline bool qdisc_tx_is_noop(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != &noop_qdisc) + if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) return false; } return true; @@ -477,6 +511,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; } +static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) +{ + return q->flags & TCQ_F_CPUSTATS; +} static inline void bstats_update(struct gnet_stats_basic_packed 
*bstats, const struct sk_buff *skb) @@ -485,17 +523,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats, bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; } +static inline void qdisc_bstats_update_cpu(struct Qdisc *sch, + const struct sk_buff *skb) +{ + struct gnet_stats_basic_cpu *bstats = + this_cpu_ptr(sch->cpu_bstats); + + u64_stats_update_begin(&bstats->syncp); + bstats_update(&bstats->bstats, skb); + u64_stats_update_end(&bstats->syncp); +} + static inline void qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb) { bstats_update(&sch->bstats, skb); } +static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog -= qdisc_pkt_len(skb); +} + +static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog += qdisc_pkt_len(skb); +} + +static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) +{ + sch->qstats.drops += count; +} + +static inline void qdisc_qstats_drop(struct Qdisc *sch) +{ + sch->qstats.drops++; +} + +static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch) +{ + struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats); + + qstats->drops++; +} + +static inline void qdisc_qstats_overlimit(struct Qdisc *sch) +{ + sch->qstats.overlimits++; +} + static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff_head *list) { __skb_queue_tail(list, skb); - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } @@ -511,7 +594,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, struct sk_buff *skb = __skb_dequeue(list); if (likely(skb != NULL)) { - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); } @@ -530,7 +613,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, if (likely(skb != NULL)) { unsigned int len = qdisc_pkt_len(skb); - sch->qstats.backlog -= len; + qdisc_qstats_backlog_dec(sch, skb); kfree_skb(skb); return len; } @@ -549,7 +632,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, struct sk_buff *skb = __skb_dequeue_tail(list); if (likely(skb != NULL)) - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); return skb; } @@ -631,14 +714,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch) static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) { kfree_skb(skb); - sch->qstats.drops++; + qdisc_qstats_drop(sch); return NET_XMIT_DROP; } static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); #ifdef CONFIG_NET_CLS_ACT if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index f22538e68245082a8a8663260007bcae58a50be2..d4a20d00461cff61767c7e4d0aaae1b2c05bfa4e 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -115,7 +115,7 @@ typedef enum { * analysis of the state functions, but in reality just taken from * thin air in the hopes othat we don't trigger a kernel panic. 
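/*
 * Editor's aside -- illustrative sketch, not part of the patch. The
 * qdisc_qstats_*() wrappers added above replace direct sch->qstats.*
 * updates so that per-cpu statistics (TCQ_F_CPUSTATS) can be layered in
 * without touching every scheduler. A toy tail-drop enqueue written only
 * with helpers from this header would look roughly like this:
 */
#include <net/sch_generic.h>

static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < sch->limit))
		return __qdisc_enqueue_tail(skb, sch, &sch->q);

	/* frees the skb and bumps qstats.drops via qdisc_qstats_drop() */
	return qdisc_drop(skb, sch);
}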
*/ -#define SCTP_MAX_NUM_COMMANDS 14 +#define SCTP_MAX_NUM_COMMANDS 20 typedef union { void *zero_all; /* Set to NULL to clear the entire union */ diff --git a/include/net/snmp.h b/include/net/snmp.h index f1f27fdbb0d5738d6f3f3fbb93a79d240a1129b5..8fd2f498782eb78e0dd214ba05390c14e0260a5a 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -146,19 +146,15 @@ struct linux_xfrm_mib { #define SNMP_ADD_STATS(mib, field, addend) \ this_cpu_add(mib->mibs[field], addend) -/* - * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr" - * to make @ptr a non-percpu pointer. - */ #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ this_cpu_inc(ptr[basefield##PKTS]); \ this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ __this_cpu_inc(ptr[basefield##PKTS]); \ __this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) diff --git a/include/net/sock.h b/include/net/sock.h index 591e607cca358ead95cca0c724bb65bc2c1cf0d8..7db3db112baa5eaa9ce1958c7837f89dbafde6a8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1569,7 +1569,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, void sock_wfree(struct sk_buff *skb); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); +void sock_efree(struct sk_buff *skb); +#ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); +#else +#define sock_edemux(skb) sock_efree(skb) +#endif int sock_setsockopt(struct socket *sock, int level, int op, char __user *optval, unsigned int optlen); @@ -2036,6 +2041,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); +struct sk_buff *sock_dequeue_err_skb(struct sock *sk); /* * Recover an error report and clear atomically @@ -2188,6 +2194,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, sk->sk_stamp = skb->tstamp; } +void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); + /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet @@ -2195,7 +2203,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, * * Note : callers should take care of initial *tx_flags value (usually 0) */ -void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); +static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +{ + if (unlikely(sk->sk_tsflags)) + __sock_tx_timestamp(sk, tx_flags); + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) + *tx_flags |= SKBTX_WIFI_STATUS; +} /** * sk_eat_skb - Release a skb if it is no longer needed diff --git a/include/net/tcp.h b/include/net/tcp.h index 7523c325673e980db6494d6ba83603d97f7d1ecf..74efeda994b3938fe1633082e0f23ccc667753c6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -669,6 +669,12 @@ void tcp_send_window_probe(struct sock *sk); */ #define tcp_time_stamp ((__u32)(jiffies)) +static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) +{ + return skb->skb_mstamp.stamp_jiffies; +} + + #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) #define TCPHDR_FIN 0x01 @@ -687,15 +693,18 @@ void tcp_send_window_probe(struct sock *sk); * If this grows please adjust 
skbuff.h:skbuff->cb[xxx] size appropriately. */ struct tcp_skb_cb { - union { - struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) - struct inet6_skb_parm h6; -#endif - } header; /* For incoming frames */ __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ - __u32 when; /* used to compute rtt's */ + union { + /* Note : tcp_tw_isn is used in input path only + * (isn chosen by tcp_timewait_state_process()) + * + * tcp_gso_segs is used in write queue only, + * cf tcp_skb_pcount() + */ + __u32 tcp_tw_isn; + __u32 tcp_gso_segs; + }; __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK/FACK. */ @@ -711,33 +720,32 @@ struct tcp_skb_cb { __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ /* 1 byte hole */ __u32 ack_seq; /* Sequence number ACK'd */ + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; /* For incoming frames */ }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) -/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set - * - * If we receive a SYN packet with these bits set, it means a network is - * playing bad games with TOS bits. In order to avoid possible false congestion - * notifications, we disable TCP ECN negociation. +/* Due to TSO, an SKB can be composed of multiple actual + * packets. To keep these tracked properly, we use this. */ -static inline void -TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb, - struct net *net) +static inline int tcp_skb_pcount(const struct sk_buff *skb) { - const struct tcphdr *th = tcp_hdr(skb); + return TCP_SKB_CB(skb)->tcp_gso_segs; +} - if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr && - INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) - inet_rsk(req)->ecn_ok = 1; +static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) +{ + TCP_SKB_CB(skb)->tcp_gso_segs = segs; } -/* Due to TSO, an SKB can be composed of multiple actual - * packets. To keep these tracked properly, we use this. - */ -static inline int tcp_skb_pcount(const struct sk_buff *skb) +static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) { - return skb_shinfo(skb)->gso_segs; + TCP_SKB_CB(skb)->tcp_gso_segs += segs; } /* This is valid iff tcp_skb_pcount() > 1. 
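/*
 * Editor's aside -- illustrative sketch, not part of the patch. With the
 * GSO segment count moved from skb_shinfo() into TCP_SKB_CB, write-queue
 * code is expected to touch it only through the accessors introduced
 * above, e.g. when splitting a queued segment (function and parameter
 * names are assumptions for the example):
 */
#include <net/tcp.h>

static void example_move_pcount(struct sk_buff *skb, struct sk_buff *tail,
				int moved_segs)
{
	/* hand @moved_segs of @skb's segment accounting to @tail */
	tcp_skb_pcount_set(tail, moved_segs);
	tcp_skb_pcount_add(skb, -moved_segs);

	WARN_ON_ONCE(tcp_skb_pcount(skb) <= 0);
}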
*/ @@ -752,8 +760,17 @@ enum tcp_ca_event { CA_EVENT_CWND_RESTART, /* congestion window restart */ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ CA_EVENT_LOSS, /* loss timeout */ - CA_EVENT_FAST_ACK, /* in sequence ack */ - CA_EVENT_SLOW_ACK, /* other ack */ + CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ + CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ + CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ + CA_EVENT_NON_DELAYED_ACK, +}; + +/* Information about inbound ACK, passed to cong_ops->in_ack_event() */ +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */ + CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */ + CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */ }; /* @@ -763,7 +780,10 @@ enum tcp_ca_event { #define TCP_CA_MAX 128 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) +/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */ #define TCP_CONG_NON_RESTRICTED 0x1 +/* Requires ECN/ECT set on all packets */ +#define TCP_CONG_NEEDS_ECN 0x2 struct tcp_congestion_ops { struct list_head list; @@ -782,6 +802,8 @@ struct tcp_congestion_ops { void (*set_state)(struct sock *sk, u8 new_state); /* call when cwnd event occurs (optional) */ void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); + /* call when ack arrives (optional) */ + void (*in_ack_event)(struct sock *sk, u32 flags); /* new value of cwnd after loss (optional) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ @@ -796,6 +818,7 @@ struct tcp_congestion_ops { int tcp_register_congestion_control(struct tcp_congestion_ops *type); void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); +void tcp_assign_congestion_control(struct sock *sk); void tcp_init_congestion_control(struct sock *sk); void tcp_cleanup_congestion_control(struct sock *sk); int tcp_set_default_congestion_control(const char *name); @@ -804,14 +827,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); int tcp_set_congestion_control(struct sock *sk, const char *name); -int tcp_slow_start(struct tcp_sock *tp, u32 acked); +void tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); -extern struct tcp_congestion_ops tcp_init_congestion_ops; u32 tcp_reno_ssthresh(struct sock *sk); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); extern struct tcp_congestion_ops tcp_reno; +static inline bool tcp_ca_needs_ecn(const struct sock *sk) +{ + const struct inet_connection_sock *icsk = inet_csk(sk); + + return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN; +} + static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) { struct inet_connection_sock *icsk = inet_csk(sk); diff --git a/include/net/udp.h b/include/net/udp.h index 70f941368ace488dc93f4e78f81b1dca935cd87f..07f9b70962f64f0a7e80fe1ebbc81996be2d4567 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr, void udp_set_csum(bool nocheck, struct sk_buff *skb, __be32 saddr, __be32 daddr, int len); +struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct udphdr *uh); +int udp_gro_complete(struct sk_buff *skb, int nhoff); + +static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) +{ + struct udphdr *uh; + unsigned int hlen, off; + + off = skb_gro_offset(skb); + hlen = off 
+ sizeof(*uh); + uh = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, hlen)) + uh = skb_gro_header_slow(skb, hlen, off); + + return uh; +} + /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ static inline void udp_lib_hash(struct sock *sk) { @@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); int udp_disconnect(struct sock *sk, int flags); unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, - netdev_features_t features); + netdev_features_t features, + bool is_ipv6); int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udp_lib_setsockopt(struct sock *sk, int level, int optname, diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index ffd69cbded3529717d4d5567a804c5dd952502de..a47790bcaa3831b1c2692b3cf27c4306ff39e631 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -1,6 +1,14 @@ #ifndef __NET_UDP_TUNNEL_H #define __NET_UDP_TUNNEL_H +#include +#include + +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#endif + struct udp_port_cfg { u8 family; @@ -26,7 +34,80 @@ struct udp_port_cfg { use_udp6_rx_checksums:1; }; -int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp); +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; /* user data used by encap_rcv call back */ + /* Used for setting up udp_sock fields, see udp.h for details */ + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; + udp_tunnel_encap_destroy_t encap_destroy; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. */ +int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, + __be16 dst_port); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, + bool udp_csum) +{ + int type = udp_csum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + + return iptunnel_handle_offloads(skb, udp_csum, type); +} + +static inline void udp_tunnel_encap_enable(struct socket *sock) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sock->sk->sk_family == PF_INET6) + ipv6_stub->udpv6_encap_enable(); + else +#endif + udp_encap_enable(); +} #endif diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 721e9c3b11bddb208927852d223a36f548a3cade..dc4865e90fe489c4859659b3a937e7c1f8a7a4bb 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); +void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); int verify_spi_info(u8 proto, u32 min, u32 max); int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h deleted file mode 100644 index 30d48f6da2281f0da209ec66f69e062da5f55aa4..0000000000000000000000000000000000000000 --- a/include/rxrpc/types.h +++ /dev/null @@ -1,41 +0,0 @@ -/* types.h: Rx types - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_TYPES_H -#define _LINUX_RXRPC_TYPES_H - -#include -#include -#include -#include -#include -#include - -typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */ -typedef uint32_t rxrpc_serial_t; /* Rx message serial number */ -typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ -typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ - -struct rxrpc_call; -struct rxrpc_connection; -struct rxrpc_header; -struct rxrpc_message; -struct rxrpc_operation; -struct rxrpc_peer; -struct rxrpc_service; -typedef struct rxrpc_timer rxrpc_timer_t; -struct rxrpc_transport; - -typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call); - -#endif /* _LINUX_RXRPC_TYPES_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 11d11bc5c78f2d480ecac331cbf5d73f58001bb7..22749c13411775a77cabae4617f228228aadd47f 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -705,9 +705,11 @@ __SYSCALL(__NR_seccomp, sys_seccomp) __SYSCALL(__NR_getrandom, sys_getrandom) #define __NR_memfd_create 279 __SYSCALL(__NR_memfd_create, sys_memfd_create) +#define __NR_bpf 280 +__SYSCALL(__NR_bpf, sys_bpf) #undef __NR_syscalls -#define __NR_syscalls 280 +#define __NR_syscalls 281 /* * All syscalls below here should go away really, diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index be88166349a128ca80e521ee2cef80ca8617a702..70e150ebc6c9392f4d49aa3744925a82af2561c5 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -67,6 +67,7 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h diff --git 
a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h new file mode 100644 index 0000000000000000000000000000000000000000..31b0ac208a52e8772467c46e6db45f9bb7efc36c --- /dev/null +++ b/include/uapi/linux/bpf.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _UAPI__LINUX_BPF_H__ +#define _UAPI__LINUX_BPF_H__ + +#include + +/* Extended instruction set based on top of classic BPF */ + +/* instruction classes */ +#define BPF_ALU64 0x07 /* alu mode in double word width */ + +/* ld/ldx fields */ +#define BPF_DW 0x18 /* double word */ +#define BPF_XADD 0xc0 /* exclusive add */ + +/* alu/jmp fields */ +#define BPF_MOV 0xb0 /* mov reg to reg */ +#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ + +/* change endianness of a register */ +#define BPF_END 0xd0 /* flags for endianness conversion: */ +#define BPF_TO_LE 0x00 /* convert to little-endian */ +#define BPF_TO_BE 0x08 /* convert to big-endian */ +#define BPF_FROM_LE BPF_TO_LE +#define BPF_FROM_BE BPF_TO_BE + +#define BPF_JNE 0x50 /* jump != */ +#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ +#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_CALL 0x80 /* function call */ +#define BPF_EXIT 0x90 /* function return */ + +/* Register numbers */ +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + +/* BPF has 10 general purpose 64-bit registers and stack frame. */ +#define MAX_BPF_REG __MAX_BPF_REG + +struct bpf_insn { + __u8 code; /* opcode */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ + __s16 off; /* signed offset */ + __s32 imm; /* signed immediate constant */ +}; + +/* BPF syscall commands */ +enum bpf_cmd { + /* create a map with given type and attributes + * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size) + * returns fd or negative error + * map is deleted when fd is closed + */ + BPF_MAP_CREATE, + + /* lookup key in a given map + * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->value + * returns zero and stores found elem into value + * or negative error + */ + BPF_MAP_LOOKUP_ELEM, + + /* create or update key/value pair in a given map + * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->value + * returns zero or negative error + */ + BPF_MAP_UPDATE_ELEM, + + /* find and delete elem by key in a given map + * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key + * returns zero or negative error + */ + BPF_MAP_DELETE_ELEM, + + /* lookup key in a given map and return next key + * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->next_key + * returns zero and stores next key or negative error + */ + BPF_MAP_GET_NEXT_KEY, + + /* verify and load eBPF program + * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size) + * Using attr->prog_type, attr->insns, attr->license + * returns fd or negative error + */ + BPF_PROG_LOAD, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC, +}; + +union bpf_attr { + struct { /* anonymous struct used by 
BPF_MAP_CREATE command */ + __u32 map_type; /* one of enum bpf_map_type */ + __u32 key_size; /* size of key in bytes */ + __u32 value_size; /* size of value in bytes */ + __u32 max_entries; /* max number of entries in a map */ + }; + + struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ + __u32 map_fd; + __aligned_u64 key; + union { + __aligned_u64 value; + __aligned_u64 next_key; + }; + }; + + struct { /* anonymous struct used by BPF_PROG_LOAD command */ + __u32 prog_type; /* one of enum bpf_prog_type */ + __u32 insn_cnt; + __aligned_u64 insns; + __aligned_u64 license; + __u32 log_level; /* verbosity level of verifier */ + __u32 log_size; /* size of user buffer */ + __aligned_u64 log_buf; /* user supplied buffer */ + }; +} __attribute__((aligned(8))); + +/* integer value in 'imm' field of BPF_CALL instruction selects which helper + * function eBPF program intends to call + */ +enum bpf_func_id { + BPF_FUNC_unspec, + __BPF_FUNC_MAX_ID, +}; + +#endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index e3c7a719c76b4a586c40da9506adaa095857ede4..99b43056a6feb3db4617143df6d7753bbf124ed2 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -209,6 +209,33 @@ struct ethtool_value { __u32 data; }; +enum tunable_id { + ETHTOOL_ID_UNSPEC, + ETHTOOL_RX_COPYBREAK, + ETHTOOL_TX_COPYBREAK, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC, + ETHTOOL_TUNABLE_U8, + ETHTOOL_TUNABLE_U16, + ETHTOOL_TUNABLE_U32, + ETHTOOL_TUNABLE_U64, + ETHTOOL_TUNABLE_STRING, + ETHTOOL_TUNABLE_S8, + ETHTOOL_TUNABLE_S16, + ETHTOOL_TUNABLE_S32, + ETHTOOL_TUNABLE_S64, +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + /** * struct ethtool_regs - hardware register dump * @cmd: Command number = %ETHTOOL_GREGS @@ -1152,6 +1179,8 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ #define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ +#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */ +#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h new file mode 100644 index 0000000000000000000000000000000000000000..8df06894da23dac0233bc8e7b0da28b693aba20f --- /dev/null +++ b/include/uapi/linux/fou.h @@ -0,0 +1,39 @@ +/* fou.h - FOU Interface */ + +#ifndef _UAPI_LINUX_FOU_H +#define _UAPI_LINUX_FOU_H + +/* NETLINK_GENERIC related info + */ +#define FOU_GENL_NAME "fou" +#define FOU_GENL_VERSION 0x1 + +enum { + FOU_ATTR_UNSPEC, + FOU_ATTR_PORT, /* u16 */ + FOU_ATTR_AF, /* u8 */ + FOU_ATTR_IPPROTO, /* u8 */ + FOU_ATTR_TYPE, /* u8 */ + + __FOU_ATTR_MAX, +}; + +#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1) + +enum { + FOU_CMD_UNSPEC, + FOU_CMD_ADD, + FOU_CMD_DEL, + + __FOU_CMD_MAX, +}; + +enum { + FOU_ENCAP_UNSPEC, + FOU_ENCAP_DIRECT, + FOU_ENCAP_GUE, +}; + +#define FOU_CMD_MAX (__FOU_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_FOU_H */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 0f8210b8e0bc47ac0b7faab45a12ca6dcfbdf72d..aa63ed023c2b96b61b42231f9dd9b34b6ae46b66 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -128,6 +128,7 @@ #define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ #define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ #define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ 
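/*
 * Editor's aside -- illustrative userspace sketch, not part of the patch;
 * it ties together the new uapi bpf.h and the __NR_bpf number added earlier
 * in this series. The wrapper name is an assumption, 280 is only the
 * asm-generic syscall number (other architectures wire up their own), and
 * with BPF_MAP_TYPE_UNSPEC as the sole map type the call is expected to
 * fail until real map types are added.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_bpf
#define __NR_bpf 280	/* asm-generic value from the unistd.h hunk above */
#endif

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int example_map_create(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_UNSPEC;	/* only type defined so far */
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 16;

	/* returns a new map fd, or -1 with errno set */
	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}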
+#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */ /* * This is an Ethernet frame header. diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ff957604a7213136d72bf6ec0b34989e7637ccc4..0bdb77e16875342ad86fe165727a1034a395b731 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -215,6 +215,18 @@ enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_NONE, }; +/* Bridge section */ + +enum { + IFLA_BR_UNSPEC, + IFLA_BR_FORWARD_DELAY, + IFLA_BR_HELLO_TIME, + IFLA_BR_MAX_AGE, + __IFLA_BR_MAX, +}; + +#define IFLA_BR_MAX (__IFLA_BR_MAX - 1) + enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, @@ -291,6 +303,10 @@ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, + IFLA_MACVLAN_MACADDR_MODE, + IFLA_MACVLAN_MACADDR, + IFLA_MACVLAN_MACADDR_DATA, + IFLA_MACVLAN_MACADDR_COUNT, __IFLA_MACVLAN_MAX, }; @@ -301,6 +317,14 @@ enum macvlan_mode { MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ + MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ +}; + +enum macvlan_macaddr_mode { + MACVLAN_MACADDR_ADD, + MACVLAN_MACADDR_DEL, + MACVLAN_MACADDR_FLUSH, + MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 3bce9e9d9f7c8d82c1e8064904098a73cdd4fb81..280d9e092283673c4ac6ef37c3b5ecec2cb406e6 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -53,10 +53,23 @@ enum { IFLA_IPTUN_6RD_RELAY_PREFIX, IFLA_IPTUN_6RD_PREFIXLEN, IFLA_IPTUN_6RD_RELAY_PREFIXLEN, + IFLA_IPTUN_ENCAP_TYPE, + IFLA_IPTUN_ENCAP_FLAGS, + IFLA_IPTUN_ENCAP_SPORT, + IFLA_IPTUN_ENCAP_DPORT, __IFLA_IPTUN_MAX, }; #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE, + TUNNEL_ENCAP_FOU, + TUNNEL_ENCAP_GUE, +}; + +#define TUNNEL_ENCAP_FLAG_CSUM (1<<0) +#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) + /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 @@ -94,6 +107,10 @@ enum { IFLA_GRE_ENCAP_LIMIT, IFLA_GRE_FLOWINFO, IFLA_GRE_FLAGS, + IFLA_GRE_ENCAP_TYPE, + IFLA_GRE_ENCAP_FLAGS, + IFLA_GRE_ENCAP_SPORT, + IFLA_GRE_ENCAP_DPORT, __IFLA_GRE_MAX, }; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index bbde90fa58380bb6d6940d9ec7454af7a4d55608..d65c0a09efd32041e3dd07ff80d0a421f0de6ae8 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -110,10 +110,10 @@ enum { INET_DIAG_TCLASS, INET_DIAG_SKMEMINFO, INET_DIAG_SHUTDOWN, + INET_DIAG_DCTCPINFO, }; -#define INET_DIAG_MAX INET_DIAG_SHUTDOWN - +#define INET_DIAG_MAX INET_DIAG_DCTCPINFO /* INET_DIAG_MEM */ @@ -133,5 +133,14 @@ struct tcpvegas_info { __u32 tcpv_minrtt; }; +/* INET_DIAG_DCTCPINFO */ + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; #endif /* _UAPI_INET_DIAG_H_ */ diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index fbcffe8041f7aff03046f0e901902f44cb094d0d..cabe95d5b4613c78af31f16be630ef03146a7d0b 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -384,6 +384,9 @@ enum { IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ + + IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */ + __IPVS_DEST_ATTR_MAX, }; diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h 
b/include/uapi/linux/netfilter/ipset/ip_set.h index 78c2f2e799208a467b75c8035b4d3362365362bb..ca03119111a2182dd5ff9b4f930153cf066e4ad3 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -115,6 +115,9 @@ enum { IPSET_ATTR_BYTES, IPSET_ATTR_PACKETS, IPSET_ATTR_COMMENT, + IPSET_ATTR_SKBMARK, + IPSET_ATTR_SKBPRIO, + IPSET_ATTR_SKBQUEUE, __IPSET_ATTR_ADT_MAX, }; #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) @@ -147,6 +150,7 @@ enum ipset_errno { IPSET_ERR_COUNTER, IPSET_ERR_COMMENT, IPSET_ERR_INVALID_MARKMASK, + IPSET_ERR_SKBINFO, /* Type specific error codes */ IPSET_ERR_TYPE_SPECIFIC = 4352, @@ -170,6 +174,12 @@ enum ipset_cmd_flags { IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS), IPSET_FLAG_BIT_RETURN_NOMATCH = 7, IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH), + IPSET_FLAG_BIT_MAP_SKBMARK = 8, + IPSET_FLAG_MAP_SKBMARK = (1 << IPSET_FLAG_BIT_MAP_SKBMARK), + IPSET_FLAG_BIT_MAP_SKBPRIO = 9, + IPSET_FLAG_MAP_SKBPRIO = (1 << IPSET_FLAG_BIT_MAP_SKBPRIO), + IPSET_FLAG_BIT_MAP_SKBQUEUE = 10, + IPSET_FLAG_MAP_SKBQUEUE = (1 << IPSET_FLAG_BIT_MAP_SKBQUEUE), IPSET_FLAG_CMD_MAX = 15, }; @@ -187,6 +197,8 @@ enum ipset_cadt_flags { IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT), IPSET_FLAG_BIT_WITH_FORCEADD = 5, IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD), + IPSET_FLAG_BIT_WITH_SKBINFO = 6, + IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO), IPSET_FLAG_CADT_MAX = 15, }; diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h index 1ad3659102b64d54a8ad814fd7be11132a136319..0880781ad7b699811d5971cbf18f3cd2ccd3082d 100644 --- a/include/uapi/linux/netfilter/nf_nat.h +++ b/include/uapi/linux/netfilter/nf_nat.h @@ -13,6 +13,11 @@ #define NF_NAT_RANGE_PROTO_RANDOM_ALL \ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY) + struct nf_nat_ipv4_range { unsigned int flags; __be32 min_ip; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 801bdd1e56e33b168b54705e0bfb8464e076f3b5..c26df6787fb075a4d2317a9d9e8e897813df8e0f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -51,6 +51,8 @@ enum nft_verdicts { * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes) * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes) * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes) + * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes) + * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -68,6 +70,8 @@ enum nf_tables_msg_types { NFT_MSG_NEWSETELEM, NFT_MSG_GETSETELEM, NFT_MSG_DELSETELEM, + NFT_MSG_NEWGEN, + NFT_MSG_GETGEN, NFT_MSG_MAX, }; @@ -571,6 +575,10 @@ enum nft_exthdr_attributes { * @NFT_META_L4PROTO: layer 4 protocol number * @NFT_META_BRI_IIFNAME: packet input bridge interface name * @NFT_META_BRI_OIFNAME: packet output bridge interface name + * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback + * @NFT_META_CPU: cpu id through smp_processor_id() + * @NFT_META_IIFGROUP: packet input interface group + * @NFT_META_OIFGROUP: packet output interface group */ enum 
nft_meta_keys { NFT_META_LEN, @@ -592,6 +600,10 @@ enum nft_meta_keys { NFT_META_L4PROTO, NFT_META_BRI_IIFNAME, NFT_META_BRI_OIFNAME, + NFT_META_PKTTYPE, + NFT_META_CPU, + NFT_META_IIFGROUP, + NFT_META_OIFGROUP, }; /** @@ -737,12 +749,33 @@ enum nft_queue_attributes { * * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable * @NFT_REJECT_TCP_RST: reject using TCP RST + * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet */ enum nft_reject_types { NFT_REJECT_ICMP_UNREACH, NFT_REJECT_TCP_RST, + NFT_REJECT_ICMPX_UNREACH, }; +/** + * enum nft_reject_code - Generic reject codes for IPv4/IPv6 + * + * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable + * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable + * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable + * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited + * + * These codes are mapped to real ICMP and ICMPv6 codes. + */ +enum nft_reject_inet_code { + NFT_REJECT_ICMPX_NO_ROUTE = 0, + NFT_REJECT_ICMPX_PORT_UNREACH, + NFT_REJECT_ICMPX_HOST_UNREACH, + NFT_REJECT_ICMPX_ADMIN_PROHIBITED, + __NFT_REJECT_ICMPX_MAX +}; +#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX + 1) + /** * enum nft_reject_attributes - nf_tables reject expression netlink attributes * @@ -777,6 +810,7 @@ enum nft_nat_types { * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_NAT_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) */ enum nft_nat_attributes { NFTA_NAT_UNSPEC, @@ -786,8 +820,33 @@ enum nft_nat_attributes { NFTA_NAT_REG_ADDR_MAX, NFTA_NAT_REG_PROTO_MIN, NFTA_NAT_REG_PROTO_MAX, + NFTA_NAT_FLAGS, __NFTA_NAT_MAX }; #define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) +/** + * enum nft_masq_attributes - nf_tables masquerade expression attributes + * + * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_masq_attributes { + NFTA_MASQ_UNSPEC, + NFTA_MASQ_FLAGS, + __NFTA_MASQ_MAX +}; +#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) + +/** + * enum nft_gen_attributes - nf_tables ruleset generation attributes + * + * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) + */ +enum nft_gen_attributes { + NFTA_GEN_UNSPEC, + NFTA_GEN_ID, + __NFTA_GEN_MAX +}; +#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1) + #endif /* _LINUX_NF_TABLES_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h index 51404ec190223a84f9f0c72393d435c674b8f11c..f3e34dbbf9666d79f481c9d879cafbc2d8045022 100644 --- a/include/uapi/linux/netfilter/nfnetlink_acct.h +++ b/include/uapi/linux/netfilter/nfnetlink_acct.h @@ -28,9 +28,17 @@ enum nfnl_acct_type { NFACCT_USE, NFACCT_FLAGS, NFACCT_QUOTA, + NFACCT_FILTER, __NFACCT_MAX }; #define NFACCT_MAX (__NFACCT_MAX - 1) +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC, + NFACCT_FILTER_MASK, + NFACCT_FILTER_VALUE, + __NFACCT_FILTER_MAX +}; +#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1) #endif /* _UAPI_NFNL_ACCT_H_ */ diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index 964d3d42f8749d7e697aee47c42e3e31b67c5e48..d6a1df1f2947ba6aa0cc8680a2efb8f61a0ae178 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ b/include/uapi/linux/netfilter/xt_set.h @@ -71,4 +71,14 @@ struct xt_set_info_match_v3 { __u32 flags; }; +/* Revision 3 target 
*/ + +struct xt_set_info_target_v3 { + struct xt_set_info add_set; + struct xt_set_info del_set; + struct xt_set_info map_set; + __u32 flags; + __u32 timeout; +}; + #endif /*_XT_SET_H*/ diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h index 250f502902bb8e6178ea89d1792471f04a8f93c6..8c2b16a1f5a0b3c02dfbb754b0aed1b96f866758 100644 --- a/include/uapi/linux/netfilter_arp/arpt_mangle.h +++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h @@ -13,7 +13,7 @@ struct arpt_mangle union { struct in_addr tgt_ip; } u_t; - u_int8_t flags; + __u8 flags; int target; }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f1db15b9c041ccad0f13a4206e73245b59e7c236..4b28dc07bcb1fd3ea358b12c68432bf82fb27b23 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -722,6 +722,22 @@ * QoS mapping is relevant for IP packets, it is only valid during an * association. This is cleared on disassociation and AP restart. * + * @NL80211_CMD_ADD_TX_TS: Ask the kernel to add a traffic stream for the given + * %NL80211_ATTR_TSID and %NL80211_ATTR_MAC with %NL80211_ATTR_USER_PRIO + * and %NL80211_ATTR_ADMITTED_TIME parameters. + * Note that the action frame handshake with the AP shall be handled by + * userspace via the normal management RX/TX framework, this only sets + * up the TX TS in the driver/device. + * If the admitted time attribute is not added then the request just checks + * if a subsequent setup could be successful, the intent is to use this to + * avoid setting up a session with the AP when local restrictions would + * make that impossible. However, the subsequent "real" setup may still + * fail even if the check was successful. + * @NL80211_CMD_DEL_TX_TS: Remove an existing TS with the %NL80211_ATTR_TSID + * and %NL80211_ATTR_MAC parameters. It isn't necessary to call this + * before removing a station entry entirely, or before disassociating + * or similar, cleanup will happen in the driver/device in this case. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -893,6 +909,9 @@ enum nl80211_commands { NL80211_CMD_SET_QOS_MAP, + NL80211_CMD_ADD_TX_TS, + NL80211_CMD_DEL_TX_TS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1594,6 +1613,31 @@ enum nl80211_commands { * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. * + * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection + * shall support Radio Resource Measurements (11k). This attribute can be + * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests. + * User space applications are expected to use this flag only if the + * underlying device supports these minimal RRM features: + * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES, + * %NL80211_FEATURE_QUIET, + * If this flag is used, driver must add the Power Capabilities IE to the + * association request. In addition, it must also set the RRM capability + * flag in the association request's Capability Info field. + * + * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout + * estimation algorithm (dynack). In order to activate dynack + * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower + * drivers to indicate dynack capability. Dynack is automatically disabled + * setting valid value for coverage class. 
+ * + * @NL80211_ATTR_TSID: a TSID value (u8 attribute) + * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute) + * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds + * (per second) (u16 attribute) + * + * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see + * &enum nl80211_smps_mode. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1936,6 +1980,16 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_INITIATOR, + NL80211_ATTR_USE_RRM, + + NL80211_ATTR_WIPHY_DYN_ACK, + + NL80211_ATTR_TSID, + NL80211_ATTR_USER_PRIO, + NL80211_ATTR_ADMITTED_TIME, + + NL80211_ATTR_SMPS_MODE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3055,14 +3109,20 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) + * (if @NL80211_BSS_PRESP_DATA is present then this is known to be + * from a probe response, otherwise it may be from the same beacon + * that the NL80211_BSS_BEACON_TSF will be from) * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the * raw information elements from the probe response/beacon (bin); - * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are - * from a Probe Response frame; otherwise they are from a Beacon frame. + * if the %NL80211_BSS_BEACON_IES attribute is present and the data is + * different then the IEs here are from a Probe Response frame; otherwise + * they are from a Beacon frame. * However, if the driver does not indicate the source of the IEs, these * IEs may be from either frame subtype. + * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the + * data here is known to be from a probe response, without any heuristics. * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon * in mBm (100 * dBm) (s32) * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon @@ -3074,6 +3134,10 @@ enum nl80211_bss_scan_width { * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel * (u32, enum nl80211_bss_scan_width) + * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) + * (not present if no beacon frame has been received yet) + * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and + * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3091,6 +3155,8 @@ enum nl80211_bss { NL80211_BSS_SEEN_MS_AGO, NL80211_BSS_BEACON_IES, NL80211_BSS_CHAN_WIDTH, + NL80211_BSS_BEACON_TSF, + NL80211_BSS_PRESP_DATA, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -3956,6 +4022,26 @@ enum nl80211_ap_sme_features { * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the * lifetime of a BSS. + * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter + * Set IE to probe requests. + * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE + * to probe requests. + * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period + * requests sent to it by an AP. 
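The new TSID/user-priority/admitted-time attributes are consumed by NL80211_CMD_ADD_TX_TS over generic netlink. A rough libnl-3 sketch is shown below; the interface index and peer MAC are placeholders, error handling is trimmed, and whether the request succeeds still depends on the driver-side admission checks described above.

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int add_tx_ts(int ifindex, const uint8_t peer[6], uint8_t tsid,
		     uint8_t user_prio, uint16_t admitted_time)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_ADD_TX_TS, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, peer);
	nla_put_u8(msg, NL80211_ATTR_TSID, tsid);
	nla_put_u8(msg, NL80211_ATTR_USER_PRIO, user_prio);
	/* omitting NL80211_ATTR_ADMITTED_TIME only probes whether a later
	 * "real" setup could succeed, as documented above
	 */
	if (admitted_time)
		nla_put_u16(msg, NL80211_ATTR_ADMITTED_TIME, admitted_time);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}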
+ * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the + * current tx power value into the TPC Report IE in the spectrum + * management TPC Report action frame, and in the Radio Measurement Link + * Measurement Report action frame. + * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout + * estimation (dynack). %NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used + * to enable dynack. + * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial + * multiplexing powersave, ie. can turn off all but one chain + * even on HT connections that should be using more chains. + * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial + * multiplexing powersave, ie. can turn off all but one chain + * and then wake the rest up as required after, for example, + * rts/cts handshake. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -3977,6 +4063,13 @@ enum nl80211_feature_flags { NL80211_FEATURE_USERSPACE_MPM = 1 << 16, NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18, + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19, + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20, + NL80211_FEATURE_QUIET = 1 << 21, + NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22, + NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, + NL80211_FEATURE_STATIC_SMPS = 1 << 24, + NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, }; /** @@ -4050,6 +4143,25 @@ enum nl80211_acl_policy { NL80211_ACL_POLICY_DENY_UNLESS_LISTED, }; +/** + * enum nl80211_smps_mode - SMPS mode + * + * Requested SMPS mode (for AP mode) + * + * @NL80211_SMPS_OFF: SMPS off (use all antennas). + * @NL80211_SMPS_STATIC: static SMPS (use a single antenna) + * @NL80211_SMPS_DYNAMIC: dynamic smps (start with a single antenna and + * turn on other antennas after CTS/RTS). + */ +enum nl80211_smps_mode { + NL80211_SMPS_OFF, + NL80211_SMPS_STATIC, + NL80211_SMPS_DYNAMIC, + + __NL80211_SMPS_AFTER_LAST, + NL80211_SMPS_MAX = __NL80211_SMPS_AFTER_LAST - 1 +}; + /** * enum nl80211_radar_event - type of radar event for DFS operation * diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index a794d1dd7b405b72c85f9a32a24d6826eada6e53..435eabc5ffaadb9e9e5d3dcfa554331065d430a6 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -192,6 +192,7 @@ enum ovs_vport_type { OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ OVS_VPORT_TYPE_GRE, /* GRE tunnel. */ OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */ + OVS_VPORT_TYPE_GENEVE, /* Geneve tunnel. */ __OVS_VPORT_TYPE_MAX }; @@ -289,9 +290,12 @@ enum ovs_key_attr { OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */ OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */ + OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash + is not computed by the datapath. */ + OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ #ifdef __KERNEL__ - OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ + OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ #endif __OVS_KEY_ATTR_MAX }; @@ -306,6 +310,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */ OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */ OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ + OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ + OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. 
*/ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -493,6 +499,27 @@ struct ovs_action_push_vlan { __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ }; +/* Data path hash algorithm for computing Datapath hash. + * + * The algorithm type only specifies the fields in a flow + * will be used as part of the hash. Each datapath is free + * to use its own hash algorithm. The hash value will be + * opaque to the user space daemon. + */ +enum ovs_hash_alg { + OVS_HASH_ALG_L4, +}; + +/* + * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument. + * @hash_alg: Algorithm used to compute hash prior to recirculation. + * @hash_basis: basis used for computing hash. + */ +struct ovs_action_hash { + uint32_t hash_alg; /* One of ovs_hash_alg. */ + uint32_t hash_basis; +}; + /** * enum ovs_action_attr - Action types. * @@ -521,6 +548,8 @@ enum ovs_action_attr { OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ + OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ + OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ __OVS_ACTION_ATTR_MAX }; diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h new file mode 100644 index 0000000000000000000000000000000000000000..6a3cddd156c47b79572d788b32a7662b957499c3 --- /dev/null +++ b/include/uapi/linux/wil6210_uapi.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL6210_UAPI_H__ +#define __WIL6210_UAPI_H__ + +#if !defined(__KERNEL__) +#define __user +#endif + +#include + +/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1 + * are used by Android devices to implement PNO (preferred network offload). 
+ * Albeit it is temporary solution, use different numbers to avoid conflicts + */ + +/** + * Perform 32-bit I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * struct wil_memio io; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2) + +/** + * Perform block I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * void *buf; + * struct wil_memio_block io = { + * .block = buf, + * }; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3) + +/** + * operation to perform + * + * @wil_mmio_op_mask - bits defining operation, + * @wil_mmio_addr_mask - bits defining addressing mode + */ +enum wil_memio_op { + wil_mmio_read = 0, + wil_mmio_write = 1, + wil_mmio_op_mask = 0xff, + wil_mmio_addr_linker = 0 << 8, + wil_mmio_addr_ahb = 1 << 8, + wil_mmio_addr_bar = 2 << 8, + wil_mmio_addr_mask = 0xff00, +}; + +struct wil_memio { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t val; +}; + +struct wil_memio_block { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t size; /* should be multiple of 4 */ + void __user *block; /* block address */ +}; + +#endif /* __WIL6210_UAPI_H__ */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 25e5dd916ba491feba3f1d570e8712283a95645e..02d5125a5ee8b285032ef3b066d2c48efca7c95a 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -328,6 +328,8 @@ enum xfrm_spdattr_type_t { XFRMA_SPD_UNSPEC, XFRMA_SPD_INFO, XFRMA_SPD_HINFO, + XFRMA_SPD_IPV4_HTHRESH, + XFRMA_SPD_IPV6_HTHRESH, __XFRMA_SPD_MAX #define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) @@ -347,6 +349,11 @@ struct xfrmu_spdhinfo { __u32 spdhmcnt; }; +struct xfrmu_spdhthresh { + __u8 lbits; + __u8 rbits; +}; + struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 6a71145e276972ce159918e73b5b635f0df8ac26..45427239f375c564049951e00e3925aacff8bc3f 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1 +1,5 @@ -obj-y := core.o +obj-y := core.o syscall.o verifier.o + +ifdef CONFIG_TEST_BPF +obj-y += test_stub.o +endif diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7f0dbcbb34af1559657adea42b7c726f145a3986..f0c30c59b31755ff1e124e0a5a0aaa0e356efc9e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -20,9 +20,14 @@ * Andi Kleen - Fix a few bad bugs and races. 
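Filling in the usage hint from the wil6210 header above, a single 32-bit read through WIL_IOCTL_MEMIO from userspace could look roughly like this; the interface name and address are placeholders and the address must be 32-bit aligned as noted in the structure comments.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/wil6210_uapi.h>

int main(void)
{
	struct wil_memio io = {
		.op   = wil_mmio_read | wil_mmio_addr_linker,
		.addr = 0x880000,			/* placeholder address */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (void *)&io;

	if (ioctl(fd, WIL_IOCTL_MEMIO, &ifr) == 0)
		printf("0x%08x: 0x%08x\n", io.addr, io.val);

	close(fd);
	return 0;
}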
* Kris Katterjohn - Added many additional checks in bpf_check_classic() */ + #include #include +#include +#include +#include #include +#include /* Registers */ #define BPF_R0 regs[BPF_REG_0] @@ -63,6 +68,105 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns return NULL; } +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | + gfp_extra_flags; + struct bpf_prog_aux *aux; + struct bpf_prog *fp; + + size = round_up(size, PAGE_SIZE); + fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); + if (fp == NULL) + return NULL; + + aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags); + if (aux == NULL) { + vfree(fp); + return NULL; + } + + fp->pages = size / PAGE_SIZE; + fp->aux = aux; + + return fp; +} +EXPORT_SYMBOL_GPL(bpf_prog_alloc); + +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | + gfp_extra_flags; + struct bpf_prog *fp; + + BUG_ON(fp_old == NULL); + + size = round_up(size, PAGE_SIZE); + if (size <= fp_old->pages * PAGE_SIZE) + return fp_old; + + fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); + if (fp != NULL) { + memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); + fp->pages = size / PAGE_SIZE; + + /* We keep fp->aux from fp_old around in the new + * reallocated structure. + */ + fp_old->aux = NULL; + __bpf_prog_free(fp_old); + } + + return fp; +} +EXPORT_SYMBOL_GPL(bpf_prog_realloc); + +void __bpf_prog_free(struct bpf_prog *fp) +{ + kfree(fp->aux); + vfree(fp); +} +EXPORT_SYMBOL_GPL(__bpf_prog_free); + +#ifdef CONFIG_BPF_JIT +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns) +{ + struct bpf_binary_header *hdr; + unsigned int size, hole, start; + + /* Most of BPF filters are really small, but if some of them + * fill a page, allow at least 128 extra bytes to insert a + * random section of illegal instructions. + */ + size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); + hdr = module_alloc(size); + if (hdr == NULL) + return NULL; + + /* Fill space with illegal/arch-dep instructions. */ + bpf_fill_ill_insns(hdr, size); + + hdr->pages = size / PAGE_SIZE; + hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), + PAGE_SIZE - sizeof(*hdr)); + start = (prandom_u32() % hole) & ~(alignment - 1); + + /* Leave a random number of instructions before BPF code. */ + *image_ptr = &hdr->image[start]; + + return hdr; +} + +void bpf_jit_binary_free(struct bpf_binary_header *hdr) +{ + module_free(NULL, hdr); +} +#endif /* CONFIG_BPF_JIT */ + /* Base function for offset calculation. Needs to go into .text section, * therefore keeping it non-static as well; will also be used by JITs * anyway later on, so do not let the compiler omit it. 
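Architecture JITs are meant to obtain their executable image from the two helpers above instead of calling module_alloc() directly, so that the random start offset and the illegal-instruction padding are applied consistently. A hedged sketch of how an arch backend could use them follows; the sizing pass, the emitter and the 0xcc fill byte are placeholders and not part of this patch.

#include <linux/filter.h>
#include <linux/string.h>

/* Fill unused space with a trapping opcode (0xcc is the x86 breakpoint;
 * other architectures would pick their own illegal/trap encoding).
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0xcc, size);
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	unsigned int image_size = 4096;	/* placeholder: result of a sizing pass */
	u8 *image;

	header = bpf_jit_binary_alloc(image_size, &image, 4, jit_fill_hole);
	if (!header)
		return;			/* stay on the interpreter */

	/* ... emit native code into image[] here; if emission fails, call
	 * bpf_jit_binary_free(header) and fall back to the interpreter ...
	 */

	prog->bpf_func = (void *)image;
	prog->jited = 1;
}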
@@ -180,6 +284,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, + [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW, }; void *ptr; int off; @@ -239,6 +344,10 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) ALU64_MOV_K: DST = IMM; CONT; + LD_IMM_DW: + DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; + insn++; + CONT; ALU64_ARSH_X: (*(s64 *) &DST) >>= SRC; CONT; @@ -523,12 +632,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp) /* Probe if internal BPF can be JITed */ bpf_int_jit_compile(fp); + /* Lock whole bpf_prog as read-only */ + bpf_prog_lock_ro(fp); } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); -/* free internal BPF program */ +static void bpf_prog_free_deferred(struct work_struct *work) +{ + struct bpf_prog_aux *aux; + + aux = container_of(work, struct bpf_prog_aux, work); + bpf_jit_free(aux->prog); +} + +/* Free internal BPF program */ void bpf_prog_free(struct bpf_prog *fp) { - bpf_jit_free(fp); + struct bpf_prog_aux *aux = fp->aux; + + INIT_WORK(&aux->work, bpf_prog_free_deferred); + aux->prog = fp; + schedule_work(&aux->work); } EXPORT_SYMBOL_GPL(bpf_prog_free); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c new file mode 100644 index 0000000000000000000000000000000000000000..ba61c8c160321ea6f333c560a9dc1e55f8b83632 --- /dev/null +++ b/kernel/bpf/syscall.c @@ -0,0 +1,606 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
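The LD_IMM_DW case above consumes a 16-byte pseudo instruction: the low 32 bits of the constant sit in the first insn's imm field and the high 32 bits in the imm of the following, otherwise empty, insn. A small kernel-side sketch of that layout (register and constant chosen arbitrarily):

#include <linux/filter.h>

/* r1 = 0x1122334455667788, spelled as the two halves that the interpreter
 * above stitches back together: insn[0].imm | ((u64)insn[1].imm << 32).
 */
static const struct bpf_insn ld_imm64_example[2] = {
	{
		.code	 = BPF_LD | BPF_DW | BPF_IMM,
		.dst_reg = BPF_REG_1,
		.imm	 = 0x55667788,	/* low 32 bits */
	},
	{
		.imm	 = 0x11223344,	/* high 32 bits; code and regs stay 0 */
	},
};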
+ */ +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(bpf_map_types); + +static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) +{ + struct bpf_map_type_list *tl; + struct bpf_map *map; + + list_for_each_entry(tl, &bpf_map_types, list_node) { + if (tl->type == attr->map_type) { + map = tl->ops->map_alloc(attr); + if (IS_ERR(map)) + return map; + map->ops = tl->ops; + map->map_type = attr->map_type; + return map; + } + } + return ERR_PTR(-EINVAL); +} + +/* boot time registration of different map implementations */ +void bpf_register_map_type(struct bpf_map_type_list *tl) +{ + list_add(&tl->list_node, &bpf_map_types); +} + +/* called from workqueue */ +static void bpf_map_free_deferred(struct work_struct *work) +{ + struct bpf_map *map = container_of(work, struct bpf_map, work); + + /* implementation dependent freeing */ + map->ops->map_free(map); +} + +/* decrement map refcnt and schedule it for freeing via workqueue + * (unrelying map implementation ops->map_free() might sleep) + */ +void bpf_map_put(struct bpf_map *map) +{ + if (atomic_dec_and_test(&map->refcnt)) { + INIT_WORK(&map->work, bpf_map_free_deferred); + schedule_work(&map->work); + } +} + +static int bpf_map_release(struct inode *inode, struct file *filp) +{ + struct bpf_map *map = filp->private_data; + + bpf_map_put(map); + return 0; +} + +static const struct file_operations bpf_map_fops = { + .release = bpf_map_release, +}; + +/* helper macro to check that unused fields 'union bpf_attr' are zero */ +#define CHECK_ATTR(CMD) \ + memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ + sizeof(attr->CMD##_LAST_FIELD), 0, \ + sizeof(*attr) - \ + offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ + sizeof(attr->CMD##_LAST_FIELD)) != NULL + +#define BPF_MAP_CREATE_LAST_FIELD max_entries +/* called via syscall */ +static int map_create(union bpf_attr *attr) +{ + struct bpf_map *map; + int err; + + err = CHECK_ATTR(BPF_MAP_CREATE); + if (err) + return -EINVAL; + + /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ + map = find_and_alloc_map(attr); + if (IS_ERR(map)) + return PTR_ERR(map); + + atomic_set(&map->refcnt, 1); + + err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC); + + if (err < 0) + /* failed to allocate fd */ + goto free_map; + + return err; + +free_map: + map->ops->map_free(map); + return err; +} + +/* if error is returned, fd is released. 
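Seen from userspace, the whole map side of the new syscall is driven through union bpf_attr, with CHECK_ATTR() above insisting that the unused tail of the union stays zeroed. A hedged sketch using the raw syscall: __NR_bpf must already be wired up for the architecture, and the map_type value is a placeholder for a map type that implements the element ops (only BPF_MAP_TYPE_UNSPEC exists at this point in the series, so the create call returns -EINVAL until such a type is registered).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;
	uint32_t key = 1, value = 42, out;
	int map_fd;

	/* BPF_MAP_CREATE: everything past max_entries must be zero */
	memset(&attr, 0, sizeof(attr));
	attr.map_type	 = 1;	/* placeholder map type, see note above */
	attr.key_size	 = sizeof(key);
	attr.value_size	 = sizeof(value);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return 1;

	/* BPF_MAP_UPDATE_ELEM: key and value are passed as 64-bit pointers */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uint64_t)(unsigned long)&key;
	attr.value  = (uint64_t)(unsigned long)&value;
	sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));

	/* BPF_MAP_LOOKUP_ELEM reuses the same layout, filling in 'out' */
	attr.value = (uint64_t)(unsigned long)&out;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) == 0)
		printf("key %u -> %u\n", key, out);

	close(map_fd);
	return 0;
}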
+ * On success caller should complete fd access with matching fdput() + */ +struct bpf_map *bpf_map_get(struct fd f) +{ + struct bpf_map *map; + + if (!f.file) + return ERR_PTR(-EBADF); + + if (f.file->f_op != &bpf_map_fops) { + fdput(f); + return ERR_PTR(-EINVAL); + } + + map = f.file->private_data; + + return map; +} + +/* helper to convert user pointers passed inside __aligned_u64 fields */ +static void __user *u64_to_ptr(__u64 val) +{ + return (void __user *) (unsigned long) val; +} + +/* last field in 'union bpf_attr' used by this command */ +#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value + +static int map_lookup_elem(union bpf_attr *attr) +{ + void __user *ukey = u64_to_ptr(attr->key); + void __user *uvalue = u64_to_ptr(attr->value); + int ufd = attr->map_fd; + struct fd f = fdget(ufd); + struct bpf_map *map; + void *key, *value; + int err; + + if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) + return -EINVAL; + + map = bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = -ENOMEM; + key = kmalloc(map->key_size, GFP_USER); + if (!key) + goto err_put; + + err = -EFAULT; + if (copy_from_user(key, ukey, map->key_size) != 0) + goto free_key; + + err = -ESRCH; + rcu_read_lock(); + value = map->ops->map_lookup_elem(map, key); + if (!value) + goto err_unlock; + + err = -EFAULT; + if (copy_to_user(uvalue, value, map->value_size) != 0) + goto err_unlock; + + err = 0; + +err_unlock: + rcu_read_unlock(); +free_key: + kfree(key); +err_put: + fdput(f); + return err; +} + +#define BPF_MAP_UPDATE_ELEM_LAST_FIELD value + +static int map_update_elem(union bpf_attr *attr) +{ + void __user *ukey = u64_to_ptr(attr->key); + void __user *uvalue = u64_to_ptr(attr->value); + int ufd = attr->map_fd; + struct fd f = fdget(ufd); + struct bpf_map *map; + void *key, *value; + int err; + + if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) + return -EINVAL; + + map = bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = -ENOMEM; + key = kmalloc(map->key_size, GFP_USER); + if (!key) + goto err_put; + + err = -EFAULT; + if (copy_from_user(key, ukey, map->key_size) != 0) + goto free_key; + + err = -ENOMEM; + value = kmalloc(map->value_size, GFP_USER); + if (!value) + goto free_key; + + err = -EFAULT; + if (copy_from_user(value, uvalue, map->value_size) != 0) + goto free_value; + + /* eBPF program that use maps are running under rcu_read_lock(), + * therefore all map accessors rely on this fact, so do the same here + */ + rcu_read_lock(); + err = map->ops->map_update_elem(map, key, value); + rcu_read_unlock(); + +free_value: + kfree(value); +free_key: + kfree(key); +err_put: + fdput(f); + return err; +} + +#define BPF_MAP_DELETE_ELEM_LAST_FIELD key + +static int map_delete_elem(union bpf_attr *attr) +{ + void __user *ukey = u64_to_ptr(attr->key); + int ufd = attr->map_fd; + struct fd f = fdget(ufd); + struct bpf_map *map; + void *key; + int err; + + if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) + return -EINVAL; + + map = bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = -ENOMEM; + key = kmalloc(map->key_size, GFP_USER); + if (!key) + goto err_put; + + err = -EFAULT; + if (copy_from_user(key, ukey, map->key_size) != 0) + goto free_key; + + rcu_read_lock(); + err = map->ops->map_delete_elem(map, key); + rcu_read_unlock(); + +free_key: + kfree(key); +err_put: + fdput(f); + return err; +} + +/* last field in 'union bpf_attr' used by this command */ +#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key + +static int map_get_next_key(union bpf_attr *attr) +{ + void __user *ukey = u64_to_ptr(attr->key); + void __user 
*unext_key = u64_to_ptr(attr->next_key); + int ufd = attr->map_fd; + struct fd f = fdget(ufd); + struct bpf_map *map; + void *key, *next_key; + int err; + + if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) + return -EINVAL; + + map = bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = -ENOMEM; + key = kmalloc(map->key_size, GFP_USER); + if (!key) + goto err_put; + + err = -EFAULT; + if (copy_from_user(key, ukey, map->key_size) != 0) + goto free_key; + + err = -ENOMEM; + next_key = kmalloc(map->key_size, GFP_USER); + if (!next_key) + goto free_key; + + rcu_read_lock(); + err = map->ops->map_get_next_key(map, key, next_key); + rcu_read_unlock(); + if (err) + goto free_next_key; + + err = -EFAULT; + if (copy_to_user(unext_key, next_key, map->key_size) != 0) + goto free_next_key; + + err = 0; + +free_next_key: + kfree(next_key); +free_key: + kfree(key); +err_put: + fdput(f); + return err; +} + +static LIST_HEAD(bpf_prog_types); + +static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) +{ + struct bpf_prog_type_list *tl; + + list_for_each_entry(tl, &bpf_prog_types, list_node) { + if (tl->type == type) { + prog->aux->ops = tl->ops; + prog->aux->prog_type = type; + return 0; + } + } + return -EINVAL; +} + +void bpf_register_prog_type(struct bpf_prog_type_list *tl) +{ + list_add(&tl->list_node, &bpf_prog_types); +} + +/* fixup insn->imm field of bpf_call instructions: + * if (insn->imm == BPF_FUNC_map_lookup_elem) + * insn->imm = bpf_map_lookup_elem - __bpf_call_base; + * else if (insn->imm == BPF_FUNC_map_update_elem) + * insn->imm = bpf_map_update_elem - __bpf_call_base; + * else ... + * + * this function is called after eBPF program passed verification + */ +static void fixup_bpf_calls(struct bpf_prog *prog) +{ + const struct bpf_func_proto *fn; + int i; + + for (i = 0; i < prog->len; i++) { + struct bpf_insn *insn = &prog->insnsi[i]; + + if (insn->code == (BPF_JMP | BPF_CALL)) { + /* we reach here when program has bpf_call instructions + * and it passed bpf_check(), means that + * ops->get_func_proto must have been supplied, check it + */ + BUG_ON(!prog->aux->ops->get_func_proto); + + fn = prog->aux->ops->get_func_proto(insn->imm); + /* all functions that have prototype and verifier allowed + * programs to call them, must be real in-kernel functions + */ + BUG_ON(!fn->func); + insn->imm = fn->func - __bpf_call_base; + } + } +} + +/* drop refcnt on maps used by eBPF program and free auxilary data */ +static void free_used_maps(struct bpf_prog_aux *aux) +{ + int i; + + for (i = 0; i < aux->used_map_cnt; i++) + bpf_map_put(aux->used_maps[i]); + + kfree(aux->used_maps); +} + +void bpf_prog_put(struct bpf_prog *prog) +{ + if (atomic_dec_and_test(&prog->aux->refcnt)) { + free_used_maps(prog->aux); + bpf_prog_free(prog); + } +} + +static int bpf_prog_release(struct inode *inode, struct file *filp) +{ + struct bpf_prog *prog = filp->private_data; + + bpf_prog_put(prog); + return 0; +} + +static const struct file_operations bpf_prog_fops = { + .release = bpf_prog_release, +}; + +static struct bpf_prog *get_prog(struct fd f) +{ + struct bpf_prog *prog; + + if (!f.file) + return ERR_PTR(-EBADF); + + if (f.file->f_op != &bpf_prog_fops) { + fdput(f); + return ERR_PTR(-EINVAL); + } + + prog = f.file->private_data; + + return prog; +} + +/* called by sockets/tracing/seccomp before attaching program to an event + * pairs with bpf_prog_put() + */ +struct bpf_prog *bpf_prog_get(u32 ufd) +{ + struct fd f = fdget(ufd); + struct bpf_prog *prog; + + prog = get_prog(f); + + if (IS_ERR(prog)) 
+ return prog; + + atomic_inc(&prog->aux->refcnt); + fdput(f); + return prog; +} + +/* last field in 'union bpf_attr' used by this command */ +#define BPF_PROG_LOAD_LAST_FIELD log_buf + +static int bpf_prog_load(union bpf_attr *attr) +{ + enum bpf_prog_type type = attr->prog_type; + struct bpf_prog *prog; + int err; + char license[128]; + bool is_gpl; + + if (CHECK_ATTR(BPF_PROG_LOAD)) + return -EINVAL; + + /* copy eBPF program license from user space */ + if (strncpy_from_user(license, u64_to_ptr(attr->license), + sizeof(license) - 1) < 0) + return -EFAULT; + license[sizeof(license) - 1] = 0; + + /* eBPF programs must be GPL compatible to use GPL-ed functions */ + is_gpl = license_is_gpl_compatible(license); + + if (attr->insn_cnt >= BPF_MAXINSNS) + return -EINVAL; + + /* plain bpf_prog allocation */ + prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); + if (!prog) + return -ENOMEM; + + prog->len = attr->insn_cnt; + + err = -EFAULT; + if (copy_from_user(prog->insns, u64_to_ptr(attr->insns), + prog->len * sizeof(struct bpf_insn)) != 0) + goto free_prog; + + prog->orig_prog = NULL; + prog->jited = false; + + atomic_set(&prog->aux->refcnt, 1); + prog->aux->is_gpl_compatible = is_gpl; + + /* find program type: socket_filter vs tracing_filter */ + err = find_prog_type(type, prog); + if (err < 0) + goto free_prog; + + /* run eBPF verifier */ + err = bpf_check(prog, attr); + + if (err < 0) + goto free_used_maps; + + /* fixup BPF_CALL->imm field */ + fixup_bpf_calls(prog); + + /* eBPF program is ready to be JITed */ + bpf_prog_select_runtime(prog); + + err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC); + + if (err < 0) + /* failed to allocate fd */ + goto free_used_maps; + + return err; + +free_used_maps: + free_used_maps(prog->aux); +free_prog: + bpf_prog_free(prog); + return err; +} + +SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) +{ + union bpf_attr attr = {}; + int err; + + /* the syscall is limited to root temporarily. This restriction will be + * lifted when security audit is clean. Note that eBPF+tracing must have + * this restriction, since it may pass kernel data to user space + */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!access_ok(VERIFY_READ, uattr, 1)) + return -EFAULT; + + if (size > PAGE_SIZE) /* silly large */ + return -E2BIG; + + /* If we're handed a bigger struct than we know of, + * ensure all the unknown bits are 0 - i.e. new + * user-space does not rely on any kernel feature + * extensions we dont know about yet. 
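Loading a program works the same way from userspace: fill in the BPF_PROG_LOAD members and keep the rest of the union zeroed. The sketch below loads a trivial "r0 = 0; exit" program against the stub program type (registered only when CONFIG_TEST_BPF is enabled); it needs CAP_SYS_ADMIN as enforced above, __NR_bpf is assumed to be wired up, and the raw opcode bytes stand in for the BPF_MOV64_IMM()/BPF_EXIT_INSN() helper macros, which are not part of the uapi header.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	/* r0 = 0; exit  --  0xb7 is BPF_ALU64|BPF_MOV|BPF_K,
	 *                   0x95 is BPF_JMP|BPF_EXIT
	 */
	struct bpf_insn prog[] = {
		{ .code = 0xb7, .dst_reg = 0, .imm = 0 },
		{ .code = 0x95 },
	};
	union bpf_attr attr;
	int prog_fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_UNSPEC;	/* handled by the test stub */
	attr.insns     = (uint64_t)(unsigned long)prog;
	attr.insn_cnt  = 2;
	attr.license   = (uint64_t)(unsigned long)"GPL";
	/* log_level/log_size/log_buf left at zero: no verifier trace wanted */

	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0)
		return 1;

	close(prog_fd);
	return 0;
}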
+ */ + if (size > sizeof(attr)) { + unsigned char __user *addr; + unsigned char __user *end; + unsigned char val; + + addr = (void __user *)uattr + sizeof(attr); + end = (void __user *)uattr + size; + + for (; addr < end; addr++) { + err = get_user(val, addr); + if (err) + return err; + if (val) + return -E2BIG; + } + size = sizeof(attr); + } + + /* copy attributes from user space, may be less than sizeof(bpf_attr) */ + if (copy_from_user(&attr, uattr, size) != 0) + return -EFAULT; + + switch (cmd) { + case BPF_MAP_CREATE: + err = map_create(&attr); + break; + case BPF_MAP_LOOKUP_ELEM: + err = map_lookup_elem(&attr); + break; + case BPF_MAP_UPDATE_ELEM: + err = map_update_elem(&attr); + break; + case BPF_MAP_DELETE_ELEM: + err = map_delete_elem(&attr); + break; + case BPF_MAP_GET_NEXT_KEY: + err = map_get_next_key(&attr); + break; + case BPF_PROG_LOAD: + err = bpf_prog_load(&attr); + break; + default: + err = -EINVAL; + break; + } + + return err; +} diff --git a/kernel/bpf/test_stub.c b/kernel/bpf/test_stub.c new file mode 100644 index 0000000000000000000000000000000000000000..fcaddff4003e27561c63f67d934360cfb2adeeb6 --- /dev/null +++ b/kernel/bpf/test_stub.c @@ -0,0 +1,116 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC + * to be used by user space verifier testsuite + */ +struct bpf_context { + u64 arg1; + u64 arg2; +}; + +static u64 test_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + return 0; +} + +static struct bpf_func_proto test_funcs[] = { + [BPF_FUNC_unspec] = { + .func = test_func, + .gpl_only = true, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + }, +}; + +static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id) +{ + if (func_id < 0 || func_id >= ARRAY_SIZE(test_funcs)) + return NULL; + return &test_funcs[func_id]; +} + +static const struct bpf_context_access { + int size; + enum bpf_access_type type; +} test_ctx_access[] = { + [offsetof(struct bpf_context, arg1)] = { + FIELD_SIZEOF(struct bpf_context, arg1), + BPF_READ + }, + [offsetof(struct bpf_context, arg2)] = { + FIELD_SIZEOF(struct bpf_context, arg2), + BPF_READ + }, +}; + +static bool test_is_valid_access(int off, int size, enum bpf_access_type type) +{ + const struct bpf_context_access *access; + + if (off < 0 || off >= ARRAY_SIZE(test_ctx_access)) + return false; + + access = &test_ctx_access[off]; + if (access->size == size && (access->type & type)) + return true; + + return false; +} + +static struct bpf_verifier_ops test_ops = { + .get_func_proto = test_func_proto, + .is_valid_access = test_is_valid_access, +}; + +static struct bpf_prog_type_list tl_prog = { + .ops = &test_ops, + .type = BPF_PROG_TYPE_UNSPEC, +}; + +static struct bpf_map *test_map_alloc(union bpf_attr *attr) +{ + struct bpf_map *map; + + map = kzalloc(sizeof(*map), GFP_USER); + if (!map) + return ERR_PTR(-ENOMEM); + + map->key_size = attr->key_size; + map->value_size = attr->value_size; + map->max_entries = attr->max_entries; + return map; +} + +static void test_map_free(struct bpf_map *map) +{ + kfree(map); +} + +static struct bpf_map_ops test_map_ops = { + .map_alloc = test_map_alloc, + .map_free = test_map_free, +}; + +static 
struct bpf_map_type_list tl_map = { + .ops = &test_map_ops, + .type = BPF_MAP_TYPE_UNSPEC, +}; + +static int __init register_test_ops(void) +{ + bpf_register_map_type(&tl_map); + bpf_register_prog_type(&tl_prog); + return 0; +} +late_initcall(register_test_ops); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c new file mode 100644 index 0000000000000000000000000000000000000000..801f5f3b9307cc5cbe8ed24f13e2aa1cbab53dc6 --- /dev/null +++ b/kernel/bpf/verifier.c @@ -0,0 +1,1923 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* bpf_check() is a static code analyzer that walks eBPF program + * instruction by instruction and updates register/stack state. + * All paths of conditional branches are analyzed until 'bpf_exit' insn. + * + * The first pass is depth-first-search to check that the program is a DAG. + * It rejects the following programs: + * - larger than BPF_MAXINSNS insns + * - if loop is present (detected via back-edge) + * - unreachable insns exist (shouldn't be a forest. program = one function) + * - out of bounds or malformed jumps + * The second pass is all possible path descent from the 1st insn. + * Since it's analyzing all pathes through the program, the length of the + * analysis is limited to 32k insn, which may be hit even if total number of + * insn is less then 4K, but there are too many branches that change stack/regs. + * Number of 'branches to be analyzed' is limited to 1k + * + * On entry to each instruction, each register has a type, and the instruction + * changes the types of the registers depending on instruction semantics. + * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is + * copied to R1. + * + * All registers are 64-bit. + * R0 - return register + * R1-R5 argument passing registers + * R6-R9 callee saved registers + * R10 - frame pointer read-only + * + * At the start of BPF program the register R1 contains a pointer to bpf_context + * and has type PTR_TO_CTX. + * + * Verifier tracks arithmetic operations on pointers in case: + * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), + * 1st insn copies R10 (which has FRAME_PTR) type into R1 + * and 2nd arithmetic instruction is pattern matched to recognize + * that it wants to construct a pointer to some element within stack. + * So after 2nd insn, the register R1 has type PTR_TO_STACK + * (and -20 constant is saved for further stack bounds checking). + * Meaning that this reg is a pointer to stack plus known immediate constant. + * + * Most of the time the registers have UNKNOWN_VALUE type, which + * means the register has some value, but it's not a valid pointer. + * (like pointer plus pointer becomes UNKNOWN_VALUE type) + * + * When verifier sees load or store instructions the type of base register + * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer + * types recognized by check_mem_access() function. 
+ * + * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' + * and the range of [ptr, ptr + map's value_size) is accessible. + * + * registers used to pass values to function calls are checked against + * function argument constraints. + * + * ARG_PTR_TO_MAP_KEY is one of such argument constraints. + * It means that the register type passed to this function must be + * PTR_TO_STACK and it will be used inside the function as + * 'pointer to map element key' + * + * For example the argument constraints for bpf_map_lookup_elem(): + * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + * .arg1_type = ARG_CONST_MAP_PTR, + * .arg2_type = ARG_PTR_TO_MAP_KEY, + * + * ret_type says that this function returns 'pointer to map elem value or null' + * function expects 1st argument to be a const pointer to 'struct bpf_map' and + * 2nd argument should be a pointer to stack, which will be used inside + * the helper function as a pointer to map element key. + * + * On the kernel side the helper function looks like: + * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + * { + * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; + * void *key = (void *) (unsigned long) r2; + * void *value; + * + * here kernel can access 'key' and 'map' pointers safely, knowing that + * [key, key + map->key_size) bytes are valid and were initialized on + * the stack of eBPF program. + * } + * + * Corresponding eBPF program may look like: + * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR + * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK + * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP + * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + * here verifier looks at prototype of map_lookup_elem() and sees: + * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, + * Now verifier knows that this map has key of R1->map_ptr->key_size bytes + * + * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, + * Now verifier checks that [R2, R2 + map's key_size) are within stack limits + * and were initialized prior to this call. + * If it's ok, then verifier allows this BPF_CALL insn and looks at + * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets + * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function + * returns ether pointer to map value or NULL. + * + * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' + * insn, the register holding that pointer in the true branch changes state to + * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false + * branch. See check_cond_jmp_op(). + * + * After the call R0 is set to return type of the function and registers R1-R5 + * are set to NOT_INIT to indicate that they are no longer readable. 
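In concrete instruction form, the branch splitting described above is what lets the usual "lookup, test for NULL, then dereference" pattern pass the verifier. The sketch below continues the example sequence from this comment using the insn macros from linux/filter.h; the map fd is a placeholder, the map is assumed to have a 4-byte key and a value of at least 8 bytes, and BPF_FUNC_map_lookup_elem is used as in the comment's own example even though the helper itself is wired up by later patches in the series.

#include <linux/filter.h>

#define EXAMPLE_MAP_FD	3	/* placeholder: fd returned by BPF_MAP_CREATE */

static const struct bpf_insn null_check_example[] = {
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),	  /* key = 0, on the stack */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),	  /* R2 type: FRAME_PTR */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),	  /* R2 type: PTR_TO_STACK */
	BPF_LD_MAP_FD(BPF_REG_1, EXAMPLE_MAP_FD), /* R1 type: CONST_PTR_TO_MAP */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* R0 type: PTR_TO_MAP_VALUE_OR_NULL */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* fall-through branch: R0 is PTR_TO_MAP_VALUE, the store is allowed */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* branch target: R0 is CONST_IMM 0 here and is never dereferenced */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};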
+ */ + +/* types of values stored in eBPF registers */ +enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + FRAME_PTR, /* reg == frame_pointer */ + PTR_TO_STACK, /* reg == frame_pointer + imm */ + CONST_IMM, /* constant integer value */ +}; + +struct reg_state { + enum bpf_reg_type type; + union { + /* valid when type == CONST_IMM | PTR_TO_STACK */ + int imm; + + /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | + * PTR_TO_MAP_VALUE_OR_NULL + */ + struct bpf_map *map_ptr; + }; +}; + +enum bpf_stack_slot_type { + STACK_INVALID, /* nothing was stored in this stack slot */ + STACK_SPILL, /* 1st byte of register spilled into stack */ + STACK_SPILL_PART, /* other 7 bytes of register spill */ + STACK_MISC /* BPF program wrote some data into this slot */ +}; + +struct bpf_stack_slot { + enum bpf_stack_slot_type stype; + struct reg_state reg_st; +}; + +/* state of the program: + * type of all registers and stack info + */ +struct verifier_state { + struct reg_state regs[MAX_BPF_REG]; + struct bpf_stack_slot stack[MAX_BPF_STACK]; +}; + +/* linked list of verifier states used to prune search */ +struct verifier_state_list { + struct verifier_state state; + struct verifier_state_list *next; +}; + +/* verifier_state + insn_idx are pushed to stack when branch is encountered */ +struct verifier_stack_elem { + /* verifer state is 'st' + * before processing instruction 'insn_idx' + * and after processing instruction 'prev_insn_idx' + */ + struct verifier_state st; + int insn_idx; + int prev_insn_idx; + struct verifier_stack_elem *next; +}; + +#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ + +/* single container for all structs + * one verifier_env per bpf_check() call + */ +struct verifier_env { + struct bpf_prog *prog; /* eBPF program being verified */ + struct verifier_stack_elem *head; /* stack of verifier states to be processed */ + int stack_size; /* number of states to be processed */ + struct verifier_state cur_state; /* current verifier state */ + struct verifier_state_list **explored_states; /* search pruning optimization */ + struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ + u32 used_map_cnt; /* number of used maps */ +}; + +/* verbose verifier prints what it's seeing + * bpf_check() is called under lock, so no race to access these global vars + */ +static u32 log_level, log_size, log_len; +static char *log_buf; + +static DEFINE_MUTEX(bpf_verifier_lock); + +/* log_level controls verbosity level of eBPF verifier. + * verbose() is used to dump the verification trace to the log, so the user + * can figure out what's wrong with the program + */ +static void verbose(const char *fmt, ...) 
+{ + va_list args; + + if (log_level == 0 || log_len >= log_size - 1) + return; + + va_start(args, fmt); + log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); + va_end(args); +} + +/* string representation of 'enum bpf_reg_type' */ +static const char * const reg_type_str[] = { + [NOT_INIT] = "?", + [UNKNOWN_VALUE] = "inv", + [PTR_TO_CTX] = "ctx", + [CONST_PTR_TO_MAP] = "map_ptr", + [PTR_TO_MAP_VALUE] = "map_value", + [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", + [FRAME_PTR] = "fp", + [PTR_TO_STACK] = "fp", + [CONST_IMM] = "imm", +}; + +static void print_verifier_state(struct verifier_env *env) +{ + enum bpf_reg_type t; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) { + t = env->cur_state.regs[i].type; + if (t == NOT_INIT) + continue; + verbose(" R%d=%s", i, reg_type_str[t]); + if (t == CONST_IMM || t == PTR_TO_STACK) + verbose("%d", env->cur_state.regs[i].imm); + else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || + t == PTR_TO_MAP_VALUE_OR_NULL) + verbose("(ks=%d,vs=%d)", + env->cur_state.regs[i].map_ptr->key_size, + env->cur_state.regs[i].map_ptr->value_size); + } + for (i = 0; i < MAX_BPF_STACK; i++) { + if (env->cur_state.stack[i].stype == STACK_SPILL) + verbose(" fp%d=%s", -MAX_BPF_STACK + i, + reg_type_str[env->cur_state.stack[i].reg_st.type]); + } + verbose("\n"); +} + +static const char *const bpf_class_string[] = { + [BPF_LD] = "ld", + [BPF_LDX] = "ldx", + [BPF_ST] = "st", + [BPF_STX] = "stx", + [BPF_ALU] = "alu", + [BPF_JMP] = "jmp", + [BPF_RET] = "BUG", + [BPF_ALU64] = "alu64", +}; + +static const char *const bpf_alu_string[] = { + [BPF_ADD >> 4] = "+=", + [BPF_SUB >> 4] = "-=", + [BPF_MUL >> 4] = "*=", + [BPF_DIV >> 4] = "/=", + [BPF_OR >> 4] = "|=", + [BPF_AND >> 4] = "&=", + [BPF_LSH >> 4] = "<<=", + [BPF_RSH >> 4] = ">>=", + [BPF_NEG >> 4] = "neg", + [BPF_MOD >> 4] = "%=", + [BPF_XOR >> 4] = "^=", + [BPF_MOV >> 4] = "=", + [BPF_ARSH >> 4] = "s>>=", + [BPF_END >> 4] = "endian", +}; + +static const char *const bpf_ldst_string[] = { + [BPF_W >> 3] = "u32", + [BPF_H >> 3] = "u16", + [BPF_B >> 3] = "u8", + [BPF_DW >> 3] = "u64", +}; + +static const char *const bpf_jmp_string[] = { + [BPF_JA >> 4] = "jmp", + [BPF_JEQ >> 4] = "==", + [BPF_JGT >> 4] = ">", + [BPF_JGE >> 4] = ">=", + [BPF_JSET >> 4] = "&", + [BPF_JNE >> 4] = "!=", + [BPF_JSGT >> 4] = "s>", + [BPF_JSGE >> 4] = "s>=", + [BPF_CALL >> 4] = "call", + [BPF_EXIT >> 4] = "exit", +}; + +static void print_bpf_insn(struct bpf_insn *insn) +{ + u8 class = BPF_CLASS(insn->code); + + if (class == BPF_ALU || class == BPF_ALU64) { + if (BPF_SRC(insn->code) == BPF_X) + verbose("(%02x) %sr%d %s %sr%d\n", + insn->code, class == BPF_ALU ? "(u32) " : "", + insn->dst_reg, + bpf_alu_string[BPF_OP(insn->code) >> 4], + class == BPF_ALU ? "(u32) " : "", + insn->src_reg); + else + verbose("(%02x) %sr%d %s %s%d\n", + insn->code, class == BPF_ALU ? "(u32) " : "", + insn->dst_reg, + bpf_alu_string[BPF_OP(insn->code) >> 4], + class == BPF_ALU ? 
"(u32) " : "", + insn->imm); + } else if (class == BPF_STX) { + if (BPF_MODE(insn->code) == BPF_MEM) + verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->src_reg); + else if (BPF_MODE(insn->code) == BPF_XADD) + verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, insn->off, + insn->src_reg); + else + verbose("BUG_%02x\n", insn->code); + } else if (class == BPF_ST) { + if (BPF_MODE(insn->code) != BPF_MEM) { + verbose("BUG_st_%02x\n", insn->code); + return; + } + verbose("(%02x) *(%s *)(r%d %+d) = %d\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->dst_reg, + insn->off, insn->imm); + } else if (class == BPF_LDX) { + if (BPF_MODE(insn->code) != BPF_MEM) { + verbose("BUG_ldx_%02x\n", insn->code); + return; + } + verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", + insn->code, insn->dst_reg, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->src_reg, insn->off); + } else if (class == BPF_LD) { + if (BPF_MODE(insn->code) == BPF_ABS) { + verbose("(%02x) r0 = *(%s *)skb[%d]\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->imm); + } else if (BPF_MODE(insn->code) == BPF_IND) { + verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", + insn->code, + bpf_ldst_string[BPF_SIZE(insn->code) >> 3], + insn->src_reg, insn->imm); + } else if (BPF_MODE(insn->code) == BPF_IMM) { + verbose("(%02x) r%d = 0x%x\n", + insn->code, insn->dst_reg, insn->imm); + } else { + verbose("BUG_ld_%02x\n", insn->code); + return; + } + } else if (class == BPF_JMP) { + u8 opcode = BPF_OP(insn->code); + + if (opcode == BPF_CALL) { + verbose("(%02x) call %d\n", insn->code, insn->imm); + } else if (insn->code == (BPF_JMP | BPF_JA)) { + verbose("(%02x) goto pc%+d\n", + insn->code, insn->off); + } else if (insn->code == (BPF_JMP | BPF_EXIT)) { + verbose("(%02x) exit\n", insn->code); + } else if (BPF_SRC(insn->code) == BPF_X) { + verbose("(%02x) if r%d %s r%d goto pc%+d\n", + insn->code, insn->dst_reg, + bpf_jmp_string[BPF_OP(insn->code) >> 4], + insn->src_reg, insn->off); + } else { + verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", + insn->code, insn->dst_reg, + bpf_jmp_string[BPF_OP(insn->code) >> 4], + insn->imm, insn->off); + } + } else { + verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); + } +} + +static int pop_stack(struct verifier_env *env, int *prev_insn_idx) +{ + struct verifier_stack_elem *elem; + int insn_idx; + + if (env->head == NULL) + return -1; + + memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); + insn_idx = env->head->insn_idx; + if (prev_insn_idx) + *prev_insn_idx = env->head->prev_insn_idx; + elem = env->head->next; + kfree(env->head); + env->head = elem; + env->stack_size--; + return insn_idx; +} + +static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, + int prev_insn_idx) +{ + struct verifier_stack_elem *elem; + + elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); + if (!elem) + goto err; + + memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); + elem->insn_idx = insn_idx; + elem->prev_insn_idx = prev_insn_idx; + elem->next = env->head; + env->head = elem; + env->stack_size++; + if (env->stack_size > 1024) { + verbose("BPF program is too complex\n"); + goto err; + } + return &elem->st; +err: + /* pop all elements and return */ + while (pop_stack(env, NULL) >= 0); + return NULL; +} + +#define CALLER_SAVED_REGS 6 +static const int 
caller_saved[CALLER_SAVED_REGS] = { + BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 +}; + +static void init_reg_state(struct reg_state *regs) +{ + int i; + + for (i = 0; i < MAX_BPF_REG; i++) { + regs[i].type = NOT_INIT; + regs[i].imm = 0; + regs[i].map_ptr = NULL; + } + + /* frame pointer */ + regs[BPF_REG_FP].type = FRAME_PTR; + + /* 1st arg to a function */ + regs[BPF_REG_1].type = PTR_TO_CTX; +} + +static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) +{ + BUG_ON(regno >= MAX_BPF_REG); + regs[regno].type = UNKNOWN_VALUE; + regs[regno].imm = 0; + regs[regno].map_ptr = NULL; +} + +enum reg_arg_type { + SRC_OP, /* register is used as source operand */ + DST_OP, /* register is used as destination operand */ + DST_OP_NO_MARK /* same as above, check only, don't mark */ +}; + +static int check_reg_arg(struct reg_state *regs, u32 regno, + enum reg_arg_type t) +{ + if (regno >= MAX_BPF_REG) { + verbose("R%d is invalid\n", regno); + return -EINVAL; + } + + if (t == SRC_OP) { + /* check whether register used as source operand can be read */ + if (regs[regno].type == NOT_INIT) { + verbose("R%d !read_ok\n", regno); + return -EACCES; + } + } else { + /* check whether register used as dest operand can be written to */ + if (regno == BPF_REG_FP) { + verbose("frame pointer is read only\n"); + return -EACCES; + } + if (t == DST_OP) + mark_reg_unknown_value(regs, regno); + } + return 0; +} + +static int bpf_size_to_bytes(int bpf_size) +{ + if (bpf_size == BPF_W) + return 4; + else if (bpf_size == BPF_H) + return 2; + else if (bpf_size == BPF_B) + return 1; + else if (bpf_size == BPF_DW) + return 8; + else + return -EINVAL; +} + +/* check_stack_read/write functions track spill/fill of registers, + * stack boundary and alignment are checked in check_mem_access() + */ +static int check_stack_write(struct verifier_state *state, int off, int size, + int value_regno) +{ + struct bpf_stack_slot *slot; + int i; + + if (value_regno >= 0 && + (state->regs[value_regno].type == PTR_TO_MAP_VALUE || + state->regs[value_regno].type == PTR_TO_STACK || + state->regs[value_regno].type == PTR_TO_CTX)) { + + /* register containing pointer is being spilled into stack */ + if (size != 8) { + verbose("invalid size of register spill\n"); + return -EACCES; + } + + slot = &state->stack[MAX_BPF_STACK + off]; + slot->stype = STACK_SPILL; + /* save register state */ + slot->reg_st = state->regs[value_regno]; + for (i = 1; i < 8; i++) { + slot = &state->stack[MAX_BPF_STACK + off + i]; + slot->stype = STACK_SPILL_PART; + slot->reg_st.type = UNKNOWN_VALUE; + slot->reg_st.map_ptr = NULL; + } + } else { + + /* regular write of data into stack */ + for (i = 0; i < size; i++) { + slot = &state->stack[MAX_BPF_STACK + off + i]; + slot->stype = STACK_MISC; + slot->reg_st.type = UNKNOWN_VALUE; + slot->reg_st.map_ptr = NULL; + } + } + return 0; +} + +static int check_stack_read(struct verifier_state *state, int off, int size, + int value_regno) +{ + int i; + struct bpf_stack_slot *slot; + + slot = &state->stack[MAX_BPF_STACK + off]; + + if (slot->stype == STACK_SPILL) { + if (size != 8) { + verbose("invalid size of register spill\n"); + return -EACCES; + } + for (i = 1; i < 8; i++) { + if (state->stack[MAX_BPF_STACK + off + i].stype != + STACK_SPILL_PART) { + verbose("corrupted spill memory\n"); + return -EACCES; + } + } + + if (value_regno >= 0) + /* restore register state from stack */ + state->regs[value_regno] = slot->reg_st; + return 0; + } else { + for (i = 0; i < size; i++) { + if 
(state->stack[MAX_BPF_STACK + off + i].stype != + STACK_MISC) { + verbose("invalid read from stack off %d+%d size %d\n", + off, i, size); + return -EACCES; + } + } + if (value_regno >= 0) + /* have read misc data from the stack */ + mark_reg_unknown_value(state->regs, value_regno); + return 0; + } +} + +/* check read/write into map element returned by bpf_map_lookup_elem() */ +static int check_map_access(struct verifier_env *env, u32 regno, int off, + int size) +{ + struct bpf_map *map = env->cur_state.regs[regno].map_ptr; + + if (off < 0 || off + size > map->value_size) { + verbose("invalid access to map value, value_size=%d off=%d size=%d\n", + map->value_size, off, size); + return -EACCES; + } + return 0; +} + +/* check access to 'struct bpf_context' fields */ +static int check_ctx_access(struct verifier_env *env, int off, int size, + enum bpf_access_type t) +{ + if (env->prog->aux->ops->is_valid_access && + env->prog->aux->ops->is_valid_access(off, size, t)) + return 0; + + verbose("invalid bpf_context access off=%d size=%d\n", off, size); + return -EACCES; +} + +/* check whether memory at (regno + off) is accessible for t = (read | write) + * if t==write, value_regno is a register which value is stored into memory + * if t==read, value_regno is a register which will receive the value from memory + * if t==write && value_regno==-1, some unknown value is stored into memory + * if t==read && value_regno==-1, don't care what we read from memory + */ +static int check_mem_access(struct verifier_env *env, u32 regno, int off, + int bpf_size, enum bpf_access_type t, + int value_regno) +{ + struct verifier_state *state = &env->cur_state; + int size, err = 0; + + size = bpf_size_to_bytes(bpf_size); + if (size < 0) + return size; + + if (off % size != 0) { + verbose("misaligned access off %d size %d\n", off, size); + return -EACCES; + } + + if (state->regs[regno].type == PTR_TO_MAP_VALUE) { + err = check_map_access(env, regno, off, size); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown_value(state->regs, value_regno); + + } else if (state->regs[regno].type == PTR_TO_CTX) { + err = check_ctx_access(env, off, size, t); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown_value(state->regs, value_regno); + + } else if (state->regs[regno].type == FRAME_PTR) { + if (off >= 0 || off < -MAX_BPF_STACK) { + verbose("invalid stack off=%d size=%d\n", off, size); + return -EACCES; + } + if (t == BPF_WRITE) + err = check_stack_write(state, off, size, value_regno); + else + err = check_stack_read(state, off, size, value_regno); + } else { + verbose("R%d invalid mem access '%s'\n", + regno, reg_type_str[state->regs[regno].type]); + return -EACCES; + } + return err; +} + +static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + int err; + + if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || + insn->imm != 0) { + verbose("BPF_XADD uses reserved fields\n"); + return -EINVAL; + } + + /* check src1 operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + + /* check src2 operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + /* check whether atomic_add can read the memory */ + err = check_mem_access(env, insn->dst_reg, insn->off, + BPF_SIZE(insn->code), BPF_READ, -1); + if (err) + return err; + + /* check whether atomic_add can write into the same memory */ + return check_mem_access(env, insn->dst_reg, insn->off, + 
BPF_SIZE(insn->code), BPF_WRITE, -1); +} + +/* when register 'regno' is passed into function that will read 'access_size' + * bytes from that pointer, make sure that it's within stack boundary + * and all elements of stack are initialized + */ +static int check_stack_boundary(struct verifier_env *env, + int regno, int access_size) +{ + struct verifier_state *state = &env->cur_state; + struct reg_state *regs = state->regs; + int off, i; + + if (regs[regno].type != PTR_TO_STACK) + return -EACCES; + + off = regs[regno].imm; + if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || + access_size <= 0) { + verbose("invalid stack type R%d off=%d access_size=%d\n", + regno, off, access_size); + return -EACCES; + } + + for (i = 0; i < access_size; i++) { + if (state->stack[MAX_BPF_STACK + off + i].stype != STACK_MISC) { + verbose("invalid indirect read from stack off %d+%d size %d\n", + off, i, access_size); + return -EACCES; + } + } + return 0; +} + +static int check_func_arg(struct verifier_env *env, u32 regno, + enum bpf_arg_type arg_type, struct bpf_map **mapp) +{ + struct reg_state *reg = env->cur_state.regs + regno; + enum bpf_reg_type expected_type; + int err = 0; + + if (arg_type == ARG_ANYTHING) + return 0; + + if (reg->type == NOT_INIT) { + verbose("R%d !read_ok\n", regno); + return -EACCES; + } + + if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || + arg_type == ARG_PTR_TO_MAP_VALUE) { + expected_type = PTR_TO_STACK; + } else if (arg_type == ARG_CONST_STACK_SIZE) { + expected_type = CONST_IMM; + } else if (arg_type == ARG_CONST_MAP_PTR) { + expected_type = CONST_PTR_TO_MAP; + } else { + verbose("unsupported arg_type %d\n", arg_type); + return -EFAULT; + } + + if (reg->type != expected_type) { + verbose("R%d type=%s expected=%s\n", regno, + reg_type_str[reg->type], reg_type_str[expected_type]); + return -EACCES; + } + + if (arg_type == ARG_CONST_MAP_PTR) { + /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ + *mapp = reg->map_ptr; + + } else if (arg_type == ARG_PTR_TO_MAP_KEY) { + /* bpf_map_xxx(..., map_ptr, ..., key) call: + * check that [key, key + map->key_size) are within + * stack limits and initialized + */ + if (!*mapp) { + /* in function declaration map_ptr must come before + * map_key, so that it's verified and known before + * we have to check map_key here. Otherwise it means + * that kernel subsystem misconfigured verifier + */ + verbose("invalid map_ptr to access map->key\n"); + return -EACCES; + } + err = check_stack_boundary(env, regno, (*mapp)->key_size); + + } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { + /* bpf_map_xxx(..., map_ptr, ..., value) call: + * check [value, value + map->value_size) validity + */ + if (!*mapp) { + /* kernel subsystem misconfigured verifier */ + verbose("invalid map_ptr to access map->value\n"); + return -EACCES; + } + err = check_stack_boundary(env, regno, (*mapp)->value_size); + + } else if (arg_type == ARG_CONST_STACK_SIZE) { + /* bpf_xxx(..., buf, len) call will access 'len' bytes + * from stack pointer 'buf'. 
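For readers following the arithmetic in check_stack_boundary() above, here is a minimal standalone sketch of the same bounds test. The MAX_BPF_STACK value and the window layout are assumptions of the sketch (the constant comes from the kernel headers, not from this hunk).

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BPF_STACK 512    /* stack size assumed by this sketch */

    /* Mirror of the test in check_stack_boundary(): an argument pointing at
     * fp+off that a helper will read for access_size bytes must fall entirely
     * inside the [fp - MAX_BPF_STACK, fp) window. */
    static bool stack_access_ok(int off, int access_size)
    {
        if (off >= 0 || off < -MAX_BPF_STACK ||
            access_size <= 0 || off + access_size > 0)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", stack_access_ok(-8, 8));    /* 1: bytes fp-8 .. fp-1 */
        printf("%d\n", stack_access_ok(-4, 8));    /* 0: would run past fp */
        printf("%d\n", stack_access_ok(-520, 8));  /* 0: below the stack window */
        return 0;
    }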
Check it + * note: regno == len, regno - 1 == buf + */ + if (regno == 0) { + /* kernel subsystem misconfigured verifier */ + verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); + return -EACCES; + } + err = check_stack_boundary(env, regno - 1, reg->imm); + } + + return err; +} + +static int check_call(struct verifier_env *env, int func_id) +{ + struct verifier_state *state = &env->cur_state; + const struct bpf_func_proto *fn = NULL; + struct reg_state *regs = state->regs; + struct bpf_map *map = NULL; + struct reg_state *reg; + int i, err; + + /* find function prototype */ + if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { + verbose("invalid func %d\n", func_id); + return -EINVAL; + } + + if (env->prog->aux->ops->get_func_proto) + fn = env->prog->aux->ops->get_func_proto(func_id); + + if (!fn) { + verbose("unknown func %d\n", func_id); + return -EINVAL; + } + + /* eBPF programs must be GPL compatible to use GPL-ed functions */ + if (!env->prog->aux->is_gpl_compatible && fn->gpl_only) { + verbose("cannot call GPL only function from proprietary program\n"); + return -EINVAL; + } + + /* check args */ + err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map); + if (err) + return err; + err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map); + if (err) + return err; + err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map); + if (err) + return err; + err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map); + if (err) + return err; + err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map); + if (err) + return err; + + /* reset caller saved regs */ + for (i = 0; i < CALLER_SAVED_REGS; i++) { + reg = regs + caller_saved[i]; + reg->type = NOT_INIT; + reg->imm = 0; + } + + /* update return register */ + if (fn->ret_type == RET_INTEGER) { + regs[BPF_REG_0].type = UNKNOWN_VALUE; + } else if (fn->ret_type == RET_VOID) { + regs[BPF_REG_0].type = NOT_INIT; + } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + /* remember map_ptr, so that check_map_access() + * can check 'value_size' boundary of memory access + * to map element returned from bpf_map_lookup_elem() + */ + if (map == NULL) { + verbose("kernel subsystem misconfigured verifier\n"); + return -EINVAL; + } + regs[BPF_REG_0].map_ptr = map; + } else { + verbose("unknown return type %d of func %d\n", + fn->ret_type, func_id); + return -EINVAL; + } + return 0; +} + +/* check validity of 32-bit and 64-bit arithmetic operations */ +static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn) +{ + u8 opcode = BPF_OP(insn->code); + int err; + + if (opcode == BPF_END || opcode == BPF_NEG) { + if (opcode == BPF_NEG) { + if (BPF_SRC(insn->code) != 0 || + insn->src_reg != BPF_REG_0 || + insn->off != 0 || insn->imm != 0) { + verbose("BPF_NEG uses reserved fields\n"); + return -EINVAL; + } + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0 || + (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { + verbose("BPF_END uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + /* check dest operand */ + err = check_reg_arg(regs, insn->dst_reg, DST_OP); + if (err) + return err; + + } else if (opcode == BPF_MOV) { + + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0 || insn->off != 0) { + verbose("BPF_MOV uses reserved fields\n"); + return -EINVAL; + } + + /* check src operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return 
err; + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0) { + verbose("BPF_MOV uses reserved fields\n"); + return -EINVAL; + } + } + + /* check dest operand */ + err = check_reg_arg(regs, insn->dst_reg, DST_OP); + if (err) + return err; + + if (BPF_SRC(insn->code) == BPF_X) { + if (BPF_CLASS(insn->code) == BPF_ALU64) { + /* case: R1 = R2 + * copy register state to dest reg + */ + regs[insn->dst_reg] = regs[insn->src_reg]; + } else { + regs[insn->dst_reg].type = UNKNOWN_VALUE; + regs[insn->dst_reg].map_ptr = NULL; + } + } else { + /* case: R = imm + * remember the value we stored into this reg + */ + regs[insn->dst_reg].type = CONST_IMM; + regs[insn->dst_reg].imm = insn->imm; + } + + } else if (opcode > BPF_END) { + verbose("invalid BPF_ALU opcode %x\n", opcode); + return -EINVAL; + + } else { /* all other ALU ops: and, sub, xor, add, ... */ + + bool stack_relative = false; + + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0 || insn->off != 0) { + verbose("BPF_ALU uses reserved fields\n"); + return -EINVAL; + } + /* check src1 operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0) { + verbose("BPF_ALU uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src2 operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + if ((opcode == BPF_MOD || opcode == BPF_DIV) && + BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { + verbose("div by zero\n"); + return -EINVAL; + } + + /* pattern match 'bpf_add Rx, imm' instruction */ + if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && + regs[insn->dst_reg].type == FRAME_PTR && + BPF_SRC(insn->code) == BPF_K) + stack_relative = true; + + /* check dest operand */ + err = check_reg_arg(regs, insn->dst_reg, DST_OP); + if (err) + return err; + + if (stack_relative) { + regs[insn->dst_reg].type = PTR_TO_STACK; + regs[insn->dst_reg].imm = insn->imm; + } + } + + return 0; +} + +static int check_cond_jmp_op(struct verifier_env *env, + struct bpf_insn *insn, int *insn_idx) +{ + struct reg_state *regs = env->cur_state.regs; + struct verifier_state *other_branch; + u8 opcode = BPF_OP(insn->code); + int err; + + if (opcode > BPF_EXIT) { + verbose("invalid BPF_JMP opcode %x\n", opcode); + return -EINVAL; + } + + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0) { + verbose("BPF_JMP uses reserved fields\n"); + return -EINVAL; + } + + /* check src1 operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + } else { + if (insn->src_reg != BPF_REG_0) { + verbose("BPF_JMP uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src2 operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + /* detect if R == 0 where R was initialized to zero earlier */ + if (BPF_SRC(insn->code) == BPF_K && + (opcode == BPF_JEQ || opcode == BPF_JNE) && + regs[insn->dst_reg].type == CONST_IMM && + regs[insn->dst_reg].imm == insn->imm) { + if (opcode == BPF_JEQ) { + /* if (imm == imm) goto pc+off; + * only follow the goto, ignore fall-through + */ + *insn_idx += insn->off; + return 0; + } else { + /* if (imm != imm) goto pc+off; + * only follow fall-through branch, since + * that's where the program will go + */ + return 0; + } + } + + other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); + if (!other_branch) + return -EFAULT; + + /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ + if (BPF_SRC(insn->code) 
== BPF_K && + insn->imm == 0 && (opcode == BPF_JEQ || + opcode == BPF_JNE) && + regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) { + if (opcode == BPF_JEQ) { + /* next fallthrough insn can access memory via + * this register + */ + regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; + /* branch targer cannot access it, since reg == 0 */ + other_branch->regs[insn->dst_reg].type = CONST_IMM; + other_branch->regs[insn->dst_reg].imm = 0; + } else { + other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; + regs[insn->dst_reg].type = CONST_IMM; + regs[insn->dst_reg].imm = 0; + } + } else if (BPF_SRC(insn->code) == BPF_K && + (opcode == BPF_JEQ || opcode == BPF_JNE)) { + + if (opcode == BPF_JEQ) { + /* detect if (R == imm) goto + * and in the target state recognize that R = imm + */ + other_branch->regs[insn->dst_reg].type = CONST_IMM; + other_branch->regs[insn->dst_reg].imm = insn->imm; + } else { + /* detect if (R != imm) goto + * and in the fall-through state recognize that R = imm + */ + regs[insn->dst_reg].type = CONST_IMM; + regs[insn->dst_reg].imm = insn->imm; + } + } + if (log_level) + print_verifier_state(env); + return 0; +} + +/* return the map pointer stored inside BPF_LD_IMM64 instruction */ +static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) +{ + u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; + + return (struct bpf_map *) (unsigned long) imm64; +} + +/* verify BPF_LD_IMM64 instruction */ +static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) +{ + struct reg_state *regs = env->cur_state.regs; + int err; + + if (BPF_SIZE(insn->code) != BPF_DW) { + verbose("invalid BPF_LD_IMM insn\n"); + return -EINVAL; + } + if (insn->off != 0) { + verbose("BPF_LD_IMM64 uses reserved fields\n"); + return -EINVAL; + } + + err = check_reg_arg(regs, insn->dst_reg, DST_OP); + if (err) + return err; + + if (insn->src_reg == 0) + /* generic move 64-bit immediate into a register */ + return 0; + + /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ + BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); + + regs[insn->dst_reg].type = CONST_PTR_TO_MAP; + regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); + return 0; +} + +/* non-recursive DFS pseudo code + * 1 procedure DFS-iterative(G,v): + * 2 label v as discovered + * 3 let S be a stack + * 4 S.push(v) + * 5 while S is not empty + * 6 t <- S.pop() + * 7 if t is what we're looking for: + * 8 return t + * 9 for all edges e in G.adjacentEdges(t) do + * 10 if edge e is already labelled + * 11 continue with the next edge + * 12 w <- G.adjacentVertex(t,e) + * 13 if vertex w is not discovered and not explored + * 14 label e as tree-edge + * 15 label w as discovered + * 16 S.push(w) + * 17 continue at 5 + * 18 else if vertex w is discovered + * 19 label e as back-edge + * 20 else + * 21 // vertex w is explored + * 22 label e as forward- or cross-edge + * 23 label t as explored + * 24 S.pop() + * + * convention: + * 0x10 - discovered + * 0x11 - discovered and fall-through edge labelled + * 0x12 - discovered and fall-through and branch edges labelled + * 0x20 - explored + */ + +enum { + DISCOVERED = 0x10, + EXPLORED = 0x20, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +#define STATE_LIST_MARK ((struct verifier_state_list *) -1L) + +static int *insn_stack; /* stack of insns to process */ +static int cur_stack; /* current stack index */ +static int *insn_state; + +/* t, w, e - match pseudo-code above: + * t - index of current instruction + * w - next instruction + * e - edge + */ +static int 
push_insn(int t, int w, int e, struct verifier_env *env) +{ + if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) + return 0; + + if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) + return 0; + + if (w < 0 || w >= env->prog->len) { + verbose("jump out of range from insn %d to %d\n", t, w); + return -EINVAL; + } + + if (e == BRANCH) + /* mark branch target for state pruning */ + env->explored_states[w] = STATE_LIST_MARK; + + if (insn_state[w] == 0) { + /* tree-edge */ + insn_state[t] = DISCOVERED | e; + insn_state[w] = DISCOVERED; + if (cur_stack >= env->prog->len) + return -E2BIG; + insn_stack[cur_stack++] = w; + return 1; + } else if ((insn_state[w] & 0xF0) == DISCOVERED) { + verbose("back-edge from insn %d to %d\n", t, w); + return -EINVAL; + } else if (insn_state[w] == EXPLORED) { + /* forward- or cross-edge */ + insn_state[t] = DISCOVERED | e; + } else { + verbose("insn state internal bug\n"); + return -EFAULT; + } + return 0; +} + +/* non-recursive depth-first-search to detect loops in BPF program + * loop == back-edge in directed graph + */ +static int check_cfg(struct verifier_env *env) +{ + struct bpf_insn *insns = env->prog->insnsi; + int insn_cnt = env->prog->len; + int ret = 0; + int i, t; + + insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); + if (!insn_state) + return -ENOMEM; + + insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); + if (!insn_stack) { + kfree(insn_state); + return -ENOMEM; + } + + insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ + insn_stack[0] = 0; /* 0 is the first instruction */ + cur_stack = 1; + +peek_stack: + if (cur_stack == 0) + goto check_state; + t = insn_stack[cur_stack - 1]; + + if (BPF_CLASS(insns[t].code) == BPF_JMP) { + u8 opcode = BPF_OP(insns[t].code); + + if (opcode == BPF_EXIT) { + goto mark_explored; + } else if (opcode == BPF_CALL) { + ret = push_insn(t, t + 1, FALLTHROUGH, env); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + } else if (opcode == BPF_JA) { + if (BPF_SRC(insns[t].code) != BPF_K) { + ret = -EINVAL; + goto err_free; + } + /* unconditional jump with single edge */ + ret = push_insn(t, t + insns[t].off + 1, + FALLTHROUGH, env); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + /* tell verifier to check for equivalent states + * after every call and jump + */ + env->explored_states[t + 1] = STATE_LIST_MARK; + } else { + /* conditional jump with two edges */ + ret = push_insn(t, t + 1, FALLTHROUGH, env); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + + ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + } + } else { + /* all other non-branch instructions with single + * fall-through edge + */ + ret = push_insn(t, t + 1, FALLTHROUGH, env); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + } + +mark_explored: + insn_state[t] = EXPLORED; + if (cur_stack-- <= 0) { + verbose("pop stack internal bug\n"); + ret = -EFAULT; + goto err_free; + } + goto peek_stack; + +check_state: + for (i = 0; i < insn_cnt; i++) { + if (insn_state[i] != EXPLORED) { + verbose("unreachable insn %d\n", i); + ret = -EINVAL; + goto err_free; + } + } + ret = 0; /* cfg looks good */ + +err_free: + kfree(insn_state); + kfree(insn_stack); + return ret; +} + +/* compare two verifier states + * + * all states stored in state_list are known to be valid, since + * verifier reached 'bpf_exit' instruction through them + * + * this function is 
called when verifier exploring different branches of + * execution popped from the state stack. If it sees an old state that has + * more strict register state and more strict stack state then this execution + * branch doesn't need to be explored further, since verifier already + * concluded that more strict state leads to valid finish. + * + * Therefore two states are equivalent if register state is more conservative + * and explored stack state is more conservative than the current one. + * Example: + * explored current + * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) + * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) + * + * In other words if current stack state (one being explored) has more + * valid slots than old one that already passed validation, it means + * the verifier can stop exploring and conclude that current state is valid too + * + * Similarly with registers. If explored state has register type as invalid + * whereas register type in current state is meaningful, it means that + * the current state will reach 'bpf_exit' instruction safely + */ +static bool states_equal(struct verifier_state *old, struct verifier_state *cur) +{ + int i; + + for (i = 0; i < MAX_BPF_REG; i++) { + if (memcmp(&old->regs[i], &cur->regs[i], + sizeof(old->regs[0])) != 0) { + if (old->regs[i].type == NOT_INIT || + old->regs[i].type == UNKNOWN_VALUE) + continue; + return false; + } + } + + for (i = 0; i < MAX_BPF_STACK; i++) { + if (memcmp(&old->stack[i], &cur->stack[i], + sizeof(old->stack[0])) != 0) { + if (old->stack[i].stype == STACK_INVALID) + continue; + return false; + } + } + return true; +} + +static int is_state_visited(struct verifier_env *env, int insn_idx) +{ + struct verifier_state_list *new_sl; + struct verifier_state_list *sl; + + sl = env->explored_states[insn_idx]; + if (!sl) + /* this 'insn_idx' instruction wasn't marked, so we will not + * be doing state search here + */ + return 0; + + while (sl != STATE_LIST_MARK) { + if (states_equal(&sl->state, &env->cur_state)) + /* reached equivalent register/stack state, + * prune the search + */ + return 1; + sl = sl->next; + } + + /* there were no equivalent states, remember current one. + * technically the current state is not proven to be safe yet, + * but it will either reach bpf_exit (which means it's safe) or + * it will be rejected. Since there are no loops, we won't be + * seeing this 'insn_idx' instruction again on the way to bpf_exit + */ + new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER); + if (!new_sl) + return -ENOMEM; + + /* add new state to the head of linked list */ + memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); + new_sl->next = env->explored_states[insn_idx]; + env->explored_states[insn_idx] = new_sl; + return 0; +} + +static int do_check(struct verifier_env *env) +{ + struct verifier_state *state = &env->cur_state; + struct bpf_insn *insns = env->prog->insnsi; + struct reg_state *regs = state->regs; + int insn_cnt = env->prog->len; + int insn_idx, prev_insn_idx = 0; + int insn_processed = 0; + bool do_print_state = false; + + init_reg_state(regs); + insn_idx = 0; + for (;;) { + struct bpf_insn *insn; + u8 class; + int err; + + if (insn_idx >= insn_cnt) { + verbose("invalid insn idx %d insn_cnt %d\n", + insn_idx, insn_cnt); + return -EFAULT; + } + + insn = &insns[insn_idx]; + class = BPF_CLASS(insn->code); + + if (++insn_processed > 32768) { + verbose("BPF program is too large. 
Proccessed %d insn\n", + insn_processed); + return -E2BIG; + } + + err = is_state_visited(env, insn_idx); + if (err < 0) + return err; + if (err == 1) { + /* found equivalent state, can prune the search */ + if (log_level) { + if (do_print_state) + verbose("\nfrom %d to %d: safe\n", + prev_insn_idx, insn_idx); + else + verbose("%d: safe\n", insn_idx); + } + goto process_bpf_exit; + } + + if (log_level && do_print_state) { + verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); + print_verifier_state(env); + do_print_state = false; + } + + if (log_level) { + verbose("%d: ", insn_idx); + print_bpf_insn(insn); + } + + if (class == BPF_ALU || class == BPF_ALU64) { + err = check_alu_op(regs, insn); + if (err) + return err; + + } else if (class == BPF_LDX) { + if (BPF_MODE(insn->code) != BPF_MEM || + insn->imm != 0) { + verbose("BPF_LDX uses reserved fields\n"); + return -EINVAL; + } + /* check src operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + + err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); + if (err) + return err; + + /* check that memory (src_reg + off) is readable, + * the state of dst_reg will be updated by this func + */ + err = check_mem_access(env, insn->src_reg, insn->off, + BPF_SIZE(insn->code), BPF_READ, + insn->dst_reg); + if (err) + return err; + + } else if (class == BPF_STX) { + if (BPF_MODE(insn->code) == BPF_XADD) { + err = check_xadd(env, insn); + if (err) + return err; + insn_idx++; + continue; + } + + if (BPF_MODE(insn->code) != BPF_MEM || + insn->imm != 0) { + verbose("BPF_STX uses reserved fields\n"); + return -EINVAL; + } + /* check src1 operand */ + err = check_reg_arg(regs, insn->src_reg, SRC_OP); + if (err) + return err; + /* check src2 operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + /* check that memory (dst_reg + off) is writeable */ + err = check_mem_access(env, insn->dst_reg, insn->off, + BPF_SIZE(insn->code), BPF_WRITE, + insn->src_reg); + if (err) + return err; + + } else if (class == BPF_ST) { + if (BPF_MODE(insn->code) != BPF_MEM || + insn->src_reg != BPF_REG_0) { + verbose("BPF_ST uses reserved fields\n"); + return -EINVAL; + } + /* check src operand */ + err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + if (err) + return err; + + /* check that memory (dst_reg + off) is writeable */ + err = check_mem_access(env, insn->dst_reg, insn->off, + BPF_SIZE(insn->code), BPF_WRITE, + -1); + if (err) + return err; + + } else if (class == BPF_JMP) { + u8 opcode = BPF_OP(insn->code); + + if (opcode == BPF_CALL) { + if (BPF_SRC(insn->code) != BPF_K || + insn->off != 0 || + insn->src_reg != BPF_REG_0 || + insn->dst_reg != BPF_REG_0) { + verbose("BPF_CALL uses reserved fields\n"); + return -EINVAL; + } + + err = check_call(env, insn->imm); + if (err) + return err; + + } else if (opcode == BPF_JA) { + if (BPF_SRC(insn->code) != BPF_K || + insn->imm != 0 || + insn->src_reg != BPF_REG_0 || + insn->dst_reg != BPF_REG_0) { + verbose("BPF_JA uses reserved fields\n"); + return -EINVAL; + } + + insn_idx += insn->off + 1; + continue; + + } else if (opcode == BPF_EXIT) { + if (BPF_SRC(insn->code) != BPF_K || + insn->imm != 0 || + insn->src_reg != BPF_REG_0 || + insn->dst_reg != BPF_REG_0) { + verbose("BPF_EXIT uses reserved fields\n"); + return -EINVAL; + } + + /* eBPF calling convetion is such that R0 is used + * to return the value from eBPF program. 
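As a concrete illustration of this rule, here are two toy instruction sequences written with the BPF_* macros used by lib/test_bpf.c later in this patch (R0 is that file's alias for BPF_REG_0; the snippets assume the same headers and are not meant to build on their own):

    /* Passes the check: R0 is written before the program exits. */
    struct bpf_insn prog_ok[] = {
        BPF_ALU64_IMM(BPF_MOV, R0, 1),
        BPF_EXIT_INSN(),
    };

    /* Rejected with "R0 !read_ok": nothing ever wrote R0, so the exit value
     * would be whatever the register happens to contain. */
    struct bpf_insn prog_bad[] = {
        BPF_EXIT_INSN(),
    };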
+ * Make sure that it's readable at this time + * of bpf_exit, which means that program wrote + * something into it earlier + */ + err = check_reg_arg(regs, BPF_REG_0, SRC_OP); + if (err) + return err; + +process_bpf_exit: + insn_idx = pop_stack(env, &prev_insn_idx); + if (insn_idx < 0) { + break; + } else { + do_print_state = true; + continue; + } + } else { + err = check_cond_jmp_op(env, insn, &insn_idx); + if (err) + return err; + } + } else if (class == BPF_LD) { + u8 mode = BPF_MODE(insn->code); + + if (mode == BPF_ABS || mode == BPF_IND) { + verbose("LD_ABS is not supported yet\n"); + return -EINVAL; + } else if (mode == BPF_IMM) { + err = check_ld_imm(env, insn); + if (err) + return err; + + insn_idx++; + } else { + verbose("invalid BPF_LD mode\n"); + return -EINVAL; + } + } else { + verbose("unknown insn class %d\n", class); + return -EINVAL; + } + + insn_idx++; + } + + return 0; +} + +/* look for pseudo eBPF instructions that access map FDs and + * replace them with actual map pointers + */ +static int replace_map_fd_with_map_ptr(struct verifier_env *env) +{ + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; + int i, j; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { + struct bpf_map *map; + struct fd f; + + if (i == insn_cnt - 1 || insn[1].code != 0 || + insn[1].dst_reg != 0 || insn[1].src_reg != 0 || + insn[1].off != 0) { + verbose("invalid bpf_ld_imm64 insn\n"); + return -EINVAL; + } + + if (insn->src_reg == 0) + /* valid generic load 64-bit imm */ + goto next_insn; + + if (insn->src_reg != BPF_PSEUDO_MAP_FD) { + verbose("unrecognized bpf_ld_imm64 insn\n"); + return -EINVAL; + } + + f = fdget(insn->imm); + + map = bpf_map_get(f); + if (IS_ERR(map)) { + verbose("fd %d is not pointing to valid bpf_map\n", + insn->imm); + fdput(f); + return PTR_ERR(map); + } + + /* store map pointer inside BPF_LD_IMM64 instruction */ + insn[0].imm = (u32) (unsigned long) map; + insn[1].imm = ((u64) (unsigned long) map) >> 32; + + /* check whether we recorded this map already */ + for (j = 0; j < env->used_map_cnt; j++) + if (env->used_maps[j] == map) { + fdput(f); + goto next_insn; + } + + if (env->used_map_cnt >= MAX_USED_MAPS) { + fdput(f); + return -E2BIG; + } + + /* remember this map */ + env->used_maps[env->used_map_cnt++] = map; + + /* hold the map. If the program is rejected by verifier, + * the map will be released by release_maps() or it + * will be used by the valid program until it's unloaded + * and all maps are released in free_bpf_prog_info() + */ + atomic_inc(&map->refcnt); + + fdput(f); +next_insn: + insn++; + i++; + } + } + + /* now all pseudo BPF_LD_IMM64 instructions load valid + * 'struct bpf_map *' into a register instead of user map_fd. + * These pointers will be used later by verifier to validate map access. 
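The 64-bit split used here is easy to get wrong because of sign extension, so a small self-contained sketch of the round trip may help: split_imm64() mirrors the two insn->imm stores in replace_map_fd_with_map_ptr() and join_imm64() mirrors ld_imm64_to_map_ptr(). The names and the sample value are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the imm fields of the two instructions forming BPF_LD_IMM64. */
    struct imm_pair {
        int32_t imm_lo;    /* insn[0].imm */
        int32_t imm_hi;    /* insn[1].imm */
    };

    static struct imm_pair split_imm64(uint64_t v)
    {
        struct imm_pair p = { (int32_t)v, (int32_t)(v >> 32) };
        return p;
    }

    /* The cast through uint32_t before widening is what keeps a negative
     * imm_lo from sign-extending into the upper half. */
    static uint64_t join_imm64(struct imm_pair p)
    {
        return (uint64_t)(uint32_t)p.imm_lo |
               ((uint64_t)(uint32_t)p.imm_hi << 32);
    }

    int main(void)
    {
        uint64_t ptr = 0xffff8800deadbeefULL;    /* arbitrary example value */
        struct imm_pair p = split_imm64(ptr);

        printf("round trip %s\n", join_imm64(p) == ptr ? "ok" : "broken");
        return 0;
    }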
+ */ + return 0; +} + +/* drop refcnt of maps used by the rejected program */ +static void release_maps(struct verifier_env *env) +{ + int i; + + for (i = 0; i < env->used_map_cnt; i++) + bpf_map_put(env->used_maps[i]); +} + +/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ +static void convert_pseudo_ld_imm64(struct verifier_env *env) +{ + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; + int i; + + for (i = 0; i < insn_cnt; i++, insn++) + if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) + insn->src_reg = 0; +} + +static void free_states(struct verifier_env *env) +{ + struct verifier_state_list *sl, *sln; + int i; + + if (!env->explored_states) + return; + + for (i = 0; i < env->prog->len; i++) { + sl = env->explored_states[i]; + + if (sl) + while (sl != STATE_LIST_MARK) { + sln = sl->next; + kfree(sl); + sl = sln; + } + } + + kfree(env->explored_states); +} + +int bpf_check(struct bpf_prog *prog, union bpf_attr *attr) +{ + char __user *log_ubuf = NULL; + struct verifier_env *env; + int ret = -EINVAL; + + if (prog->len <= 0 || prog->len > BPF_MAXINSNS) + return -E2BIG; + + /* 'struct verifier_env' can be global, but since it's not small, + * allocate/free it every time bpf_check() is called + */ + env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL); + if (!env) + return -ENOMEM; + + env->prog = prog; + + /* grab the mutex to protect few globals used by verifier */ + mutex_lock(&bpf_verifier_lock); + + if (attr->log_level || attr->log_buf || attr->log_size) { + /* user requested verbose verifier output + * and supplied buffer to store the verification trace + */ + log_level = attr->log_level; + log_ubuf = (char __user *) (unsigned long) attr->log_buf; + log_size = attr->log_size; + log_len = 0; + + ret = -EINVAL; + /* log_* values have to be sane */ + if (log_size < 128 || log_size > UINT_MAX >> 8 || + log_level == 0 || log_ubuf == NULL) + goto free_env; + + ret = -ENOMEM; + log_buf = vmalloc(log_size); + if (!log_buf) + goto free_env; + } else { + log_level = 0; + } + + ret = replace_map_fd_with_map_ptr(env); + if (ret < 0) + goto skip_full_check; + + env->explored_states = kcalloc(prog->len, + sizeof(struct verifier_state_list *), + GFP_USER); + ret = -ENOMEM; + if (!env->explored_states) + goto skip_full_check; + + ret = check_cfg(env); + if (ret < 0) + goto skip_full_check; + + ret = do_check(env); + +skip_full_check: + while (pop_stack(env, NULL) >= 0); + free_states(env); + + if (log_level && log_len >= log_size - 1) { + BUG_ON(log_len >= log_size); + /* verifier log exceeded user supplied buffer */ + ret = -ENOSPC; + /* fall through to return what was recorded */ + } + + /* copy verifier log back to user space including trailing zero */ + if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { + ret = -EFAULT; + goto free_log_buf; + } + + if (ret == 0 && env->used_map_cnt) { + /* if program passed verifier, update used_maps in bpf_prog_info */ + prog->aux->used_maps = kmalloc_array(env->used_map_cnt, + sizeof(env->used_maps[0]), + GFP_KERNEL); + + if (!prog->aux->used_maps) { + ret = -ENOMEM; + goto free_log_buf; + } + + memcpy(prog->aux->used_maps, env->used_maps, + sizeof(env->used_maps[0]) * env->used_map_cnt); + prog->aux->used_map_cnt = env->used_map_cnt; + + /* program is valid. 
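The log handling in bpf_check() above follows a small contract with user space; a sketch of the parameter checks, with hypothetical names but the same constants as the patch, looks like this (-ENOSPC is what the caller later sees when the trace did not fit into log_size):

    #include <errno.h>
    #include <limits.h>
    #include <stddef.h>

    /* Returns 0 when no log was requested, 1 when the request is usable and
     * -EINVAL otherwise - the same conditions bpf_check() applies before
     * allocating the kernel-side buffer. */
    static int verifier_log_request_ok(unsigned int log_level,
                                       unsigned int log_size,
                                       const void *log_ubuf)
    {
        if (log_level == 0 && log_size == 0 && log_ubuf == NULL)
            return 0;

        if (log_size < 128 ||              /* too small to be useful */
            log_size > UINT_MAX >> 8 ||    /* keep the allocation bounded */
            log_level == 0 || log_ubuf == NULL)
            return -EINVAL;

        return 1;
    }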
Convert pseudo bpf_ld_imm64 into generic + * bpf_ld_imm64 instructions + */ + convert_pseudo_ld_imm64(env); + } + +free_log_buf: + if (log_level) + vfree(log_buf); +free_env: + if (!prog->aux->used_maps) + /* if we didn't copy map pointers into bpf_prog_info, release + * them now. Otherwise free_bpf_prog_info() will release them. + */ + release_maps(env); + kfree(env); + mutex_unlock(&bpf_verifier_lock); + return ret; +} diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c index c766ee54c0b18bd27f87d4fcfb7f3caa24781343..b64e238b553b9430361127b223d2a003b70b1c99 100644 --- a/kernel/crash_dump.c +++ b/kernel/crash_dump.c @@ -18,6 +18,7 @@ unsigned long saved_max_pfn; * it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. */ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; +EXPORT_SYMBOL_GPL(elfcorehdr_addr); /* * stores the size of elf header of crash image diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 44eb005c6695010e79a5041b837914a12c69aa45..84922befea8414468eafe1330ffc372ec9bdb8da 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) if (!filter) goto free_prog; - filter->prog = kzalloc(bpf_prog_size(new_len), - GFP_KERNEL|__GFP_NOWARN); + filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN); if (!filter->prog) goto free_filter; ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len); if (ret) goto free_filter_prog; - kfree(fp); + kfree(fp); atomic_set(&filter->usage, 1); filter->prog->len = new_len; @@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) return filter; free_filter_prog: - kfree(filter->prog); + __bpf_prog_free(filter->prog); free_filter: kfree(filter); free_prog: diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index d4709d4810537a904958740dfed2ae4183cb2094..02aa4185b17e3b7b579e18f119687d1ce415103d 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -221,3 +221,6 @@ cond_syscall(sys_kcmp); /* operate on Secure Computing state */ cond_syscall(sys_seccomp); + +/* access BPF programs and maps */ +cond_syscall(sys_bpf); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a28590083622705dd007bffd5d3a57f87f133fed..3ac43f34437bad6d64f870222c51dbc41b2c73b2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1672,7 +1672,8 @@ config TEST_BPF against the BPF interpreter or BPF JIT compiler depending on the current setting. This is in particular useful for BPF JIT compiler development, but also to run regression tests against changes in - the interpreter code. + the interpreter code. It also enables test stubs for eBPF maps and + verifier used by user space verifier testsuite. If unsure, say N. diff --git a/lib/random32.c b/lib/random32.c index c9b6bf3afe0cbfd833f34304416ad21b5a7c94fc..0bee183fa18faaf3314ac37244469f111408cd26 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -37,6 +37,7 @@ #include #include #include +#include #ifdef CONFIG_RANDOM32_SELFTEST static void __init prandom_state_selftest(void); @@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32); * This is used for pseudo-randomness with no outside seeding. * For more random results, use prandom_bytes(). 
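The rewritten prandom_bytes_state() below drains the generator one whole word at a time and then peels the remaining bytes off a single extra word. A userspace sketch of the same loop, with a toy generator standing in for prandom_u32_state() and memcpy() standing in for put_unaligned():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy LCG, only so the sketch is runnable - not the kernel PRNG. */
    static uint32_t toy_next(uint32_t *s)
    {
        *s = *s * 1664525u + 1013904223u;
        return *s;
    }

    static void fill_bytes(uint32_t *s, void *buf, size_t bytes)
    {
        uint8_t *ptr = buf;

        /* whole 32-bit words first */
        while (bytes >= sizeof(uint32_t)) {
            uint32_t w = toy_next(s);

            memcpy(ptr, &w, sizeof(w));
            ptr += sizeof(w);
            bytes -= sizeof(w);
        }

        /* then 1-3 trailing bytes from one extra word, low byte first */
        if (bytes > 0) {
            uint32_t rem = toy_next(s);

            do {
                *ptr++ = (uint8_t)rem;
                rem >>= 8;
            } while (--bytes > 0);
        }
    }

    int main(void)
    {
        uint32_t seed = 1;
        uint8_t out[7];

        fill_bytes(&seed, out, sizeof(out));
        printf("%02x .. %02x\n", out[0], out[6]);
        return 0;
    }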
*/ -void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes) +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) { - unsigned char *p = buf; - int i; - - for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) { - u32 random = prandom_u32_state(state); - int j; + u8 *ptr = buf; - for (j = 0; j < sizeof(u32); j++) { - p[i + j] = random; - random >>= BITS_PER_BYTE; - } + while (bytes >= sizeof(u32)) { + put_unaligned(prandom_u32_state(state), (u32 *) ptr); + ptr += sizeof(u32); + bytes -= sizeof(u32); } - if (i < bytes) { - u32 random = prandom_u32_state(state); - for (; i < bytes; i++) { - p[i] = random; - random >>= BITS_PER_BYTE; - } + if (bytes > 0) { + u32 rem = prandom_u32_state(state); + do { + *ptr++ = (u8) rem; + bytes--; + rem >>= BITS_PER_BYTE; + } while (bytes > 0); } } EXPORT_SYMBOL(prandom_bytes_state); @@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state); * @buf: where to copy the pseudo-random bytes to * @bytes: the requested number of bytes */ -void prandom_bytes(void *buf, int bytes) +void prandom_bytes(void *buf, size_t bytes) { struct rnd_state *state = &get_cpu_var(net_rand_state); @@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes); static void prandom_warmup(struct rnd_state *state) { - /* Calling RNG ten times to satify recurrence condition */ + /* Calling RNG ten times to satisfy recurrence condition */ prandom_u32_state(state); prandom_u32_state(state); prandom_u32_state(state); @@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state) static u32 __extract_hwseed(void) { - u32 val = 0; + unsigned int val = 0; (void)(arch_get_random_seed_int(&val) || arch_get_random_int(&val)); @@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare) prandom_seed(entropy); /* reseed every ~60 seconds, in [40 .. 
80) interval with slack */ - expires = 40 + (prandom_u32() % 40); + expires = 40 + prandom_u32_max(40); seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); add_timer(&seed_timer); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 3d2b4733f6cb7cf2fdca301d80f7d7bb0d6d61cc..081be3ba9ea8285b05374e6bb98299af541f567c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -297,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) ASSERT_RHT_MUTEX(ht); - if (tbl->size <= HASH_MIN_SIZE) + if (ht->shift <= ht->p.min_shift) return 0; ntbl = bucket_table_alloc(tbl->size / 2, flags); @@ -505,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); -static size_t rounded_hashtable_size(unsigned int nelem) +static size_t rounded_hashtable_size(struct rhashtable_params *params) { - return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE); + return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + 1UL << params->min_shift); } /** @@ -565,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) (!params->key_len && !params->obj_hashfn)) return -EINVAL; + params->min_shift = max_t(size_t, params->min_shift, + ilog2(HASH_MIN_SIZE)); + if (params->nelem_hint) - size = rounded_hashtable_size(params->nelem_hint); + size = rounded_hashtable_size(params); tbl = bucket_table_alloc(size, GFP_KERNEL); if (tbl == NULL) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 89e0345733bd33db9bc9ab1f6184a1d376ec9c89..23e070bcf72d3dea7dea8022e0381508662598c3 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -1341,6 +1341,44 @@ static struct bpf_test tests[] = { { }, { { 0, -1 } } }, + { + "INT: shifts by register", + .u.insns_int = { + BPF_MOV64_IMM(R0, -1234), + BPF_MOV64_IMM(R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R0, R2), + BPF_MOV32_IMM(R4, -1234), + BPF_JMP_REG(BPF_JEQ, R0, R4, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_AND, R4, 63), + BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */ + BPF_MOV64_IMM(R3, 47), + BPF_ALU64_REG(BPF_ARSH, R0, R3), + BPF_JMP_IMM(BPF_JEQ, R0, -617, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */ + BPF_JMP_IMM(BPF_JEQ, R4, 92, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 4), + BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */ + BPF_JMP_IMM(BPF_JEQ, R4, 64, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 5), + BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */ + BPF_JMP_IMM(BPF_JEQ, R4, 160, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R0, -1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, { "INT: DIV + ABS", .u.insns_int = { @@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = { { }, { { 1, 0 } }, }, + { + "load 64-bit immediate", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x567800001234LL), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_ALU64_IMM(BPF_LSH, R3, 32), + BPF_ALU64_IMM(BPF_RSH, R3, 32), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, }; static struct net_device dev; @@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err) break; case INTERNAL: - fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL); + fp = 
bpf_prog_alloc(bpf_prog_size(flen), 0); if (fp == NULL) { pr_cont("UNEXPECTED_FAIL no memory left\n"); *err = -ENOMEM; @@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data, int runs, u64 *duration) { u64 start, finish; - int ret, i; + int ret = 0, i; start = ktime_to_us(ktime_get()); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 35a6b6b15e8a1d9e2f846b18aa8c448ad0e915fd..0d441ec8763ec87eec018d2b043b82a5ead50aa0 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -799,7 +799,8 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + netif_keep_dst(dev); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops; diff --git a/net/Kconfig b/net/Kconfig index 4051fdfa43677ccf7142244c97e2e383515e77e8..d6b138e2c263851d0164cc321794363a52cc6c77 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -176,10 +176,11 @@ config NETFILTER_ADVANCED If unsure, say Y. config BRIDGE_NETFILTER - bool "Bridged IP/ARP packets filtering" - depends on BRIDGE && NETFILTER && INET + tristate "Bridged IP/ARP packets filtering" + depends on BRIDGE + depends on NETFILTER && INET depends on NETFILTER_ADVANCED - default y + default m ---help--- Enabling this option will let arptables resp. iptables see bridged ARP resp. IP traffic. If you want a bridging firewall, you probably diff --git a/net/atm/clip.c b/net/atm/clip.c index 46339040fef014771e504803eef23356c6a1a9f4..17e55dfecbe2a133a2a0e02d363b583a0334aecc 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -384,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ if (old) { - pr_warning("XOFF->XOFF transition\n"); + pr_warn("XOFF->XOFF transition\n"); goto out_release_neigh; } dev->stats.tx_packets++; @@ -447,7 +447,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) struct rtable *rt; if (vcc->push != clip_push) { - pr_warning("non-CLIP VCC\n"); + pr_warn("non-CLIP VCC\n"); return -EBADF; } clip_vcc = CLIP_VCC(vcc); @@ -501,7 +501,7 @@ static void clip_setup(struct net_device *dev) /* without any more elaborate queuing. 100 is a reasonable */ /* compromise between decent burst-tolerance and protection */ /* against memory hogs. */ - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } static int clip_create(int number) diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4000424282979b0c4709325102152..6a765156a3f6b81fcd63f43504d1a7faca8f206b 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -300,7 +300,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) max_sdu = ATM_MAX_AAL34_PDU; break; default: - pr_warning("AAL problems ... (%d)\n", aal); + pr_warn("AAL problems ... 
(%d)\n", aal); /* fall through */ case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index e8e0e7a8a23d12029440b2f01b1c10b6e6326f27..0e982222d4255dbbc5ec9818e65af1145e086a03 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -599,7 +599,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, } non_ip: - return mpc->old_ops->ndo_start_xmit(skb, dev); + return __netdev_start_xmit(mpc->old_ops, skb, dev, false); } static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 206b65ccd5b8bd318e554e452966fae1c582112f..c2e0d14433df6678011e07b00045c68e5cf7f998 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -39,6 +39,7 @@ static struct dentry *lowpan_control_debugfs; struct skb_cb { struct in6_addr addr; + struct in6_addr gw; struct l2cap_chan *chan; int status; }; @@ -158,6 +159,54 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev, return NULL; } +static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev, + struct in6_addr *daddr, + struct sk_buff *skb) +{ + struct lowpan_peer *peer, *tmp; + struct in6_addr *nexthop; + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + int count = atomic_read(&dev->peer_count); + + BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt); + + /* If we have multiple 6lowpan peers, then check where we should + * send the packet. If only one peer exists, then we can send the + * packet right away. + */ + if (count == 1) + return list_first_entry(&dev->peers, struct lowpan_peer, + list); + + if (!rt) { + nexthop = &lowpan_cb(skb)->gw; + + if (ipv6_addr_any(nexthop)) + return NULL; + } else { + nexthop = rt6_nexthop(rt); + + /* We need to remember the address because it is needed + * by bt_xmit() when sending the packet. In bt_xmit(), the + * destination routing info is not set. + */ + memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr)); + } + + BT_DBG("gw %pI6c", nexthop); + + list_for_each_entry_safe(peer, tmp, &dev->peers, list) { + BT_DBG("dst addr %pMR dst type %d ip %pI6c", + &peer->chan->dst, peer->chan->dst_type, + &peer->peer_addr); + + if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) + return peer; + } + + return NULL; +} + static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) { struct lowpan_dev *entry, *tmp; @@ -377,58 +426,85 @@ static void convert_dest_bdaddr(struct in6_addr *ip6_daddr, *addr_type = get_addr_type_from_eui64(addr->b[5]); } -static int header_create(struct sk_buff *skb, struct net_device *netdev, - unsigned short type, const void *_daddr, - const void *_saddr, unsigned int len) +static int setup_header(struct sk_buff *skb, struct net_device *netdev, + bdaddr_t *peer_addr, u8 *peer_addr_type) { - struct ipv6hdr *hdr; + struct in6_addr ipv6_daddr; struct lowpan_dev *dev; struct lowpan_peer *peer; bdaddr_t addr, *any = BDADDR_ANY; - u8 *saddr, *daddr = any->b; - u8 addr_type; - - if (type != ETH_P_IPV6) - return -EINVAL; - - hdr = ipv6_hdr(skb); + u8 *daddr = any->b; + int err, status = 0; dev = lowpan_dev(netdev); - if (ipv6_addr_is_multicast(&hdr->daddr)) { - memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, - sizeof(struct in6_addr)); + memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr)); + + if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; } else { unsigned long flags; + u8 addr_type; /* Get destination BT device from skb. * If there is no such peer then discard the packet. 
*/ - convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type); + convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type); BT_DBG("dest addr %pMR type %d IP %pI6c", &addr, - addr_type, &hdr->daddr); + addr_type, &ipv6_daddr); read_lock_irqsave(&devices_lock, flags); peer = peer_lookup_ba(dev, &addr, addr_type); read_unlock_irqrestore(&devices_lock, flags); if (!peer) { - BT_DBG("no such peer %pMR found", &addr); - return -ENOENT; + /* The packet might be sent to 6lowpan interface + * because of routing (either via default route + * or user set route) so get peer according to + * the destination address. + */ + read_lock_irqsave(&devices_lock, flags); + peer = peer_lookup_dst(dev, &ipv6_daddr, skb); + read_unlock_irqrestore(&devices_lock, flags); + if (!peer) { + BT_DBG("no such peer %pMR found", &addr); + return -ENOENT; + } } daddr = peer->eui64_addr; - - memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, - sizeof(struct in6_addr)); + *peer_addr = addr; + *peer_addr_type = addr_type; lowpan_cb(skb)->chan = peer->chan; + + status = 1; } - saddr = dev->netdev->dev_addr; + lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr, + dev->netdev->dev_addr, skb->len); + + err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0); + if (err < 0) + return err; + + return status; +} + +static int header_create(struct sk_buff *skb, struct net_device *netdev, + unsigned short type, const void *_daddr, + const void *_saddr, unsigned int len) +{ + struct ipv6hdr *hdr; + + if (type != ETH_P_IPV6) + return -EINVAL; + + hdr = ipv6_hdr(skb); + + memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr)); - return lowpan_header_compress(skb, netdev, type, daddr, saddr, len); + return 0; } /* Packet to BT LE device */ @@ -470,11 +546,12 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, return err; } -static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) +static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *local_skb; struct lowpan_dev *entry, *tmp; unsigned long flags; + int err = 0; read_lock_irqsave(&devices_lock, flags); @@ -488,55 +565,77 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) dev = lowpan_dev(entry->netdev); list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { + int ret; + local_skb = skb_clone(skb, GFP_ATOMIC); - send_pkt(pentry->chan, local_skb, netdev); + BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p", + netdev->name, + &pentry->chan->dst, pentry->chan->dst_type, + &pentry->peer_addr, pentry->chan); + ret = send_pkt(pentry->chan, local_skb, netdev); + if (ret < 0) + err = ret; kfree_skb(local_skb); } } read_unlock_irqrestore(&devices_lock, flags); + + return err; } static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) { int err = 0; - struct lowpan_dev *dev; - struct lowpan_peer *peer; bdaddr_t addr; u8 addr_type; + struct sk_buff *tmpskb; - if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) { - /* We need to send the packet to every device - * behind this interface. 
- */ - send_mcast_pkt(skb, netdev); - } else { - unsigned long flags; - - convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); - dev = lowpan_dev(netdev); - - read_lock_irqsave(&devices_lock, flags); - peer = peer_lookup_ba(dev, &addr, addr_type); - read_unlock_irqrestore(&devices_lock, flags); + /* We must take a copy of the skb before we modify/replace the ipv6 + * header as the header could be used elsewhere + */ + tmpskb = skb_unshare(skb, GFP_ATOMIC); + if (!tmpskb) { + kfree_skb(skb); + return NET_XMIT_DROP; + } + skb = tmpskb; - BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p", - netdev->name, &addr, addr_type, - &lowpan_cb(skb)->addr, peer); + /* Return values from setup_header() + * <0 - error, packet is dropped + * 0 - this is a multicast packet + * 1 - this is unicast packet + */ + err = setup_header(skb, netdev, &addr, &addr_type); + if (err < 0) { + kfree_skb(skb); + return NET_XMIT_DROP; + } - if (peer && peer->chan) - err = send_pkt(peer->chan, skb, netdev); - else + if (err) { + if (lowpan_cb(skb)->chan) { + BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p", + netdev->name, &addr, addr_type, + &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan); + err = send_pkt(lowpan_cb(skb)->chan, skb, netdev); + } else { err = -ENOENT; + } + } else { + /* We need to send the packet to every device behind this + * interface. + */ + err = send_mcast_pkt(skb, netdev); } + dev_kfree_skb(skb); if (err) BT_DBG("ERROR: xmit failed (%d)", err); - return (err < 0) ? NET_XMIT_DROP : err; + return err < 0 ? NET_XMIT_DROP : err; } static const struct net_device_ops netdev_ops = { @@ -556,7 +655,8 @@ static void netdev_setup(struct net_device *dev) dev->needed_tailroom = 0; dev->mtu = IPV6_MIN_MTU; dev->tx_queue_len = 0; - dev->flags = IFF_RUNNING | IFF_POINTOPOINT; + dev->flags = IFF_RUNNING | IFF_POINTOPOINT | + IFF_MULTICAST; dev->watchdog_timeo = 0; dev->netdev_ops = &netdev_ops; @@ -671,6 +771,14 @@ static struct l2cap_chan *chan_open(struct l2cap_chan *pchan) return chan; } +static void set_ip_addr_bits(u8 addr_type, u8 *addr) +{ + if (addr_type == BDADDR_LE_PUBLIC) + *addr |= 0x02; + else + *addr &= ~0x02; +} + static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, struct lowpan_dev *dev) { @@ -693,6 +801,11 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, EUI64_ADDR_LEN); + /* IPv6 address needs to have the U/L bit set properly so toggle + * it back here. 
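The set_ip_addr_bits() helper added above, and called right after this comment, flips a single bit: the U/L bit in the first byte of the EUI-64-derived interface identifier (byte 8 of the IPv6 address), set for a public Bluetooth address and cleared for a random one. A trivial standalone sketch of the same operation on a bare byte, with an invented stand-in for BDADDR_LE_PUBLIC:

    #include <stdio.h>

    #define ADDR_PUBLIC 1    /* stand-in for BDADDR_LE_PUBLIC */

    static unsigned char fix_ul_bit(int addr_type, unsigned char iid_byte0)
    {
        if (addr_type == ADDR_PUBLIC)
            return iid_byte0 | 0x02;    /* public address: set the bit */
        return iid_byte0 & ~0x02;       /* random address: clear the bit */
    }

    int main(void)
    {
        printf("%02x\n", fix_ul_bit(ADDR_PUBLIC, 0x00));    /* 02 */
        printf("%02x\n", fix_ul_bit(0, 0x02));              /* 00 */
        return 0;
    }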
+ */ + set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8); + write_lock_irqsave(&devices_lock, flags); INIT_LIST_HEAD(&peer->list); peer_add(dev, peer); @@ -772,16 +885,16 @@ static inline void chan_ready_cb(struct l2cap_chan *chan) ifup(dev->netdev); } -static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan) +static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) { - struct l2cap_chan *pchan; + struct l2cap_chan *chan; - pchan = chan_open(chan); - pchan->ops = chan->ops; + chan = chan_open(pchan); + chan->ops = pchan->ops; BT_DBG("chan %p pchan %p", chan, pchan); - return pchan; + return chan; } static void delete_netdev(struct work_struct *work) @@ -876,6 +989,9 @@ static void chan_suspend_cb(struct l2cap_chan *chan) BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb); + if (!skb) + return; + lowpan_cb(skb)->status = -EAGAIN; } @@ -885,12 +1001,15 @@ static void chan_resume_cb(struct l2cap_chan *chan) BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb); + if (!skb) + return; + lowpan_cb(skb)->status = 0; } static long chan_get_sndtimeo_cb(struct l2cap_chan *chan) { - return msecs_to_jiffies(1000); + return L2CAP_CONN_TIMEOUT; } static const struct l2cap_ops bt_6lowpan_chan_ops = { diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 4dca0299ed96875b16a80929a98b3a6972003be4..339c74ad45538f931ee1c31edb96c3f3f3298074 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -709,8 +709,11 @@ EXPORT_SYMBOL_GPL(bt_debugfs); static int __init bt_init(void) { + struct sk_buff *skb; int err; + BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb)); + BT_INFO("Core ver %s", VERSION); bt_debugfs = debugfs_create_dir("bluetooth", NULL); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 016cdb66df6cfa5bfd4bb7fcd808a616e9fcde42..2640d78f30b80080f5b240c8a25cf668392e2961 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -149,15 +149,14 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) if (ret) { BT_DBG("crypto_ahash_setkey failed: err %d", ret); } else { - struct { - struct shash_desc shash; - char ctx[crypto_shash_descsize(tfm)]; - } desc; + char desc[sizeof(struct shash_desc) + + crypto_shash_descsize(tfm)] CRYPTO_MINALIGN_ATTR; + struct shash_desc *shash = (struct shash_desc *)desc; - desc.shash.tfm = tfm; - desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + shash->tfm = tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - ret = crypto_shash_digest(&desc.shash, plaintext, psize, + ret = crypto_shash_digest(shash, plaintext, psize, output); } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index faff6247ac8fb8769bf58f235fd92c0deab6aaf1..b9517bd171901e7c6a9395791f3ecfc7fbefdd6d 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -36,19 +36,25 @@ struct sco_param { u16 pkt_type; u16 max_latency; + u8 retrans_effort; +}; + +static const struct sco_param esco_param_cvsd[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */ + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */ + { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */ + { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */ }; static const struct sco_param sco_param_cvsd[] = { - { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */ - { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */ - { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */ - { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */ - { 
EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */ + { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */ + { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */ }; -static const struct sco_param sco_param_wideband[] = { - { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */ - { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */ +static const struct sco_param esco_param_msbc[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */ }; static void hci_le_create_connection_cancel(struct hci_conn *conn) @@ -116,23 +122,36 @@ static void hci_reject_sco(struct hci_conn *conn) { struct hci_cp_reject_sync_conn_req cp; - cp.reason = HCI_ERROR_REMOTE_USER_TERM; + cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; bacpy(&cp.bdaddr, &conn->dst); hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp); } -void hci_disconnect(struct hci_conn *conn, __u8 reason) +int hci_disconnect(struct hci_conn *conn, __u8 reason) { struct hci_cp_disconnect cp; BT_DBG("hcon %p", conn); + /* When we are master of an established connection and it enters + * the disconnect timeout, then go ahead and try to read the + * current clock offset. Processing of the result is done + * within the event handling and hci_clock_offset_evt function. + */ + if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) { + struct hci_dev *hdev = conn->hdev; + struct hci_cp_read_clock_offset cp; + + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp); + } + conn->state = BT_DISCONN; cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; - hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); + return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); } static void hci_amp_disconn(struct hci_conn *conn) @@ -188,21 +207,26 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle) switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: - if (conn->attempt > ARRAY_SIZE(sco_param_wideband)) + if (conn->attempt > ARRAY_SIZE(esco_param_msbc)) return false; - cp.retrans_effort = 0x02; - param = &sco_param_wideband[conn->attempt - 1]; + param = &esco_param_msbc[conn->attempt - 1]; break; case SCO_AIRMODE_CVSD: - if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) - return false; - cp.retrans_effort = 0x01; - param = &sco_param_cvsd[conn->attempt - 1]; + if (lmp_esco_capable(conn->link)) { + if (conn->attempt > ARRAY_SIZE(esco_param_cvsd)) + return false; + param = &esco_param_cvsd[conn->attempt - 1]; + } else { + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) + return false; + param = &sco_param_cvsd[conn->attempt - 1]; + } break; default: return false; } + cp.retrans_effort = param->retrans_effort; cp.pkt_type = __cpu_to_le16(param->pkt_type); cp.max_latency = __cpu_to_le16(param->max_latency); @@ -325,25 +349,6 @@ static void hci_conn_timeout(struct work_struct *work) hci_amp_disconn(conn); } else { __u8 reason = hci_proto_disconn_ind(conn); - - /* When we are master of an established connection - * and it enters the disconnect timeout, then go - * ahead and try to read the current clock offset. - * - * Processing of the result is done within the - * event handling and hci_clock_offset_evt function. 
- */ - if (conn->type == ACL_LINK && - conn->role == HCI_ROLE_MASTER) { - struct hci_dev *hdev = conn->hdev; - struct hci_cp_read_clock_offset cp; - - cp.handle = cpu_to_le16(conn->handle); - - hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, - sizeof(cp), &cp); - } - hci_disconnect(conn, reason); } break; @@ -595,6 +600,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status) conn->dst_type); if (params && params->conn) { hci_conn_drop(params->conn); + hci_conn_put(params->conn); params->conn = NULL; } @@ -1290,11 +1296,16 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn) BT_DBG("%s hcon %p", hdev->name, conn); + if (test_bit(HCI_CONN_DROP, &conn->flags)) { + BT_DBG("Refusing to create new hci_chan"); + return NULL; + } + chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return NULL; - chan->conn = conn; + chan->conn = hci_conn_get(conn); skb_queue_head_init(&chan->data_q); chan->state = BT_CONNECTED; @@ -1314,7 +1325,10 @@ void hci_chan_del(struct hci_chan *chan) synchronize_rcu(); - hci_conn_drop(conn); + /* Prevent new hci_chan's to be created for this hci_conn */ + set_bit(HCI_CONN_DROP, &conn->flags); + + hci_conn_put(conn); skb_queue_purge(&chan->data_q); kfree(chan); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 1d9c29a00568d9b6ac3eea93848ec6637e5a5a5d..cb05d7f16a34acc0ca78958d3e5d612453f7d154 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1898,6 +1898,8 @@ static int __hci_init(struct hci_dev *hdev) debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs, &hdev->discov_interleaved_timeout); + + smp_register(hdev); } return 0; @@ -2539,6 +2541,7 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev) list_for_each_entry(p, &hdev->le_conn_params, list) { if (p->conn) { hci_conn_drop(p->conn); + hci_conn_put(p->conn); p->conn = NULL; } list_del_init(&p->action); @@ -3238,7 +3241,7 @@ struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) } list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { - if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) { + if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); return irk; } @@ -3723,6 +3726,18 @@ int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, return 0; } +static void hci_conn_params_free(struct hci_conn_params *params) +{ + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + } + + list_del(¶ms->action); + list_del(¶ms->list); + kfree(params); +} + /* This function requires the caller holds hdev->lock */ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { @@ -3732,12 +3747,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) if (!params) return; - if (params->conn) - hci_conn_drop(params->conn); - - list_del(¶ms->action); - list_del(¶ms->list); - kfree(params); + hci_conn_params_free(params); hci_update_background_scan(hdev); @@ -3764,13 +3774,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; - list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { - if (params->conn) - hci_conn_drop(params->conn); - list_del(¶ms->action); - list_del(¶ms->list); - kfree(params); - } + list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) + hci_conn_params_free(params); hci_update_background_scan(hdev); @@ -3867,6 +3872,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) if (test_bit(HCI_LE_ADV, &hdev->dev_flags) || 
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { BT_DBG("Deferring random address update"); + set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); return; } @@ -3892,7 +3898,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; - err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa); + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { BT_ERR("%s failed to generate new RPA", hdev->name); return err; @@ -4100,18 +4106,9 @@ int hci_register_dev(struct hci_dev *hdev) dev_set_name(&hdev->dev, "%s", hdev->name); - hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(hdev->tfm_aes)) { - BT_ERR("Unable to create crypto context"); - error = PTR_ERR(hdev->tfm_aes); - hdev->tfm_aes = NULL; - goto err_wqueue; - } - error = device_add(&hdev->dev); if (error < 0) - goto err_tfm; + goto err_wqueue; hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, @@ -4153,8 +4150,6 @@ int hci_register_dev(struct hci_dev *hdev) return id; -err_tfm: - crypto_free_blkcipher(hdev->tfm_aes); err_wqueue: destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); @@ -4206,8 +4201,7 @@ void hci_unregister_dev(struct hci_dev *hdev) rfkill_destroy(hdev->rfkill); } - if (hdev->tfm_aes) - crypto_free_blkcipher(hdev->tfm_aes); + smp_unregister(hdev); device_del(&hdev->dev); @@ -4380,26 +4374,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data, return remain; } -int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) -{ - int rem = 0; - - if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) - return -EILSEQ; - - while (count) { - rem = hci_reassembly(hdev, type, data, count, type - 1); - if (rem < 0) - return rem; - - data += (count - rem); - count = rem; - } - - return rem; -} -EXPORT_SYMBOL(hci_recv_fragment); - #define STREAM_REASSEMBLY 0 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) @@ -4553,6 +4527,7 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, BT_DBG("skb len %d", skb->len); bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + bt_cb(skb)->opcode = opcode; return skb; } @@ -5690,3 +5665,52 @@ void hci_update_background_scan(struct hci_dev *hdev) if (err) BT_ERR("Failed to run HCI request: err %d", err); } + +static bool disconnected_whitelist_entries(struct hci_dev *hdev) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->whitelist, list) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); + if (!conn) + return true; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + return true; + } + + return false; +} + +void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req) +{ + u8 scan; + + if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + return; + + if (!hdev_is_powered(hdev)) + return; + + if (mgmt_powering_down(hdev)) + return; + + if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || + disconnected_whitelist_entries(hdev)) + scan = SCAN_PAGE; + else + scan = SCAN_DISABLED; + + if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE)) + return; + + if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) + scan |= SCAN_INQUIRY; + + if (req) + hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + else + hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 
a6000823f0ff34d938631bed2e39a6b17a26ac13..8b0a2a6de4199cea5610890a887296d04dec3eca 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2071,6 +2071,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); + + hci_update_page_scan(hdev, NULL); } /* Set packet type for incoming connection */ @@ -2247,9 +2249,12 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, reason, mgmt_connected); - if (conn->type == ACL_LINK && - test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) - hci_remove_link_key(hdev, &conn->dst); + if (conn->type == ACL_LINK) { + if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + + hci_update_page_scan(hdev, NULL); + } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { @@ -2315,8 +2320,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->sec_level = conn->pending_sec_level; } } else { - mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, - ev->status); + mgmt_auth_failed(conn, ev->status); } clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); @@ -2434,6 +2438,12 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) } } + /* We should disregard the current RPA and generate a new one + * whenever the encryption procedure fails. + */ + if (ev->status && conn->type == LE_LINK) + set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); if (ev->status && conn->state == BT_CONNECTED) { @@ -3895,8 +3905,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, * event gets always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) - mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, - ev->status); + mgmt_auth_failed(conn, ev->status); hci_conn_drop(conn); @@ -4188,16 +4197,16 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->dst_type = irk->addr_type; } - if (conn->dst_type == ADDR_LE_DEV_PUBLIC) - addr_type = BDADDR_LE_PUBLIC; - else - addr_type = BDADDR_LE_RANDOM; - if (ev->status) { hci_le_conn_failed(conn, ev->status); goto unlock; } + if (conn->dst_type == ADDR_LE_DEV_PUBLIC) + addr_type = BDADDR_LE_PUBLIC; + else + addr_type = BDADDR_LE_RANDOM; + /* Drop the connection if the device is blocked */ if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) { hci_conn_drop(conn); @@ -4220,11 +4229,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_proto_connect_cfm(conn, ev->status); - params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, + conn->dst_type); if (params) { list_del_init(¶ms->action); if (params->conn) { hci_conn_drop(params->conn); + hci_conn_put(params->conn); params->conn = NULL; } } @@ -4316,7 +4327,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, * the parameters get removed and keep the reference * count consistent once the connection is established. 
*/ - params->conn = conn; + params->conn = hci_conn_get(conn); return; } @@ -4501,10 +4512,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); cp.handle = cpu_to_le16(conn->handle); - if (ltk->authenticated) - conn->pending_sec_level = BT_SECURITY_HIGH; - else - conn->pending_sec_level = BT_SECURITY_MEDIUM; + conn->pending_sec_level = smp_ltk_sec_level(ltk); conn->enc_key_size = ltk->enc_size; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 6c7ecf116e74ba5e4b3a88ab7c71a5f632563577..1b7d605706aa5720b45e056d3a75f235c9c4ab60 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -915,7 +915,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, /* connection management */ bacpy(&session->bdaddr, bdaddr); - session->conn = conn; + session->conn = l2cap_conn_get(conn); session->user.probe = hidp_session_probe; session->user.remove = hidp_session_remove; session->ctrl_sock = ctrl_sock; @@ -941,13 +941,13 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, if (ret) goto err_free; - l2cap_conn_get(session->conn); get_file(session->intr_sock->file); get_file(session->ctrl_sock->file); *out = session; return 0; err_free: + l2cap_conn_put(session->conn); kfree(session); return ret; } @@ -1327,10 +1327,8 @@ int hidp_connection_add(struct hidp_connadd_req *req, conn = NULL; l2cap_chan_lock(chan); - if (chan->conn) { - l2cap_conn_get(chan->conn); - conn = chan->conn; - } + if (chan->conn) + conn = l2cap_conn_get(chan->conn); l2cap_chan_unlock(chan); if (!conn) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 46547b920f88edf7e6df5b6c5d10ae30daa47d67..b6f9777e057da11105d63deabf9ebd0dc897436c 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -210,6 +210,10 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) { write_lock(&chan_list_lock); + /* Override the defaults (which are for conn-oriented) */ + chan->omtu = L2CAP_DEFAULT_MTU; + chan->chan_type = L2CAP_CHAN_FIXED; + chan->scid = scid; write_unlock(&chan_list_lock); @@ -542,7 +546,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) l2cap_chan_hold(chan); - hci_conn_hold(conn->hcon); + /* Only keep a reference for fixed channels if they requested it */ + if (chan->chan_type != L2CAP_CHAN_FIXED || + test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) + hci_conn_hold(conn->hcon); list_add(&chan->list, &conn->chan_l); } @@ -562,6 +569,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) BT_DBG("chan %p, conn %p, err %d", chan, conn, err); + chan->ops->teardown(chan, err); + if (conn) { struct amp_mgr *mgr = conn->hcon->amp_mgr; /* Delete from channel list */ @@ -571,7 +580,12 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) chan->conn = NULL; - if (chan->scid != L2CAP_CID_A2MP) + /* Reference was only held for non-fixed channels or + * fixed channels that explicitly requested it using the + * FLAG_HOLD_HCI_CONN flag. 
+ */ + if (chan->chan_type != L2CAP_CHAN_FIXED || + test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_drop(conn->hcon); if (mgr && mgr->bredr_chan == chan) @@ -585,8 +599,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) amp_disconnect_logical_link(hs_hchan); } - chan->ops->teardown(chan, err); - if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) return; @@ -619,9 +631,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) } EXPORT_SYMBOL_GPL(l2cap_chan_del); -void l2cap_conn_update_id_addr(struct hci_conn *hcon) +static void l2cap_conn_update_id_addr(struct work_struct *work) { - struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_conn *conn = container_of(work, struct l2cap_conn, + id_addr_update_work); + struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; mutex_lock(&conn->chan_lock); @@ -1082,6 +1096,9 @@ static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) { + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) + return true; + return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } @@ -1266,6 +1283,24 @@ static void l2cap_start_connection(struct l2cap_chan *chan) } } +static void l2cap_request_info(struct l2cap_conn *conn) +{ + struct l2cap_info_req req; + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) + return; + + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); + + l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, + sizeof(req), &req); +} + static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; @@ -1275,26 +1310,17 @@ static void l2cap_do_start(struct l2cap_chan *chan) return; } - if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { - if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) - return; - - if (l2cap_chan_check_security(chan, true) && - __l2cap_no_conn_pending(chan)) { - l2cap_start_connection(chan); - } - } else { - struct l2cap_info_req req; - req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); - - conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; - conn->info_ident = l2cap_get_ident(conn); + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { + l2cap_request_info(conn); + return; + } - schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) + return; - l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, - sizeof(req), &req); - } + if (l2cap_chan_check_security(chan, true) && + __l2cap_no_conn_pending(chan)) + l2cap_start_connection(chan); } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) @@ -1353,6 +1379,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) l2cap_chan_lock(chan); if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { + l2cap_chan_ready(chan); l2cap_chan_unlock(chan); continue; } @@ -1417,71 +1444,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn) mutex_unlock(&conn->chan_lock); } -/* Find socket with cid and source/destination bdaddr. - * Returns closest match, locked. 
- */ -static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid, - bdaddr_t *src, - bdaddr_t *dst) -{ - struct l2cap_chan *c, *c1 = NULL; - - read_lock(&chan_list_lock); - - list_for_each_entry(c, &chan_list, global_l) { - if (state && c->state != state) - continue; - - if (c->scid == cid) { - int src_match, dst_match; - int src_any, dst_any; - - /* Exact match. */ - src_match = !bacmp(&c->src, src); - dst_match = !bacmp(&c->dst, dst); - if (src_match && dst_match) { - read_unlock(&chan_list_lock); - return c; - } - - /* Closest match */ - src_any = !bacmp(&c->src, BDADDR_ANY); - dst_any = !bacmp(&c->dst, BDADDR_ANY); - if ((src_match && dst_any) || (src_any && dst_match) || - (src_any && dst_any)) - c1 = c; - } - } - - read_unlock(&chan_list_lock); - - return c1; -} - static void l2cap_le_conn_ready(struct l2cap_conn *conn) { struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; - struct l2cap_chan *chan, *pchan; - u8 dst_type; - BT_DBG(""); - - /* Check if we have socket listening on cid */ - pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, - &hcon->src, &hcon->dst); - if (!pchan) - return; - - /* Client ATT sockets should override the server one */ - if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT)) - return; - - dst_type = bdaddr_type(hcon, hcon->dst_type); + BT_DBG("%s conn %p", hdev->name, conn); - /* If device is blocked, do not create a channel for it */ - if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type)) - return; + /* For outgoing pairing which doesn't necessarily have an + * associated socket (e.g. mgmt_pair_device). + */ + if (hcon->out) + smp_conn_security(hcon, hcon->pending_sec_level); /* For LE slave connections, make sure the connection interval * is in the range of the minium and maximum interval that has @@ -1501,22 +1475,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req); } - - l2cap_chan_lock(pchan); - - chan = pchan->ops->new_connection(pchan); - if (!chan) - goto clean; - - bacpy(&chan->src, &hcon->src); - bacpy(&chan->dst, &hcon->dst); - chan->src_type = bdaddr_type(hcon, hcon->src_type); - chan->dst_type = dst_type; - - __l2cap_chan_add(conn, chan); - -clean: - l2cap_chan_unlock(pchan); } static void l2cap_conn_ready(struct l2cap_conn *conn) @@ -1526,17 +1484,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) BT_DBG("conn %p", conn); - /* For outgoing pairing which doesn't necessarily have an - * associated socket (e.g. mgmt_pair_device). 
- */ - if (hcon->out && hcon->type == LE_LINK) - smp_conn_security(hcon, hcon->pending_sec_level); + if (hcon->type == ACL_LINK) + l2cap_request_info(conn); mutex_lock(&conn->chan_lock); - if (hcon->type == LE_LINK) - l2cap_le_conn_ready(conn); - list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -1549,8 +1501,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) if (hcon->type == LE_LINK) { l2cap_le_start(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { - l2cap_chan_ready(chan); - + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) + l2cap_chan_ready(chan); } else if (chan->state == BT_CONNECT) { l2cap_do_start(chan); } @@ -1560,6 +1512,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) mutex_unlock(&conn->chan_lock); + if (hcon->type == LE_LINK) + l2cap_le_conn_ready(conn); + queue_work(hcon->hdev->workqueue, &conn->pending_rx_work); } @@ -1695,8 +1650,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); + if (work_pending(&conn->id_addr_update_work)) + cancel_work_sync(&conn->id_addr_update_work); + l2cap_unregister_all_users(conn); + /* Force the connection to be immediately dropped */ + hcon->disc_timeout = 0; + mutex_lock(&conn->chan_lock); /* Kill channels */ @@ -1719,29 +1680,11 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) { - cancel_delayed_work_sync(&conn->security_timer); - smp_chan_destroy(conn); - } - hcon->l2cap_data = NULL; conn->hchan = NULL; l2cap_conn_put(conn); } -static void security_timeout(struct work_struct *work) -{ - struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - security_timer.work); - - BT_DBG("conn %p", conn); - - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - smp_chan_destroy(conn); - l2cap_conn_del(conn->hcon, ETIMEDOUT); - } -} - static void l2cap_conn_free(struct kref *ref) { struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); @@ -1750,9 +1693,10 @@ static void l2cap_conn_free(struct kref *ref) kfree(conn); } -void l2cap_conn_get(struct l2cap_conn *conn) +struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) { kref_get(&conn->ref); + return conn; } EXPORT_SYMBOL(l2cap_conn_get); @@ -1794,6 +1738,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { + l2cap_chan_hold(c); read_unlock(&chan_list_lock); return c; } @@ -1807,6 +1752,9 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, } } + if (c1) + l2cap_chan_hold(c1); + read_unlock(&chan_list_lock); return c1; @@ -2027,10 +1975,12 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) tx_skb->data + L2CAP_HDR_SIZE); } + /* Update FCS */ if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len); - put_unaligned_le16(fcs, skb_put(tx_skb, - L2CAP_FCS_SIZE)); + u16 fcs = crc16(0, (u8 *) tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) - + L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); @@ -2334,7 +2284,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, } else { sar = L2CAP_SAR_START; sdu_len = len; - pdu_len -= L2CAP_SDULEN_SIZE; } while (len > 0) { @@ -2349,10 +2298,8 @@ static int 
l2cap_segment_sdu(struct l2cap_chan *chan, __skb_queue_tail(seg_queue, skb); len -= pdu_len; - if (sdu_len) { + if (sdu_len) sdu_len = 0; - pdu_len += L2CAP_SDULEN_SIZE; - } if (len <= pdu_len) { sar = L2CAP_SAR_END; @@ -2418,12 +2365,8 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan, BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); - pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE; - - pdu_len = min_t(size_t, pdu_len, chan->remote_mps); - sdu_len = len; - pdu_len -= L2CAP_SDULEN_SIZE; + pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; while (len > 0) { if (len <= pdu_len) @@ -3884,6 +3827,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, response: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); sendresp: rsp.scid = cpu_to_le16(scid); @@ -5487,6 +5431,11 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); + /* The following result value is actually not defined + * for LE CoC but we use it to let the function know + * that it should bail out after doing its cleanup + * instead of sending a response. + */ result = L2CAP_CR_PEND; chan->ops->defer(chan); } else { @@ -5497,6 +5446,7 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, response_unlock: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) return 0; @@ -6845,12 +6795,12 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct l2cap_chan *chan; if (hcon->type != ACL_LINK) - goto drop; + goto free_skb; chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst, ACL_LINK); if (!chan) - goto drop; + goto free_skb; BT_DBG("chan %p, len %d", chan, skb->len); @@ -6864,36 +6814,14 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, bacpy(&bt_cb(skb)->bdaddr, &hcon->dst); bt_cb(skb)->psm = psm; - if (!chan->ops->recv(chan, skb)) - return; - -drop: - kfree_skb(skb); -} - -static void l2cap_att_channel(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - struct hci_conn *hcon = conn->hcon; - struct l2cap_chan *chan; - - if (hcon->type != LE_LINK) - goto drop; - - chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT, - &hcon->src, &hcon->dst); - if (!chan) - goto drop; - - BT_DBG("chan %p, len %d", chan, skb->len); - - if (chan->imtu < skb->len) - goto drop; - - if (!chan->ops->recv(chan, skb)) + if (!chan->ops->recv(chan, skb)) { + l2cap_chan_put(chan); return; + } drop: + l2cap_chan_put(chan); +free_skb: kfree_skb(skb); } @@ -6942,19 +6870,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) l2cap_conless_channel(conn, psm, skb); break; - case L2CAP_CID_ATT: - l2cap_att_channel(conn, skb); - break; - case L2CAP_CID_LE_SIGNALING: l2cap_le_sig_channel(conn, skb); break; - case L2CAP_CID_SMP: - if (smp_sig_channel(conn, skb)) - l2cap_conn_del(conn->hcon, EACCES); - break; - default: l2cap_data_channel(conn, cid, skb); break; @@ -6993,8 +6912,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) kref_init(&conn->ref); hcon->l2cap_data = conn; - conn->hcon = hcon; - hci_conn_get(conn->hcon); + conn->hcon = hci_conn_get(hcon); conn->hchan = hchan; BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); @@ -7023,13 +6941,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) INIT_LIST_HEAD(&conn->chan_l); INIT_LIST_HEAD(&conn->users); - if (hcon->type == LE_LINK) - INIT_DELAYED_WORK(&conn->security_timer, 
security_timeout); - else - INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); + INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; @@ -7064,8 +6980,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, hci_dev_lock(hdev); - l2cap_chan_lock(chan); - if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; @@ -7162,19 +7076,20 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, goto done; } + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(chan); + if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { hci_conn_drop(hcon); err = -EBUSY; - goto done; + goto chan_unlock; } /* Update source addr of the socket */ bacpy(&chan->src, &hcon->src); chan->src_type = bdaddr_type(hcon, hcon->src_type); - l2cap_chan_unlock(chan); - l2cap_chan_add(conn, chan); - l2cap_chan_lock(chan); + __l2cap_chan_add(conn, chan); /* l2cap_chan_add takes its own ref so we can drop this one */ hci_conn_drop(hcon); @@ -7200,8 +7115,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, err = 0; -done: +chan_unlock: l2cap_chan_unlock(chan); + mutex_unlock(&conn->chan_lock); +done: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; @@ -7239,19 +7156,99 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) return exact ? lm1 : lm2; } +/* Find the next fixed channel in BT_LISTEN state, continue iteration + * from an existing channel in the list or from the beginning of the + * global list (by passing NULL as first parameter). + */ +static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, + bdaddr_t *src, u8 link_type) +{ + read_lock(&chan_list_lock); + + if (c) + c = list_next_entry(c, global_l); + else + c = list_entry(chan_list.next, typeof(*c), global_l); + + list_for_each_entry_from(c, &chan_list, global_l) { + if (c->chan_type != L2CAP_CHAN_FIXED) + continue; + if (c->state != BT_LISTEN) + continue; + if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY)) + continue; + if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) + continue; + if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) + continue; + + l2cap_chan_hold(c); + read_unlock(&chan_list_lock); + return c; + } + + read_unlock(&chan_list_lock); + + return NULL; +} + void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { + struct hci_dev *hdev = hcon->hdev; struct l2cap_conn *conn; + struct l2cap_chan *pchan; + u8 dst_type; BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); - if (!status) { - conn = l2cap_conn_add(hcon); - if (conn) - l2cap_conn_ready(conn); - } else { + if (status) { l2cap_conn_del(hcon, bt_to_errno(status)); + return; } + + conn = l2cap_conn_add(hcon); + if (!conn) + return; + + dst_type = bdaddr_type(hcon, hcon->dst_type); + + /* If device is blocked, do not create channels for it */ + if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type)) + return; + + /* Find fixed channels and notify them of the new connection. We + * use multiple individual lookups, continuing each time where + * we left off, because the list lock would prevent calling the + * potentially sleeping l2cap_chan_lock() function. 
+ */ + pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type); + while (pchan) { + struct l2cap_chan *chan, *next; + + /* Client fixed channels should override server ones */ + if (__l2cap_get_chan_by_dcid(conn, pchan->scid)) + goto next; + + l2cap_chan_lock(pchan); + chan = pchan->ops->new_connection(pchan); + if (chan) { + bacpy(&chan->src, &hcon->src); + bacpy(&chan->dst, &hcon->dst); + chan->src_type = bdaddr_type(hcon, hcon->src_type); + chan->dst_type = dst_type; + + __l2cap_chan_add(conn, chan); + } + + l2cap_chan_unlock(pchan); +next: + next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr, + hcon->type); + l2cap_chan_put(pchan); + pchan = next; + } + + l2cap_conn_ready(conn); } int l2cap_disconn_ind(struct hci_conn *hcon) @@ -7299,12 +7296,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); - if (hcon->type == LE_LINK) { - if (!status && encrypt) - smp_distribute_keys(conn); - cancel_delayed_work(&conn->security_timer); - } - mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { @@ -7318,15 +7309,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) continue; } - if (chan->scid == L2CAP_CID_ATT) { - if (!status && encrypt) { - chan->sec_level = hcon->sec_level; - l2cap_chan_ready(chan); - } - - l2cap_chan_unlock(chan); - continue; - } + if (!status && encrypt) + chan->sec_level = hcon->sec_level; if (!__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1884f72083c2f7de4595e91f2cdb61f02cef99bb..31f106e61ca20fd15420830e653f81e7b1f87a20 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -99,15 +99,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) return -EINVAL; - if (la.l2_cid) { - /* When the socket gets created it defaults to - * CHAN_CONN_ORIENTED, so we need to overwrite the - * default here. - */ - chan->chan_type = L2CAP_CHAN_FIXED; - chan->omtu = L2CAP_DEFAULT_MTU; - } - if (bdaddr_type_is_le(la.l2_bdaddr_type)) { /* We only allow ATT user space socket */ if (la.l2_cid && @@ -155,6 +146,14 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) case L2CAP_CHAN_RAW: chan->sec_level = BT_SECURITY_SDP; break; + case L2CAP_CHAN_FIXED: + /* Fixed channels default to the L2CAP core not holding a + * hci_conn reference for them. For fixed channels mapping to + * L2CAP sockets we do want to hold a reference so set the + * appropriate flag to request it. 
+ */ + set_bit(FLAG_HOLD_HCI_CONN, &chan->flags); + break; } bacpy(&chan->src, &la.l2_bdaddr); @@ -790,6 +789,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, if (chan->scid == L2CAP_CID_ATT) { if (smp_conn_security(conn->hcon, sec.level)) break; + set_bit(FLAG_PENDING_SECURITY, &chan->flags); sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; @@ -1359,6 +1359,11 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; + if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) { + sk->sk_state = BT_CONNECTED; + chan->state = BT_CONNECTED; + } + clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); sk->sk_state_change(sk); } diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index 941ad7530eda48f21dcf7faef9167cbce6574a8e..b36bc041585425beed292ef9d499b4dc8e11397c 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c @@ -135,40 +135,34 @@ int bt_to_errno(__u16 code) } EXPORT_SYMBOL(bt_to_errno); -int bt_info(const char *format, ...) +void bt_info(const char *format, ...) { struct va_format vaf; va_list args; - int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; - r = pr_info("%pV", &vaf); + pr_info("%pV", &vaf); va_end(args); - - return r; } EXPORT_SYMBOL(bt_info); -int bt_err(const char *format, ...) +void bt_err(const char *format, ...) { struct va_format vaf; va_list args; - int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; - r = pr_err("%pV", &vaf); + pr_err("%pV", &vaf); va_end(args); - - return r; } EXPORT_SYMBOL(bt_err); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b8554d429d889f97bd735cc62aaafce1e85bae5c..efb71b022ab6520527b3087dbf917ddcb925438e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -129,9 +129,6 @@ static const u16 mgmt_events[] = { #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) -#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ - !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) - struct pending_cmd { struct list_head list; u16 opcode; @@ -1536,9 +1533,11 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status) /* When the discoverable mode gets changed, make sure * that class of device has the limited discoverable - * bit correctly set. + * bit correctly set. Also update page scan based on whitelist + * entries. 
*/ hci_req_init(&req, hdev); + hci_update_page_scan(hdev, &req); update_class(&req); hci_req_run(&req, NULL); @@ -1785,6 +1784,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status) if (conn_changed || discov_changed) { new_settings(hdev, cmd->sk); + hci_update_page_scan(hdev, NULL); if (discov_changed) mgmt_update_adv_data(hdev); hci_update_background_scan(hdev); @@ -1818,6 +1818,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev, return err; if (changed) { + hci_update_page_scan(hdev, NULL); hci_update_background_scan(hdev); return new_settings(hdev, sk); } @@ -2787,7 +2788,6 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; - struct hci_cp_disconnect dc; struct pending_cmd *cmd; struct hci_conn *conn; int err; @@ -2835,10 +2835,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - dc.handle = cpu_to_le16(conn->handle); - dc.reason = HCI_ERROR_REMOTE_USER_TERM; - - err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); + err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); if (err < 0) mgmt_pending_remove(cmd); @@ -3062,6 +3059,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) conn->disconn_cfm_cb = NULL; hci_conn_drop(conn); + hci_conn_put(conn); mgmt_pending_remove(cmd); } @@ -3211,7 +3209,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, } conn->io_capability = cp->io_cap; - cmd->user_data = conn; + cmd->user_data = hci_conn_get(conn); if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) && hci_conn_security(conn, sec_level, auth_type, true)) @@ -4381,27 +4379,6 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, return err; } -static void set_bredr_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 scan = 0; - - /* Ensure that fast connectable is disabled. This function will - * not do anything if the page scan parameters are already what - * they should be. - */ - write_fast_connectable(req, false); - - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || - !list_empty(&hdev->whitelist)) - scan |= SCAN_PAGE; - if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) - scan |= SCAN_INQUIRY; - - if (scan) - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - static void set_bredr_complete(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; @@ -4507,9 +4484,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_req_init(&req, hdev); - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || - !list_empty(&hdev->whitelist)) - set_bredr_scan(&req); + write_fast_connectable(&req, false); + hci_update_page_scan(hdev, &req); /* Since only the advertising data flags will change, there * is no need to update the scan response data. 
@@ -4935,6 +4911,7 @@ static void get_conn_info_complete(struct pending_cmd *cmd, void *data) match->mgmt_status, &rp, sizeof(rp)); hci_conn_drop(conn); + hci_conn_put(conn); mgmt_pending_remove(cmd); } @@ -5091,7 +5068,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, } hci_conn_hold(conn); - cmd->user_data = conn; + cmd->user_data = hci_conn_get(conn); conn->conn_info_timestamp = jiffies; } else { @@ -5155,8 +5132,10 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status) cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); - if (conn) + if (conn) { hci_conn_drop(conn); + hci_conn_put(conn); + } unlock: hci_dev_unlock(hdev); @@ -5219,7 +5198,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, if (conn) { hci_conn_hold(conn); - cmd->user_data = conn; + cmd->user_data = hci_conn_get(conn); hci_cp.handle = cpu_to_le16(conn->handle); hci_cp.which = 0x01; /* Piconet clock */ @@ -5235,27 +5214,6 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, return err; } -/* Helper for Add/Remove Device commands */ -static void update_page_scan(struct hci_dev *hdev, u8 scan) -{ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - return; - - if (!hdev_is_powered(hdev)) - return; - - /* If HCI_CONNECTABLE is set then Add/Remove Device should not - * make any changes to page scanning. - */ - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) - return; - - if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) - scan |= SCAN_INQUIRY; - - hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - static void device_added(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, u8 action) { @@ -5291,8 +5249,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (cp->addr.type == BDADDR_BREDR) { - bool update_scan; - /* Only incoming connections action is supported for now */ if (cp->action != 0x01) { err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, @@ -5301,15 +5257,12 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - update_scan = list_empty(&hdev->whitelist); - err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, cp->addr.type); if (err) goto unlock; - if (update_scan) - update_page_scan(hdev, SCAN_PAGE); + hci_update_page_scan(hdev, NULL); goto added; } @@ -5392,8 +5345,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (list_empty(&hdev->whitelist)) - update_page_scan(hdev, SCAN_DISABLED); + hci_update_page_scan(hdev, NULL); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); @@ -5444,7 +5396,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, kfree(b); } - update_page_scan(hdev, SCAN_DISABLED); + hci_update_page_scan(hdev, NULL); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) @@ -5969,8 +5921,8 @@ static int powered_update_hci(struct hci_dev *hdev) sizeof(link_sec), &link_sec); if (lmp_bredr_capable(hdev)) { - if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - set_bredr_scan(&req); + write_fast_connectable(&req, false); + hci_update_page_scan(hdev, &req); update_class(&req); update_name(&req); update_eir(&req); @@ -6281,25 +6233,35 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } +bool mgmt_powering_down(struct hci_dev *hdev) +{ + struct pending_cmd *cmd; + struct mgmt_mode *cp; + + cmd 
= mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + if (!cmd) + return false; + + cp = cmd->param; + if (!cp->val) + return true; + + return false; +} + void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected) { struct mgmt_ev_device_disconnected ev; - struct pending_cmd *power_off; struct sock *sk = NULL; - power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); - if (power_off) { - struct mgmt_mode *cp = power_off->param; - - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. - */ - if (!cp->val && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. + */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); } if (!mgmt_connected) @@ -6359,19 +6321,13 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { struct mgmt_ev_connect_failed ev; - struct pending_cmd *power_off; - - power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); - if (power_off) { - struct mgmt_mode *cp = power_off->param; - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. - */ - if (!cp->val && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. + */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); } bacpy(&ev.addr.bdaddr, bdaddr); @@ -6529,16 +6485,23 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL); } -void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status) +void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) { struct mgmt_ev_auth_failed ev; + struct pending_cmd *cmd; + u8 status = mgmt_status(hci_status); - bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_bdaddr(link_type, addr_type); - ev.status = mgmt_status(status); + bacpy(&ev.addr.bdaddr, &conn->dst); + ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); + ev.status = status; - mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); + cmd = find_pairing(conn); + + mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev), + cmd ? 
cmd->sk : NULL); + + if (cmd) + pairing_complete(cmd, status); } void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index fd32943008030332bc523e12a8d2e9a560381f04..f09b6b65cf6b22079d6ce60d53f2613ccf6a7745 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -31,9 +31,12 @@ #include "smp.h" +#define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd) + #define SMP_TIMEOUT msecs_to_jiffies(30000) #define AUTH_REQ_MASK 0x07 +#define KEY_DIST_MASK 0x07 enum { SMP_FLAG_TK_VALID, @@ -44,7 +47,10 @@ enum { }; struct smp_chan { - struct l2cap_conn *conn; + struct l2cap_conn *conn; + struct delayed_work security_timer; + unsigned long allow_cmd; /* Bitmask of allowed commands */ + u8 preq[7]; /* SMP Pairing Request */ u8 prsp[7]; /* SMP Pairing Response */ u8 prnd[16]; /* SMP Pairing Random (local) */ @@ -139,12 +145,18 @@ static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3]) return 0; } -bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], - bdaddr_t *bdaddr) +bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr) { + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; u8 hash[3]; int err; + if (!chan || !chan->data) + return false; + + tfm = chan->data; + BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); err = smp_ah(tfm, irk, &bdaddr->b[3], hash); @@ -154,10 +166,17 @@ bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], return !memcmp(bdaddr->b, hash, 3); } -int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa) +int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa) { + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; int err; + if (!chan || !chan->data) + return -EOPNOTSUPP; + + tfm = chan->data; + get_random_bytes(&rpa->b[3], 3); rpa->b[5] &= 0x3f; /* Clear two most significant bits */ @@ -235,47 +254,38 @@ static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16], return err; } -static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code, - u16 dlen, void *data) +static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) { - struct sk_buff *skb; - struct l2cap_hdr *lh; - int len; - - len = L2CAP_HDR_SIZE + sizeof(code) + dlen; - - if (len > conn->mtu) - return NULL; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; + struct kvec iv[2]; + struct msghdr msg; - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return NULL; + if (!chan) + return; - lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); - lh->len = cpu_to_le16(sizeof(code) + dlen); - lh->cid = cpu_to_le16(L2CAP_CID_SMP); + BT_DBG("code 0x%2.2x", code); - memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); + iv[0].iov_base = &code; + iv[0].iov_len = 1; - memcpy(skb_put(skb, dlen), data, dlen); + iv[1].iov_base = data; + iv[1].iov_len = len; - return skb; -} + memset(&msg, 0, sizeof(msg)); -static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) -{ - struct sk_buff *skb = smp_build_cmd(conn, code, len, data); + msg.msg_iov = (struct iovec *) &iv; + msg.msg_iovlen = 2; - BT_DBG("code 0x%2.2x", code); + l2cap_chan_send(chan, &msg, 1 + len); - if (!skb) + if (!chan->data) return; - skb->priority = HCI_PRIO_MAX; - hci_send_acl(conn->hchan, skb, 0); + smp = chan->data; - cancel_delayed_work_sync(&conn->security_timer); - schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT); + 
cancel_delayed_work_sync(&smp->security_timer); + schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT); } static __u8 authreq_to_seclevel(__u8 authreq) @@ -302,7 +312,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn, struct smp_cmd_pairing *req, struct smp_cmd_pairing *rsp, __u8 authreq) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; u8 local_dist = 0, remote_dist = 0; @@ -345,7 +356,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn, static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || (max_key_size < SMP_MIN_ENC_KEY_SIZE)) @@ -356,21 +368,60 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) return 0; } +static void smp_chan_destroy(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + bool complete; + + BUG_ON(!smp); + + cancel_delayed_work_sync(&smp->security_timer); + + complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); + mgmt_smp_complete(conn->hcon, complete); + + kfree(smp->csrk); + kfree(smp->slave_csrk); + + crypto_free_blkcipher(smp->tfm_aes); + + /* If pairing failed clean up any keys we might have */ + if (!complete) { + if (smp->ltk) { + list_del(&smp->ltk->list); + kfree(smp->ltk); + } + + if (smp->slave_ltk) { + list_del(&smp->slave_ltk->list); + kfree(smp->slave_ltk); + } + + if (smp->remote_irk) { + list_del(&smp->remote_irk->list); + kfree(smp->remote_irk); + } + } + + chan->data = NULL; + kfree(smp); + hci_conn_drop(conn->hcon); +} + static void smp_failure(struct l2cap_conn *conn, u8 reason) { struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; if (reason) smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags); - mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, - HCI_ERROR_AUTH_FAILURE); + mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE); - cancel_delayed_work_sync(&conn->security_timer); - - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) + if (chan->data) smp_chan_destroy(conn); } @@ -405,7 +456,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, u8 local_io, u8 remote_io) { struct hci_conn *hcon = conn->hcon; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; u8 method; u32 passkey = 0; int ret = 0; @@ -442,8 +494,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, } /* Not Just Works/Confirm results in MITM Authentication */ - if (method != JUST_CFM) + if (method != JUST_CFM) { set_bit(SMP_FLAG_MITM_AUTH, &smp->flags); + if (hcon->pending_sec_level < BT_SECURITY_HIGH) + hcon->pending_sec_level = BT_SECURITY_HIGH; + } /* If both devices have Keyoard-Display I/O, the master * Confirms and the slave Enters the passkey. 
@@ -503,6 +558,11 @@ static u8 smp_confirm(struct smp_chan *smp) smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); + if (conn->hcon->out) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + else + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + return 0; } @@ -574,82 +634,262 @@ static u8 smp_random(struct smp_chan *smp) return 0; } -static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) +static void smp_notify_keys(struct l2cap_conn *conn) { - struct smp_chan *smp; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing *req = (void *) &smp->preq[1]; + struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; + bool persistent; - smp = kzalloc(sizeof(*smp), GFP_ATOMIC); - if (!smp) { - clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); - return NULL; + if (smp->remote_irk) { + mgmt_new_irk(hdev, smp->remote_irk); + /* Now that user space can be considered to know the + * identity address track the connection based on it + * from now on. + */ + bacpy(&hcon->dst, &smp->remote_irk->bdaddr); + hcon->dst_type = smp->remote_irk->addr_type; + queue_work(hdev->workqueue, &conn->id_addr_update_work); + + /* When receiving an identity resolving key for + * a remote device that does not use a resolvable + * private address, just remove the key so that + * it is possible to use the controller white + * list for scanning. + * + * Userspace will have been told to not store + * this key at this point. So it is safe to + * just remove it. + */ + if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { + list_del(&smp->remote_irk->list); + kfree(smp->remote_irk); + smp->remote_irk = NULL; + } } - smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(smp->tfm_aes)) { - BT_ERR("Unable to create ECB crypto context"); - kfree(smp); - clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); - return NULL; + /* The LTKs and CSRKs should be persistent only if both sides + * had the bonding bit set in their authentication requests. + */ + persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); + + if (smp->csrk) { + smp->csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->csrk, persistent); } - smp->conn = conn; - conn->smp_chan = smp; + if (smp->slave_csrk) { + smp->slave_csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->slave_csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->slave_csrk, persistent); + } - hci_conn_hold(conn->hcon); + if (smp->ltk) { + smp->ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->ltk, persistent); + } - return smp; + if (smp->slave_ltk) { + smp->slave_ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->slave_ltk, persistent); + } } -void smp_chan_destroy(struct l2cap_conn *conn) +static void smp_allow_key_dist(struct smp_chan *smp) { - struct smp_chan *smp = conn->smp_chan; - bool complete; + /* Allow the first expected phase 3 PDU. The rest of the PDUs + * will be allowed in each PDU handler to ensure we receive + * them in the correct order. 
+ */ + if (smp->remote_key_dist & SMP_DIST_ENC_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_ENCRYPT_INFO); + else if (smp->remote_key_dist & SMP_DIST_ID_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); + else if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); +} - BUG_ON(!smp); +static void smp_distribute_keys(struct smp_chan *smp) +{ + struct smp_cmd_pairing *req, *rsp; + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + __u8 *keydist; - complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); - mgmt_smp_complete(conn->hcon, complete); + BT_DBG("conn %p", conn); - kfree(smp->csrk); - kfree(smp->slave_csrk); + rsp = (void *) &smp->prsp[1]; - crypto_free_blkcipher(smp->tfm_aes); + /* The responder sends its keys first */ + if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) { + smp_allow_key_dist(smp); + return; + } - /* If pairing failed clean up any keys we might have */ - if (!complete) { - if (smp->ltk) { - list_del(&smp->ltk->list); - kfree(smp->ltk); - } + req = (void *) &smp->preq[1]; - if (smp->slave_ltk) { - list_del(&smp->slave_ltk->list); - kfree(smp->slave_ltk); + if (hcon->out) { + keydist = &rsp->init_key_dist; + *keydist &= req->init_key_dist; + } else { + keydist = &rsp->resp_key_dist; + *keydist &= req->resp_key_dist; + } + + BT_DBG("keydist 0x%x", *keydist); + + if (*keydist & SMP_DIST_ENC_KEY) { + struct smp_cmd_encrypt_info enc; + struct smp_cmd_master_ident ident; + struct smp_ltk *ltk; + u8 authenticated; + __le16 ediv; + __le64 rand; + + get_random_bytes(enc.ltk, sizeof(enc.ltk)); + get_random_bytes(&ediv, sizeof(ediv)); + get_random_bytes(&rand, sizeof(rand)); + + smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); + + authenticated = hcon->sec_level == BT_SECURITY_HIGH; + ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, + SMP_LTK_SLAVE, authenticated, enc.ltk, + smp->enc_key_size, ediv, rand); + smp->slave_ltk = ltk; + + ident.ediv = ediv; + ident.rand = rand; + + smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); + + *keydist &= ~SMP_DIST_ENC_KEY; + } + + if (*keydist & SMP_DIST_ID_KEY) { + struct smp_cmd_ident_addr_info addrinfo; + struct smp_cmd_ident_info idinfo; + + memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); + + smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); + + /* The hci_conn contains the local identity address + * after the connection has been established. + * + * This is true even when the connection has been + * established using a resolvable random address. 
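/* Illustrative sketch, not part of the patch: smp_allow_key_dist() and
 * smp_distribute_keys() walk the negotiated key-distribution byte in a
 * fixed order (encryption key, then identity key, then signing key),
 * clearing one SMP_DIST_* bit per key until the 0x07 mask is empty.  The
 * stand-alone helper below models that walk; the toy_* names are made up,
 * only the flag values and the ordering are taken from the patch.
 */
#include <stdint.h>

#define TOY_DIST_ENC_KEY        0x01    /* mirrors SMP_DIST_ENC_KEY */
#define TOY_DIST_ID_KEY         0x02    /* mirrors SMP_DIST_ID_KEY */
#define TOY_DIST_SIGN           0x04    /* mirrors SMP_DIST_SIGN */
#define TOY_DIST_MASK           0x07    /* mirrors KEY_DIST_MASK */

/* Return the next key class to send (or to expect from the peer) and clear
 * its bit; 0 means the mask is exhausted and phase 3 is complete. */
static uint8_t toy_next_key(uint8_t *keydist)
{
        static const uint8_t order[] = {
                TOY_DIST_ENC_KEY, TOY_DIST_ID_KEY, TOY_DIST_SIGN,
        };
        unsigned int i;

        for (i = 0; i < sizeof(order); i++) {
                if (*keydist & order[i]) {
                        *keydist &= ~order[i];
                        return order[i];
                }
        }

        return 0;       /* notify the new keys and tear down the context */
}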
+ */ + bacpy(&addrinfo.bdaddr, &hcon->src); + addrinfo.addr_type = hcon->src_type; + + smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), + &addrinfo); + + *keydist &= ~SMP_DIST_ID_KEY; + } + + if (*keydist & SMP_DIST_SIGN) { + struct smp_cmd_sign_info sign; + struct smp_csrk *csrk; + + /* Generate a new random key */ + get_random_bytes(sign.csrk, sizeof(sign.csrk)); + + csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); + if (csrk) { + csrk->master = 0x00; + memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); } + smp->slave_csrk = csrk; + + smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + + *keydist &= ~SMP_DIST_SIGN; + } + + /* If there are still keys to be received wait for them */ + if (smp->remote_key_dist & KEY_DIST_MASK) { + smp_allow_key_dist(smp); + return; + } + + set_bit(SMP_FLAG_COMPLETE, &smp->flags); + smp_notify_keys(conn); + + smp_chan_destroy(conn); +} + +static void smp_timeout(struct work_struct *work) +{ + struct smp_chan *smp = container_of(work, struct smp_chan, + security_timer.work); + struct l2cap_conn *conn = smp->conn; + + BT_DBG("conn %p", conn); + + hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM); +} + +static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; + + smp = kzalloc(sizeof(*smp), GFP_ATOMIC); + if (!smp) + return NULL; - if (smp->remote_irk) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); - } + smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(smp->tfm_aes)) { + BT_ERR("Unable to create ECB crypto context"); + kfree(smp); + return NULL; } - kfree(smp); - conn->smp_chan = NULL; - hci_conn_drop(conn->hcon); + smp->conn = conn; + chan->data = smp; + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_FAIL); + + INIT_DELAYED_WORK(&smp->security_timer, smp_timeout); + + hci_conn_hold(conn->hcon); + + return smp; } int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) { struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; struct smp_chan *smp; u32 value; + int err; BT_DBG(""); - if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) + if (!conn) + return -ENOTCONN; + + chan = conn->smp; + if (!chan) return -ENOTCONN; - smp = conn->smp_chan; + l2cap_chan_lock(chan); + if (!chan->data) { + err = -ENOTCONN; + goto unlock; + } + + smp = chan->data; switch (mgmt_op) { case MGMT_OP_USER_PASSKEY_REPLY: @@ -664,12 +904,16 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) case MGMT_OP_USER_PASSKEY_NEG_REPLY: case MGMT_OP_USER_CONFIRM_NEG_REPLY: smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); - return 0; + err = 0; + goto unlock; default: smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto unlock; } + err = 0; + /* If it is our turn to send Pairing Confirm, do so now */ if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) { u8 rsp = smp_confirm(smp); @@ -677,12 +921,15 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) smp_failure(conn, rsp); } - return 0; +unlock: + l2cap_chan_unlock(chan); + return err; } static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing rsp, *req = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; struct hci_dev *hdev = conn->hcon->hdev; struct smp_chan *smp; u8 key_size, auth, sec_level; @@ -696,26 +943,30 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (conn->hcon->role != 
HCI_ROLE_SLAVE) return SMP_CMD_NOTSUPP; - if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) + if (!chan->data) smp = smp_chan_create(conn); else - smp = conn->smp_chan; + smp = chan->data; if (!smp) return SMP_UNSPECIFIED; + /* We didn't start the pairing, so match remote */ + auth = req->auth_req & AUTH_REQ_MASK; + if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && - (req->auth_req & SMP_AUTH_BONDING)) + (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); skb_pull(skb, sizeof(*req)); - /* We didn't start the pairing, so match remote */ - auth = req->auth_req; + if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + sec_level = BT_SECURITY_MEDIUM; + else + sec_level = authreq_to_seclevel(auth); - sec_level = authreq_to_seclevel(auth); if (sec_level > conn->hcon->pending_sec_level) conn->hcon->pending_sec_level = sec_level; @@ -741,6 +992,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); /* Request setup of TK */ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); @@ -753,8 +1005,9 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing *req, *rsp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; - u8 key_size, auth = SMP_AUTH_NONE; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + u8 key_size, auth; int ret; BT_DBG("conn %p", conn); @@ -773,6 +1026,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; + auth = rsp->auth_req & AUTH_REQ_MASK; + /* If we need MITM check that it can be acheived */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; @@ -793,11 +1048,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) */ smp->remote_key_dist &= rsp->resp_key_dist; - if ((req->auth_req & SMP_AUTH_BONDING) && - (rsp->auth_req & SMP_AUTH_BONDING)) - auth = SMP_AUTH_BONDING; - - auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; + auth |= req->auth_req; ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); if (ret) @@ -814,7 +1065,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p %s", conn, conn->hcon->out ? 
"master" : "slave"); @@ -824,10 +1076,14 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); skb_pull(skb, sizeof(smp->pcnf)); - if (conn->hcon->out) + if (conn->hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); - else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + return 0; + } + + if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) return smp_confirm(smp); else set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); @@ -837,7 +1093,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p", conn); @@ -860,7 +1117,7 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) if (!key) return false; - if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) + if (smp_ltk_sec_level(key) < sec_level) return false; if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) @@ -903,7 +1160,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) struct smp_cmd_pairing cp; struct hci_conn *hcon = conn->hcon; struct smp_chan *smp; - u8 sec_level; + u8 sec_level, auth; BT_DBG("conn %p", conn); @@ -913,7 +1170,13 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) if (hcon->role != HCI_ROLE_MASTER) return SMP_CMD_NOTSUPP; - sec_level = authreq_to_seclevel(rp->auth_req); + auth = rp->auth_req & AUTH_REQ_MASK; + + if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + sec_level = BT_SECURITY_MEDIUM; + else + sec_level = authreq_to_seclevel(auth); + if (smp_sufficient_security(hcon, sec_level)) return 0; @@ -923,26 +1186,24 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; - if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) - return 0; - smp = smp_chan_create(conn); if (!smp) return SMP_UNSPECIFIED; if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) && - (rp->auth_req & SMP_AUTH_BONDING)) + (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; skb_pull(skb, sizeof(*rp)); memset(&cp, 0, sizeof(cp)); - build_pairing_cmd(conn, &cp, NULL, rp->auth_req); + build_pairing_cmd(conn, &cp, NULL, auth); smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], &cp, sizeof(cp)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); return 0; } @@ -950,8 +1211,10 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) { struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; struct smp_chan *smp; __u8 authreq; + int ret; BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); @@ -959,6 +1222,8 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (!conn) return 1; + chan = conn->smp; + if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) return 1; @@ -972,12 +1237,19 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; - if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) - return 0; + l2cap_chan_lock(chan); + + /* If SMP is already in progress ignore this request */ + if (chan->data) { + ret = 0; + goto unlock; + } smp = 
smp_chan_create(conn); - if (!smp) - return 1; + if (!smp) { + ret = 1; + goto unlock; + } authreq = seclevel_to_authreq(sec_level); @@ -996,30 +1268,34 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) memcpy(&smp->preq[1], &cp, sizeof(cp)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); } else { struct smp_cmd_security_req cp; cp.auth_req = authreq; smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ); } set_bit(SMP_FLAG_INITIATOR, &smp->flags); + ret = 0; - return 0; +unlock: + l2cap_chan_unlock(chan); + return ret; } static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_encrypt_info *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p", conn); if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; - /* Ignore this PDU if it wasn't requested */ - if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) - return 0; + SMP_ALLOW_CMD(smp, SMP_CMD_MASTER_IDENT); skb_pull(skb, sizeof(*rp)); @@ -1031,7 +1307,8 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_master_ident *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; struct hci_conn *hcon = conn->hcon; struct smp_ltk *ltk; @@ -1042,13 +1319,14 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; - /* Ignore this PDU if it wasn't requested */ - if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) - return 0; - /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_ENC_KEY; + if (smp->remote_key_dist & SMP_DIST_ID_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); + else if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); + skb_pull(skb, sizeof(*rp)); hci_dev_lock(hdev); @@ -1057,8 +1335,8 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) authenticated, smp->tk, smp->enc_key_size, rp->ediv, rp->rand); smp->ltk = ltk; - if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) - smp_distribute_keys(conn); + if (!(smp->remote_key_dist & KEY_DIST_MASK)) + smp_distribute_keys(smp); hci_dev_unlock(hdev); return 0; @@ -1067,16 +1345,15 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_info *info = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG(""); if (skb->len < sizeof(*info)) return SMP_INVALID_PARAMS; - /* Ignore this PDU if it wasn't requested */ - if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) - return 0; + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_ADDR_INFO); skb_pull(skb, sizeof(*info)); @@ -1089,7 +1366,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_addr_info *info = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; bdaddr_t rpa; @@ -1098,13 +1376,12 @@ static int smp_cmd_ident_addr_info(struct 
l2cap_conn *conn, if (skb->len < sizeof(*info)) return SMP_INVALID_PARAMS; - /* Ignore this PDU if it wasn't requested */ - if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) - return 0; - /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_ID_KEY; + if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); + skb_pull(skb, sizeof(*info)); hci_dev_lock(hcon->hdev); @@ -1133,7 +1410,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, smp->id_addr_type, smp->irk, &rpa); distribute: - smp_distribute_keys(conn); + if (!(smp->remote_key_dist & KEY_DIST_MASK)) + smp_distribute_keys(smp); hci_dev_unlock(hcon->hdev); @@ -1143,7 +1421,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_sign_info *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; struct smp_csrk *csrk; @@ -1152,10 +1431,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; - /* Ignore this PDU if it wasn't requested */ - if (!(smp->remote_key_dist & SMP_DIST_SIGN)) - return 0; - /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_SIGN; @@ -1168,16 +1443,17 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); } smp->csrk = csrk; - if (!(smp->remote_key_dist & SMP_DIST_SIGN)) - smp_distribute_keys(conn); + smp_distribute_keys(smp); hci_dev_unlock(hdev); return 0; } -int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) +static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) { + struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; + struct smp_chan *smp; __u8 code, reason; int err = 0; @@ -1186,13 +1462,10 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } - if (skb->len < 1) { - kfree_skb(skb); + if (skb->len < 1) return -EILSEQ; - } if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) { - err = -EOPNOTSUPP; reason = SMP_PAIRING_NOTSUPP; goto done; } @@ -1200,18 +1473,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) code = skb->data[0]; skb_pull(skb, sizeof(code)); - /* - * The SMP context must be initialized for all other PDUs except - * pairing and security requests. If we get any other PDU when - * not initialized simply disconnect (done if this function - * returns an error). + smp = chan->data; + + if (code > SMP_CMD_MAX) + goto drop; + + if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) + goto drop; + + /* If we don't have a context the only allowed commands are + * pairing request and security request. */ - if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && - !conn->smp_chan) { - BT_ERR("Unexpected SMP command 0x%02x. 
Disconnecting.", code); - kfree_skb(skb); - return -EOPNOTSUPP; - } + if (!smp && code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ) + goto drop; switch (code) { case SMP_CMD_PAIRING_REQ: @@ -1220,7 +1494,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) case SMP_CMD_PAIRING_FAIL: smp_failure(conn, 0); - reason = 0; err = -EPERM; break; @@ -1262,197 +1535,217 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) default: BT_DBG("Unknown command code 0x%2.2x", code); - reason = SMP_CMD_NOTSUPP; - err = -EOPNOTSUPP; goto done; } done: - if (reason) - smp_failure(conn, reason); + if (!err) { + if (reason) + smp_failure(conn, reason); + kfree_skb(skb); + } - kfree_skb(skb); return err; + +drop: + BT_ERR("%s unexpected SMP command 0x%02x from %pMR", hcon->hdev->name, + code, &hcon->dst); + kfree_skb(skb); + return 0; } -static void smp_notify_keys(struct l2cap_conn *conn) +static void smp_teardown_cb(struct l2cap_chan *chan, int err) { - struct smp_chan *smp = conn->smp_chan; - struct hci_conn *hcon = conn->hcon; - struct hci_dev *hdev = hcon->hdev; - struct smp_cmd_pairing *req = (void *) &smp->preq[1]; - struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; - bool persistent; + struct l2cap_conn *conn = chan->conn; - if (smp->remote_irk) { - mgmt_new_irk(hdev, smp->remote_irk); - /* Now that user space can be considered to know the - * identity address track the connection based on it - * from now on. - */ - bacpy(&hcon->dst, &smp->remote_irk->bdaddr); - hcon->dst_type = smp->remote_irk->addr_type; - l2cap_conn_update_id_addr(hcon); + BT_DBG("chan %p", chan); - /* When receiving an indentity resolving key for - * a remote device that does not use a resolvable - * private address, just remove the key so that - * it is possible to use the controller white - * list for scanning. - * - * Userspace will have been told to not store - * this key at this point. So it is safe to - * just remove it. - */ - if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); - smp->remote_irk = NULL; - } - } + if (chan->data) + smp_chan_destroy(conn); - /* The LTKs and CSRKs should be persistent only if both sides - * had the bonding bit set in their authentication requests. 
- */ - persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); + conn->smp = NULL; + l2cap_chan_put(chan); +} - if (smp->csrk) { - smp->csrk->bdaddr_type = hcon->dst_type; - bacpy(&smp->csrk->bdaddr, &hcon->dst); - mgmt_new_csrk(hdev, smp->csrk, persistent); - } +static void smp_resume_cb(struct l2cap_chan *chan) +{ + struct smp_chan *smp = chan->data; + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; - if (smp->slave_csrk) { - smp->slave_csrk->bdaddr_type = hcon->dst_type; - bacpy(&smp->slave_csrk->bdaddr, &hcon->dst); - mgmt_new_csrk(hdev, smp->slave_csrk, persistent); - } + BT_DBG("chan %p", chan); - if (smp->ltk) { - smp->ltk->bdaddr_type = hcon->dst_type; - bacpy(&smp->ltk->bdaddr, &hcon->dst); - mgmt_new_ltk(hdev, smp->ltk, persistent); - } + if (!smp) + return; - if (smp->slave_ltk) { - smp->slave_ltk->bdaddr_type = hcon->dst_type; - bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); - mgmt_new_ltk(hdev, smp->slave_ltk, persistent); - } + if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + return; + + cancel_delayed_work(&smp->security_timer); + + smp_distribute_keys(smp); } -int smp_distribute_keys(struct l2cap_conn *conn) +static void smp_ready_cb(struct l2cap_chan *chan) { - struct smp_cmd_pairing *req, *rsp; - struct smp_chan *smp = conn->smp_chan; - struct hci_conn *hcon = conn->hcon; - struct hci_dev *hdev = hcon->hdev; - __u8 *keydist; + struct l2cap_conn *conn = chan->conn; - BT_DBG("conn %p", conn); + BT_DBG("chan %p", chan); - if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) - return 0; + conn->smp = chan; + l2cap_chan_hold(chan); +} - rsp = (void *) &smp->prsp[1]; +static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +{ + int err; - /* The responder sends its keys first */ - if (hcon->out && (smp->remote_key_dist & 0x07)) - return 0; + BT_DBG("chan %p", chan); - req = (void *) &smp->preq[1]; + err = smp_sig_channel(chan, skb); + if (err) { + struct smp_chan *smp = chan->data; - if (hcon->out) { - keydist = &rsp->init_key_dist; - *keydist &= req->init_key_dist; - } else { - keydist = &rsp->resp_key_dist; - *keydist &= req->resp_key_dist; + if (smp) + cancel_delayed_work_sync(&smp->security_timer); + + hci_disconnect(chan->conn->hcon, HCI_ERROR_AUTH_FAILURE); } - BT_DBG("keydist 0x%x", *keydist); + return err; +} - if (*keydist & SMP_DIST_ENC_KEY) { - struct smp_cmd_encrypt_info enc; - struct smp_cmd_master_ident ident; - struct smp_ltk *ltk; - u8 authenticated; - __le16 ediv; - __le64 rand; +static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + struct sk_buff *skb; - get_random_bytes(enc.ltk, sizeof(enc.ltk)); - get_random_bytes(&ediv, sizeof(ediv)); - get_random_bytes(&rand, sizeof(rand)); + skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOMEM); - smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); + skb->priority = HCI_PRIO_MAX; + bt_cb(skb)->chan = chan; - authenticated = hcon->sec_level == BT_SECURITY_HIGH; - ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, - SMP_LTK_SLAVE, authenticated, enc.ltk, - smp->enc_key_size, ediv, rand); - smp->slave_ltk = ltk; + return skb; +} - ident.ediv = ediv; - ident.rand = rand; +static const struct l2cap_ops smp_chan_ops = { + .name = "Security Manager", + .ready = smp_ready_cb, + .recv = smp_recv_cb, + .alloc_skb = smp_alloc_skb_cb, + .teardown = smp_teardown_cb, + .resume = smp_resume_cb, + + .new_connection = l2cap_chan_no_new_connection, + .state_change = 
l2cap_chan_no_state_change, + .close = l2cap_chan_no_close, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, + .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, +}; - smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); +static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan) +{ + struct l2cap_chan *chan; - *keydist &= ~SMP_DIST_ENC_KEY; - } + BT_DBG("pchan %p", pchan); - if (*keydist & SMP_DIST_ID_KEY) { - struct smp_cmd_ident_addr_info addrinfo; - struct smp_cmd_ident_info idinfo; + chan = l2cap_chan_create(); + if (!chan) + return NULL; - memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); + chan->chan_type = pchan->chan_type; + chan->ops = &smp_chan_ops; + chan->scid = pchan->scid; + chan->dcid = chan->scid; + chan->imtu = pchan->imtu; + chan->omtu = pchan->omtu; + chan->mode = pchan->mode; - smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); + BT_DBG("created chan %p", chan); - /* The hci_conn contains the local identity address - * after the connection has been established. - * - * This is true even when the connection has been - * established using a resolvable random address. - */ - bacpy(&addrinfo.bdaddr, &hcon->src); - addrinfo.addr_type = hcon->src_type; + return chan; +} - smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), - &addrinfo); +static const struct l2cap_ops smp_root_chan_ops = { + .name = "Security Manager Root", + .new_connection = smp_new_conn_cb, + + /* None of these are implemented for the root channel */ + .close = l2cap_chan_no_close, + .alloc_skb = l2cap_chan_no_alloc_skb, + .recv = l2cap_chan_no_recv, + .state_change = l2cap_chan_no_state_change, + .teardown = l2cap_chan_no_teardown, + .ready = l2cap_chan_no_ready, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .resume = l2cap_chan_no_resume, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, + .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, +}; - *keydist &= ~SMP_DIST_ID_KEY; - } +int smp_register(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + struct crypto_blkcipher *tfm_aes; - if (*keydist & SMP_DIST_SIGN) { - struct smp_cmd_sign_info sign; - struct smp_csrk *csrk; + BT_DBG("%s", hdev->name); - /* Generate a new random key */ - get_random_bytes(sign.csrk, sizeof(sign.csrk)); + tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm_aes)) { + int err = PTR_ERR(tfm_aes); + BT_ERR("Unable to create crypto context"); + return err; + } - csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); - if (csrk) { - csrk->master = 0x00; - memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); - } - smp->slave_csrk = csrk; + chan = l2cap_chan_create(); + if (!chan) { + crypto_free_blkcipher(tfm_aes); + return -ENOMEM; + } - smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + chan->data = tfm_aes; - *keydist &= ~SMP_DIST_SIGN; - } + l2cap_add_scid(chan, L2CAP_CID_SMP); - /* If there are still keys to be received wait for them */ - if ((smp->remote_key_dist & 0x07)) - return 0; + l2cap_chan_set_defaults(chan); - clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); - cancel_delayed_work_sync(&conn->security_timer); - set_bit(SMP_FLAG_COMPLETE, &smp->flags); - smp_notify_keys(conn); + bacpy(&chan->src, &hdev->bdaddr); + chan->src_type = BDADDR_LE_PUBLIC; + chan->state = BT_LISTEN; + chan->mode = L2CAP_MODE_BASIC; + chan->imtu = L2CAP_DEFAULT_MTU; + chan->ops = 
&smp_root_chan_ops; - smp_chan_destroy(conn); + hdev->smp_data = chan; return 0; } + +void smp_unregister(struct hci_dev *hdev) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm_aes; + + if (!chan) + return; + + BT_DBG("%s chan %p", hdev->name, chan); + + tfm_aes = chan->data; + if (tfm_aes) { + chan->data = NULL; + crypto_free_blkcipher(tfm_aes); + } + + hdev->smp_data = NULL; + l2cap_chan_put(chan); +} diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index 796f4f45f92f67e43e32d75bfb015b5175f2aba0..86a683a8b4917fe7ba0567254da8da210909f13c 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -102,6 +102,8 @@ struct smp_cmd_security_req { __u8 auth_req; } __packed; +#define SMP_CMD_MAX 0x0b + #define SMP_PASSKEY_ENTRY_FAILED 0x01 #define SMP_OOB_NOT_AVAIL 0x02 #define SMP_AUTH_REQUIREMENTS 0x03 @@ -123,17 +125,23 @@ enum { SMP_LTK_SLAVE, }; +static inline u8 smp_ltk_sec_level(struct smp_ltk *key) +{ + if (key->authenticated) + return BT_SECURITY_HIGH; + + return BT_SECURITY_MEDIUM; +} + /* SMP Commands */ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); -int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); -int smp_distribute_keys(struct l2cap_conn *conn); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); -void smp_chan_destroy(struct l2cap_conn *conn); +bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr); +int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa); -bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], - bdaddr_t *bdaddr); -int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa); +int smp_register(struct hci_dev *hdev); +void smp_unregister(struct hci_dev *hdev); #endif /* __SMP_H */ diff --git a/net/bridge/Makefile b/net/bridge/Makefile index 8590b942bffa62a11d23c4e7a0666c9564d310c1..fd7ee03c59b3f6c891fe9b5020b08af389d3e814 100644 --- a/net/bridge/Makefile +++ b/net/bridge/Makefile @@ -10,7 +10,9 @@ bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o -bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o +bridge-$(subst m,y,$(CONFIG_BRIDGE_NETFILTER)) += br_nf_core.o + +obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o diff --git a/net/bridge/br.c b/net/bridge/br.c index 1a755a1e54101d924e88ea240a82c154dcb7bbe5..44425aff7cba15f93c659fd0502a7b650d050b5e 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -161,7 +161,7 @@ static int __init br_init(void) if (err) goto err_out1; - err = br_netfilter_init(); + err = br_nf_core_init(); if (err) goto err_out2; @@ -179,11 +179,16 @@ static int __init br_init(void) br_fdb_test_addr_hook = br_fdb_test_addr; #endif + pr_info("bridge: automatic filtering via arp/ip/ip6tables has been " + "deprecated. 
Update your scripts to load br_netfilter if you " + "need this.\n"); + return 0; + err_out4: unregister_netdevice_notifier(&br_device_notifier); err_out3: - br_netfilter_fini(); + br_nf_core_fini(); err_out2: unregister_pernet_subsys(&br_net_ops); err_out1: @@ -196,20 +201,17 @@ static int __init br_init(void) static void __exit br_deinit(void) { stp_proto_unregister(&br_stp_proto); - br_netlink_fini(); unregister_netdevice_notifier(&br_device_notifier); brioctl_set(NULL); - unregister_pernet_subsys(&br_net_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ - br_netfilter_fini(); + br_nf_core_fini(); #if IS_ENABLED(CONFIG_ATM_LANE) br_fdb_test_addr_hook = NULL; #endif - br_fdb_fini(); } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 568cccd39a3d8a25716ef13e14b2f4e4850c1db5..ffd379db5938b17694be31e85d104289c7fb09bc 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -36,7 +36,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) u16 vid = 0; rcu_read_lock(); -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { br_nf_pre_routing_finish_bridge_slow(skb); rcu_read_unlock(); @@ -88,12 +88,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) static int br_dev_init(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); + int err; br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!br->stats) return -ENOMEM; - return 0; + err = br_vlan_init(br); + if (err) + free_percpu(br->stats); + + return err; } static int br_dev_open(struct net_device *dev) @@ -167,7 +172,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* remember the MTU in the rtable for PMTU */ dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu); #endif @@ -389,5 +394,4 @@ void br_dev_setup(struct net_device *dev) br_netfilter_rtable_init(br); br_stp_timer_init(br); br_multicast_init(br); - br_vlan_init(br); } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 056b67b0e2778fdce7bd80a2bf4ede2d552c0e1d..992ec49a96aa7e289bd3c74bca9a606762c63614 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -49,6 +49,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) return 0; } +EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); int br_forward_finish(struct sk_buff *skb) { @@ -56,6 +57,7 @@ int br_forward_finish(struct sk_buff *skb) br_dev_queue_push_xmit); } +EXPORT_SYMBOL_GPL(br_forward_finish); static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 078d336a1f379f80d622e4fffa187630593827cf..ed307db7a12b64531d0cdf2a89ccb53b7356ad4c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -252,12 +252,12 @@ static void del_nbp(struct net_bridge_port *p) br_fdb_delete_by_port(br, p, 1); nbp_update_port_count(br); + netdev_upper_dev_unlink(dev, br->dev); + dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); - netdev_upper_dev_unlink(dev, br->dev); - br_multicast_del_port(p); kobject_uevent(&p->kobj, KOBJ_REMOVE); @@ -332,7 +332,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->port_no = index; p->flags = BR_LEARNING | BR_FLOOD; br_init_port(p); - p->state = BR_STATE_DISABLED; + br_set_state(p, BR_STATE_DISABLED); br_stp_port_timer_init(p); br_multicast_add_port(p); @@ -476,16 +476,16 
@@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err3; - err = netdev_master_upper_dev_link(dev, br->dev); + err = netdev_rx_handler_register(dev, br_handle_frame, p); if (err) goto err4; - err = netdev_rx_handler_register(dev, br_handle_frame, p); + dev->priv_flags |= IFF_BRIDGE_PORT; + + err = netdev_master_upper_dev_link(dev, br->dev); if (err) goto err5; - dev->priv_flags |= IFF_BRIDGE_PORT; - dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); @@ -500,6 +500,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (br_fdb_insert(br, p, dev->dev_addr, 0)) netdev_err(dev, "failed insert local address bridge forwarding table\n"); + if (nbp_vlan_init(p)) + netdev_err(dev, "failed to initialize vlan filtering on this port\n"); + spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); @@ -520,7 +523,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) return 0; err5: - netdev_upper_dev_unlink(dev, br->dev); + dev->priv_flags &= ~IFF_BRIDGE_PORT; + netdev_rx_handler_unregister(dev); err4: br_netpoll_disable(p); err3: diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 366c43649079d9bdef66063af2ab848965ca8197..6fd5522df696ce744558a4db82803c34394eed6f 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -140,6 +140,7 @@ int br_handle_frame_finish(struct sk_buff *skb) kfree_skb(skb); goto out; } +EXPORT_SYMBOL_GPL(br_handle_frame_finish); /* note: already called with rcu_read_lock */ static int br_handle_local_finish(struct sk_buff *skb) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7751c92c8c57fc24b0c18e4d20a095bfa02e9ff0..648d79ccf46244a5769c1703361cf35db4cfeb09 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br, if (query->startup_sent < br->multicast_startup_query_count) query->startup_sent++; - rcu_assign_pointer(querier, NULL); + RCU_INIT_POINTER(querier, NULL); br_multicast_send_query(br, NULL, query); spin_unlock(&br->multicast_lock); } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index a615264cf01a950aafd894109d43f55cfd8dff91..1bada53bb195ca4c35950af943e049a8ddf8347a 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -111,66 +111,6 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb) pppoe_proto(skb) == htons(PPP_IPV6) && \ brnf_filter_pppoe_tagged) -static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, u32 mtu) -{ -} - -static void fake_redirect(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb) -{ -} - -static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old) -{ - return NULL; -} - -static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, - struct sk_buff *skb, - const void *daddr) -{ - return NULL; -} - -static unsigned int fake_mtu(const struct dst_entry *dst) -{ - return dst->dev->mtu; -} - -static struct dst_ops fake_dst_ops = { - .family = AF_INET, - .protocol = cpu_to_be16(ETH_P_IP), - .update_pmtu = fake_update_pmtu, - .redirect = fake_redirect, - .cow_metrics = fake_cow_metrics, - .neigh_lookup = fake_neigh_lookup, - .mtu = fake_mtu, -}; - -/* - * Initialize bogus route table used to keep netfilter happy. - * Currently, we fill in the PMTU entry because netfilter - * refragmentation needs it, and the rt_flags entry because - * ipt_REJECT needs it. 
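/* Illustrative sketch, not part of the patch: the br_multicast change from
 * rcu_assign_pointer(querier, NULL) to RCU_INIT_POINTER(querier, NULL)
 * rests on the rule that the release barrier in rcu_assign_pointer() only
 * matters when publishing a freshly initialised object; storing NULL has
 * nothing for readers to observe, so the unbarriered initialiser is enough.
 * The toy_* names are hypothetical, the RCU helpers are the real API.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct toy_cfg {
        int value;
        struct rcu_head rcu;
};

static struct toy_cfg __rcu *toy_cfg_ptr;

static int toy_publish(int value)
{
        struct toy_cfg *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);

        if (!cfg)
                return -ENOMEM;
        cfg->value = value;
        /* Readers may dereference the pointer immediately, so the stores
         * above must be ordered before it becomes visible. */
        rcu_assign_pointer(toy_cfg_ptr, cfg);
        return 0;
}

static void toy_clear(void)
{
        struct toy_cfg *cfg = rcu_dereference_protected(toy_cfg_ptr, 1);

        /* A NULL store publishes no new memory, so no barrier is needed,
         * which is exactly the querier expiry case in the hunk above. */
        RCU_INIT_POINTER(toy_cfg_ptr, NULL);
        if (cfg)
                kfree_rcu(cfg, rcu);
}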
Future netfilter modules might - * require us to fill additional fields. - */ -static const u32 br_dst_default_metrics[RTAX_MAX] = { - [RTAX_MTU - 1] = 1500, -}; - -void br_netfilter_rtable_init(struct net_bridge *br) -{ - struct rtable *rt = &br->fake_rtable; - - atomic_set(&rt->dst.__refcnt, 1); - rt->dst.dev = br->dev; - rt->dst.path = &rt->dst; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; - rt->dst.ops = &fake_dst_ops; -} - static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { struct net_bridge_port *port; @@ -245,14 +185,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) skb->nf_bridge->data, header_size); } -static inline void nf_bridge_update_protocol(struct sk_buff *skb) -{ - if (skb->nf_bridge->mask & BRNF_8021Q) - skb->protocol = htons(ETH_P_8021Q); - else if (skb->nf_bridge->mask & BRNF_PPPoE) - skb->protocol = htons(ETH_P_PPP_SES); -} - /* When handing a packet over to the IP layer * check whether we have a skb that is in the * expected format @@ -320,26 +252,6 @@ static int br_parse_ip_options(struct sk_buff *skb) return -1; } -/* Fill in the header for fragmented IP packets handled by - * the IPv4 connection tracking code. - */ -int nf_bridge_copy_header(struct sk_buff *skb) -{ - int err; - unsigned int header_size; - - nf_bridge_update_protocol(skb); - header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); - err = skb_cow_head(skb, header_size); - if (err) - return err; - - skb_copy_to_linear_data_offset(skb, -header_size, - skb->nf_bridge->data, header_size); - __skb_push(skb, nf_bridge_encap_header_len(skb)); - return 0; -} - /* PF_BRIDGE/PRE_ROUTING *********************************************/ /* Undo the changes made for ip6tables PREROUTING and continue the * bridge PRE_ROUTING hook. */ @@ -404,6 +316,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) ETH_HLEN-ETH_ALEN); /* tell br_dev_xmit to continue with forwarding */ nf_bridge->mask |= BRNF_BRIDGED_DNAT; + /* FIXME Need to refragment */ ret = neigh->output(neigh, skb); } neigh_release(neigh); @@ -459,6 +372,10 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) struct nf_bridge_info *nf_bridge = skb->nf_bridge; struct rtable *rt; int err; + int frag_max_size; + + frag_max_size = IPCB(skb)->frag_max_size; + BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; @@ -863,13 +780,19 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; + int frag_max_size; + /* This is wrong! We should preserve the original fragment + * boundaries by preserving frag_list rather than refragmenting. 
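/* Illustrative sketch, not part of the patch: the frag_max_size handling
 * follows the usual "stash per-packet state in skb->cb" pattern.  The
 * largest fragment size recorded during IPv4 defragmentation is copied
 * into the bridge's own control-buffer layout, and handed back to IPCB()
 * on the output side before ip_fragment() so refragmentation does not
 * produce fragments larger than the originals.  Everything named toy_*
 * below is hypothetical.
 */
#include <linux/skbuff.h>
#include <linux/types.h>

struct toy_bridge_cb {
        u16 frag_max_size;      /* survives while IP's cb layout is reused */
};

#define TOY_CB(skb) ((struct toy_bridge_cb *)(skb)->cb)

/* Ingress: capture the value before the bridge overwrites skb->cb. */
static inline void toy_save_frag_size(struct sk_buff *skb, u16 ip_frag_max)
{
        TOY_CB(skb)->frag_max_size = ip_frag_max;
}

/* Egress: read it back just before asking IP to refragment the packet. */
static inline u16 toy_restore_frag_size(struct sk_buff *skb)
{
        return TOY_CB(skb)->frag_max_size;
}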
+ */ if (skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { + frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; if (br_parse_ip_options(skb)) /* Drop invalid packet */ return NF_DROP; + IPCB(skb)->frag_max_size = frag_max_size; ret = ip_fragment(skb, br_dev_queue_push_xmit); } else ret = br_dev_queue_push_xmit(skb); @@ -944,6 +867,11 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops, return NF_ACCEPT; } +void br_netfilter_enable(void) +{ +} +EXPORT_SYMBOL_GPL(br_netfilter_enable); + /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because * br_dev_queue_push_xmit is called afterwards */ static struct nf_hook_ops br_nf_ops[] __read_mostly = { @@ -1059,38 +987,42 @@ static struct ctl_table brnf_table[] = { }; #endif -int __init br_netfilter_init(void) +static int __init br_netfilter_init(void) { int ret; - ret = dst_entries_init(&fake_dst_ops); + ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); if (ret < 0) return ret; - ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); - if (ret < 0) { - dst_entries_destroy(&fake_dst_ops); - return ret; - } #ifdef CONFIG_SYSCTL brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table); if (brnf_sysctl_header == NULL) { printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n"); - nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); - dst_entries_destroy(&fake_dst_ops); - return -ENOMEM; + ret = -ENOMEM; + goto err1; } #endif printk(KERN_NOTICE "Bridge firewalling registered\n"); return 0; +err1: + nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); + return ret; } -void br_netfilter_fini(void) +static void __exit br_netfilter_fini(void) { nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); #ifdef CONFIG_SYSCTL unregister_net_sysctl_table(brnf_sysctl_header); #endif - dst_entries_destroy(&fake_dst_ops); } + +module_init(br_netfilter_init); +module_exit(br_netfilter_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_AUTHOR("Bart De Schuymer "); +MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge"); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index cb5fcf62f66332d2ef9fb3f46260d43583713a1d..2ff9706647f2cb9f7930d22d0a3092fb3fe0d225 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -257,9 +257,6 @@ static int br_afspec(struct net_bridge *br, } else err = br_vlan_add(br, vinfo->vid, vinfo->flags); - if (err) - break; - break; case RTM_DELLINK: @@ -276,7 +273,7 @@ static int br_afspec(struct net_bridge *br, return err; } -static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { +static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, [IFLA_BRPORT_COST] = { .type = NLA_U32 }, [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, @@ -304,7 +301,7 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state) (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED)) return -ENETDOWN; - p->state = state; + br_set_state(p, state); br_log_state(p); br_port_state_selection(p->br); return 0; @@ -382,7 +379,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) if (p && protinfo) { if (protinfo->nla_type & NLA_F_NESTED) { err = nla_parse_nested(tb, IFLA_BRPORT_MAX, - protinfo, ifla_brport_policy); + protinfo, br_port_policy); if (err) return err; @@ -461,6 +458,88 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev, return register_netdevice(dev); } 
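/* Illustrative sketch, not part of the patch: with br_netfilter now built
 * as its own tristate module, the shared bridge code switches from
 * "#ifdef CONFIG_BRIDGE_NETFILTER" to "#if IS_ENABLED(...)" throughout
 * this series.  For a tristate option, =m defines CONFIG_FOO_MODULE
 * instead of CONFIG_FOO, so a bare #ifdef would silently drop the
 * netfilter fields and sysfs knobs whenever the feature is modular.
 * CONFIG_TOY_FEATURE is a made-up symbol used only for this example.
 */
#include <linux/kconfig.h>

#ifdef CONFIG_TOY_FEATURE
/* Compiled only when CONFIG_TOY_FEATURE=y (built into the kernel). */
#endif

#if IS_ENABLED(CONFIG_TOY_FEATURE)
/* Compiled when CONFIG_TOY_FEATURE=y or =m, which is what struct
 * net_bridge and the nf_call_* sysfs attributes need once the netfilter
 * part can be a module. */
#endif

#if IS_MODULE(CONFIG_TOY_FEATURE)
/* Compiled only when CONFIG_TOY_FEATURE=m. */
#endif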
+static int br_port_slave_changelink(struct net_device *brdev, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[]) +{ + if (!data) + return 0; + return br_setport(br_port_get_rtnl(dev), data); +} + +static int br_port_fill_slave_info(struct sk_buff *skb, + const struct net_device *brdev, + const struct net_device *dev) +{ + return br_port_fill_attrs(skb, br_port_get_rtnl(dev)); +} + +static size_t br_port_get_slave_size(const struct net_device *brdev, + const struct net_device *dev) +{ + return br_port_info_size(); +} + +static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { + [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 }, + [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 }, + [IFLA_BR_MAX_AGE] = { .type = NLA_U32 }, +}; + +static int br_changelink(struct net_device *brdev, struct nlattr *tb[], + struct nlattr *data[]) +{ + struct net_bridge *br = netdev_priv(brdev); + int err; + + if (!data) + return 0; + + if (data[IFLA_BR_FORWARD_DELAY]) { + err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY])); + if (err) + return err; + } + + if (data[IFLA_BR_HELLO_TIME]) { + err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME])); + if (err) + return err; + } + + if (data[IFLA_BR_MAX_AGE]) { + err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE])); + if (err) + return err; + } + + return 0; +} + +static size_t br_get_size(const struct net_device *brdev) +{ + return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */ + 0; +} + +static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) +{ + struct net_bridge *br = netdev_priv(brdev); + u32 forward_delay = jiffies_to_clock_t(br->forward_delay); + u32 hello_time = jiffies_to_clock_t(br->hello_time); + u32 age_time = jiffies_to_clock_t(br->max_age); + + if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || + nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || + nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time)) + return -EMSGSIZE; + + return 0; +} + static size_t br_get_link_af_size(const struct net_device *dev) { struct net_port_vlans *pv; @@ -485,12 +564,23 @@ static struct rtnl_af_ops br_af_ops = { }; struct rtnl_link_ops br_link_ops __read_mostly = { - .kind = "bridge", - .priv_size = sizeof(struct net_bridge), - .setup = br_dev_setup, - .validate = br_validate, - .newlink = br_dev_newlink, - .dellink = br_dev_delete, + .kind = "bridge", + .priv_size = sizeof(struct net_bridge), + .setup = br_dev_setup, + .maxtype = IFLA_BRPORT_MAX, + .policy = br_policy, + .validate = br_validate, + .newlink = br_dev_newlink, + .changelink = br_changelink, + .dellink = br_dev_delete, + .get_size = br_get_size, + .fill_info = br_fill_info, + + .slave_maxtype = IFLA_BRPORT_MAX, + .slave_policy = br_port_policy, + .slave_changelink = br_port_slave_changelink, + .get_slave_size = br_port_get_slave_size, + .fill_slave_info = br_port_fill_slave_info, }; int __init br_netlink_init(void) @@ -512,7 +602,7 @@ int __init br_netlink_init(void) return err; } -void __exit br_netlink_fini(void) +void br_netlink_fini(void) { br_mdb_uninit(); rtnl_af_unregister(&br_af_ops); diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c new file mode 100644 index 0000000000000000000000000000000000000000..387cb3bd017c0102b73ea2cf7089500ea00a19a9 --- /dev/null +++ b/net/bridge/br_nf_core.c @@ -0,0 +1,96 @@ +/* + * Handle firewalling core + * Linux ethernet bridge + * + * Authors: + * Lennert 
Buytenhek + * Bart De Schuymer + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Lennert dedicates this file to Kerstin Wurdinger. + */ + +#include +#include +#include +#include +#include + +#include "br_private.h" +#ifdef CONFIG_SYSCTL +#include +#endif + +static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, u32 mtu) +{ +} + +static void fake_redirect(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb) +{ +} + +static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old) +{ + return NULL; +} + +static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, + struct sk_buff *skb, + const void *daddr) +{ + return NULL; +} + +static unsigned int fake_mtu(const struct dst_entry *dst) +{ + return dst->dev->mtu; +} + +static struct dst_ops fake_dst_ops = { + .family = AF_INET, + .protocol = cpu_to_be16(ETH_P_IP), + .update_pmtu = fake_update_pmtu, + .redirect = fake_redirect, + .cow_metrics = fake_cow_metrics, + .neigh_lookup = fake_neigh_lookup, + .mtu = fake_mtu, +}; + +/* + * Initialize bogus route table used to keep netfilter happy. + * Currently, we fill in the PMTU entry because netfilter + * refragmentation needs it, and the rt_flags entry because + * ipt_REJECT needs it. Future netfilter modules might + * require us to fill additional fields. + */ +static const u32 br_dst_default_metrics[RTAX_MAX] = { + [RTAX_MTU - 1] = 1500, +}; + +void br_netfilter_rtable_init(struct net_bridge *br) +{ + struct rtable *rt = &br->fake_rtable; + + atomic_set(&rt->dst.__refcnt, 1); + rt->dst.dev = br->dev; + rt->dst.path = &rt->dst; + dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; + rt->dst.ops = &fake_dst_ops; +} + +int __init br_nf_core_init(void) +{ + return dst_entries_init(&fake_dst_ops); +} + +void br_nf_core_fini(void) +{ + dst_entries_destroy(&fake_dst_ops); +} diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index b6c04cbcfdc5ee4aa8200677e50107014e0ce2cb..4d783d071305a4d7aa683aaa74b57088db6142a8 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -221,7 +221,7 @@ struct net_bridge struct pcpu_sw_netstats __percpu *stats; spinlock_t hash_lock; struct hlist_head hash[BR_HASH_SIZE]; -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct rtable fake_rtable; bool nf_call_iptables; bool nf_call_ip6tables; @@ -299,16 +299,21 @@ struct net_bridge #ifdef CONFIG_BRIDGE_VLAN_FILTERING u8 vlan_enabled; __be16 vlan_proto; + u16 default_pvid; struct net_port_vlans __rcu *vlan_info; #endif }; struct br_input_skb_cb { struct net_device *brdev; + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING int igmp; int mrouters_only; #endif + + u16 frag_max_size; + #ifdef CONFIG_BRIDGE_VLAN_FILTERING bool vlan_filtered; #endif @@ -604,11 +609,13 @@ bool br_vlan_find(struct net_bridge *br, u16 vid); void br_recalculate_fwd_mask(struct net_bridge *br); int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int br_vlan_set_proto(struct net_bridge *br, unsigned long val); -void br_vlan_init(struct net_bridge *br); +int br_vlan_init(struct net_bridge *br); +int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val); int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); int nbp_vlan_delete(struct 
net_bridge_port *port, u16 vid); void nbp_vlan_flush(struct net_bridge_port *port); bool nbp_vlan_find(struct net_bridge_port *port, u16 vid); +int nbp_vlan_init(struct net_bridge_port *port); static inline struct net_port_vlans *br_get_vlan_info( const struct net_bridge *br) @@ -641,11 +648,11 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid) static inline u16 br_get_pvid(const struct net_port_vlans *v) { - /* Return just the VID if it is set, or VLAN_N_VID (invalid vid) if - * vid wasn't set - */ + if (!v) + return 0; + smp_rmb(); - return v->pvid ?: VLAN_N_VID; + return v->pvid; } static inline int br_vlan_enabled(struct net_bridge *br) @@ -704,8 +711,9 @@ static inline void br_recalculate_fwd_mask(struct net_bridge *br) { } -static inline void br_vlan_init(struct net_bridge *br) +static inline int br_vlan_init(struct net_bridge *br) { + return 0; } static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) @@ -738,13 +746,18 @@ static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid) return false; } +static inline int nbp_vlan_init(struct net_bridge_port *port) +{ + return 0; +} + static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag) { return 0; } static inline u16 br_get_pvid(const struct net_port_vlans *v) { - return VLAN_N_VID; /* Returns invalid vid */ + return 0; } static inline int br_vlan_enabled(struct net_bridge *br) @@ -754,18 +767,19 @@ static inline int br_vlan_enabled(struct net_bridge *br) #endif /* br_netfilter.c */ -#ifdef CONFIG_BRIDGE_NETFILTER -int br_netfilter_init(void); -void br_netfilter_fini(void); +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) +int br_nf_core_init(void); +void br_nf_core_fini(void); void br_netfilter_rtable_init(struct net_bridge *); #else -#define br_netfilter_init() (0) -#define br_netfilter_fini() do { } while (0) +static inline int br_nf_core_init(void) { return 0; } +static inline void br_nf_core_fini(void) {} #define br_netfilter_rtable_init(x) #endif /* br_stp.c */ void br_log_state(const struct net_bridge_port *p); +void br_set_state(struct net_bridge_port *p, unsigned int state); struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no); void br_init_port(struct net_bridge_port *p); void br_become_designated_port(struct net_bridge_port *p); diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 3c86f0538cbb4a056bc274c971e254cb3d8da0f8..2b047bcf42a4eb1c450c0283af9bea2676052836 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -36,6 +36,11 @@ void br_log_state(const struct net_bridge_port *p) br_port_state_names[p->state]); } +void br_set_state(struct net_bridge_port *p, unsigned int state) +{ + p->state = state; +} + /* called under bridge lock */ struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no) { @@ -107,7 +112,7 @@ static void br_root_port_block(const struct net_bridge *br, br_notice(br, "port %u(%s) tried to become root port (blocked)", (unsigned int) p->port_no, p->dev->name); - p->state = BR_STATE_LISTENING; + br_set_state(p, BR_STATE_LISTENING); br_log_state(p); br_ifinfo_notify(RTM_NEWLINK, p); @@ -387,7 +392,7 @@ static void br_make_blocking(struct net_bridge_port *p) p->state == BR_STATE_LEARNING) br_topology_change_detection(p->br); - p->state = BR_STATE_BLOCKING; + br_set_state(p, BR_STATE_BLOCKING); br_log_state(p); br_ifinfo_notify(RTM_NEWLINK, p); @@ -404,13 +409,13 @@ static void br_make_forwarding(struct net_bridge_port *p) return; if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) { - 
p->state = BR_STATE_FORWARDING; + br_set_state(p, BR_STATE_FORWARDING); br_topology_change_detection(br); del_timer(&p->forward_delay_timer); } else if (br->stp_enabled == BR_KERNEL_STP) - p->state = BR_STATE_LISTENING; + br_set_state(p, BR_STATE_LISTENING); else - p->state = BR_STATE_LEARNING; + br_set_state(p, BR_STATE_LEARNING); br_multicast_enable_port(p); br_log_state(p); diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 189ba1e7d8515945db5593c053b7751d36a02d3b..41146872c1b47ca6ee3ac48c14e672a9743273d4 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -37,7 +37,7 @@ void br_init_port(struct net_bridge_port *p) { p->port_id = br_make_port_id(p->priority, p->port_no); br_become_designated_port(p); - p->state = BR_STATE_BLOCKING; + br_set_state(p, BR_STATE_BLOCKING); p->topology_change_ack = 0; p->config_pending = 0; } @@ -100,7 +100,7 @@ void br_stp_disable_port(struct net_bridge_port *p) wasroot = br_is_root_bridge(br); br_become_designated_port(p); - p->state = BR_STATE_DISABLED; + br_set_state(p, BR_STATE_DISABLED); p->topology_change_ack = 0; p->config_pending = 0; diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 558c46d19e0559c6c4f015e49d6b3e77d2205f87..4fcaa67750fda845ad0a180332c4cd96a9524086 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -87,11 +87,11 @@ static void br_forward_delay_timer_expired(unsigned long arg) (unsigned int) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { - p->state = BR_STATE_LEARNING; + br_set_state(p, BR_STATE_LEARNING); mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); } else if (p->state == BR_STATE_LEARNING) { - p->state = BR_STATE_FORWARDING; + br_set_state(p, BR_STATE_FORWARDING); if (br_is_designated_for_some_port(br)) br_topology_change_detection(br); netif_carrier_on(br->dev); diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c9e2572b15f400f9183606922a87f024ffe6570f..4c97fc50fb704265bd2f51d96527cf3227a65118 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -629,7 +629,7 @@ static ssize_t multicast_startup_query_interval_store( } static DEVICE_ATTR_RW(multicast_startup_query_interval); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static ssize_t nf_call_iptables_show( struct device *d, struct device_attribute *attr, char *buf) { @@ -725,6 +725,22 @@ static ssize_t vlan_protocol_store(struct device *d, return store_bridge_parm(d, buf, len, br_vlan_set_proto); } static DEVICE_ATTR_RW(vlan_protocol); + +static ssize_t default_pvid_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct net_bridge *br = to_bridge(d); + return sprintf(buf, "%d\n", br->default_pvid); +} + +static ssize_t default_pvid_store(struct device *d, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid); +} +static DEVICE_ATTR_RW(default_pvid); #endif static struct attribute *bridge_attrs[] = { @@ -763,7 +779,7 @@ static struct attribute *bridge_attrs[] = { &dev_attr_multicast_query_response_interval.attr, &dev_attr_multicast_startup_query_interval.attr, #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) &dev_attr_nf_call_iptables.attr, &dev_attr_nf_call_ip6tables.attr, &dev_attr_nf_call_arptables.attr, @@ -771,6 +787,7 @@ static struct attribute *bridge_attrs[] = { #ifdef CONFIG_BRIDGE_VLAN_FILTERING &dev_attr_vlan_filtering.attr, 
&dev_attr_vlan_protocol.attr, + &dev_attr_default_pvid.attr, #endif NULL }; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 3ba57fcdcd13fec638aa17e539e4baf0d3e010eb..150048fb99b08eeeb25aac140721b973731ebf61 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -223,7 +223,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, * See if pvid is set on this port. That tells us which * vlan untagged or priority-tagged traffic belongs to. */ - if (pvid == VLAN_N_VID) + if (!pvid) goto drop; /* PVID is set on this port. Any untagged or priority-tagged @@ -292,7 +292,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) if (!*vid) { *vid = br_get_pvid(v); - if (*vid == VLAN_N_VID) + if (!*vid) return false; return true; @@ -499,9 +499,141 @@ int br_vlan_set_proto(struct net_bridge *br, unsigned long val) goto unlock; } -void br_vlan_init(struct net_bridge *br) +static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid) +{ + return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap); +} + +static void br_vlan_disable_default_pvid(struct net_bridge *br) +{ + struct net_bridge_port *p; + u16 pvid = br->default_pvid; + + /* Disable default_pvid on all ports where it is still + * configured. + */ + if (vlan_default_pvid(br_get_vlan_info(br), pvid)) + br_vlan_delete(br, pvid); + + list_for_each_entry(p, &br->port_list, list) { + if (vlan_default_pvid(nbp_get_vlan_info(p), pvid)) + nbp_vlan_delete(p, pvid); + } + + br->default_pvid = 0; +} + +static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid) +{ + struct net_bridge_port *p; + u16 old_pvid; + int err = 0; + unsigned long *changed; + + changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), + GFP_KERNEL); + if (!changed) + return -ENOMEM; + + old_pvid = br->default_pvid; + + /* Update default_pvid config only if we do not conflict with + * user configuration. + */ + if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) && + !br_vlan_find(br, pvid)) { + err = br_vlan_add(br, pvid, + BRIDGE_VLAN_INFO_PVID | + BRIDGE_VLAN_INFO_UNTAGGED); + if (err) + goto out; + br_vlan_delete(br, old_pvid); + set_bit(0, changed); + } + + list_for_each_entry(p, &br->port_list, list) { + /* Update default_pvid config only if we do not conflict with + * user configuration. 
+ */ + if ((old_pvid && + !vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) || + nbp_vlan_find(p, pvid)) + continue; + + err = nbp_vlan_add(p, pvid, + BRIDGE_VLAN_INFO_PVID | + BRIDGE_VLAN_INFO_UNTAGGED); + if (err) + goto err_port; + nbp_vlan_delete(p, old_pvid); + set_bit(p->port_no, changed); + } + + br->default_pvid = pvid; + +out: + kfree(changed); + return err; + +err_port: + list_for_each_entry_continue_reverse(p, &br->port_list, list) { + if (!test_bit(p->port_no, changed)) + continue; + + if (old_pvid) + nbp_vlan_add(p, old_pvid, + BRIDGE_VLAN_INFO_PVID | + BRIDGE_VLAN_INFO_UNTAGGED); + nbp_vlan_delete(p, pvid); + } + + if (test_bit(0, changed)) { + if (old_pvid) + br_vlan_add(br, old_pvid, + BRIDGE_VLAN_INFO_PVID | + BRIDGE_VLAN_INFO_UNTAGGED); + br_vlan_delete(br, pvid); + } + goto out; +} + +int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val) +{ + u16 pvid = val; + int err = 0; + + if (val >= VLAN_VID_MASK) + return -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (pvid == br->default_pvid) + goto unlock; + + /* Only allow default pvid change when filtering is disabled */ + if (br->vlan_enabled) { + pr_info_once("Please disable vlan filtering to change default_pvid\n"); + err = -EPERM; + goto unlock; + } + + if (!pvid) + br_vlan_disable_default_pvid(br); + else + err = __br_vlan_set_default_pvid(br, pvid); + +unlock: + rtnl_unlock(); + return err; +} + +int br_vlan_init(struct net_bridge *br) { br->vlan_proto = htons(ETH_P_8021Q); + br->default_pvid = 1; + return br_vlan_add(br, 1, + BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED); } /* Must be protected by RTNL. @@ -593,3 +725,12 @@ bool nbp_vlan_find(struct net_bridge_port *port, u16 vid) rcu_read_unlock(); return found; } + +int nbp_vlan_init(struct net_bridge_port *p) +{ + return p->br->default_pvid ? 
+ nbp_vlan_add(p, p->br->default_pvid, + BRIDGE_VLAN_INFO_PVID | + BRIDGE_VLAN_INFO_UNTAGGED) : + 0; +} diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 6d69631b9f4d2bf5c667d1f76e4f5bc384cdf61c..d9a8c05d995d14466d2cef02fcd571d4ad009272 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -26,6 +26,7 @@ #include #include #include +#include #include /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" @@ -1058,6 +1059,20 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, vfree(table); vfree(counterstmp); + +#ifdef CONFIG_AUDIT + if (audit_enabled) { + struct audit_buffer *ab; + + ab = audit_log_start(current->audit_context, GFP_KERNEL, + AUDIT_NETFILTER_CFG); + if (ab) { + audit_log_format(ab, "table=%s family=%u entries=%u", + repl->name, AF_BRIDGE, repl->nentries); + audit_log_end(ab); + } + } +#endif return ret; free_unlock: diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index 5bcc0d8b31f22dff5b0368a77ae249f60e4bad9e..da17a5eab8b40e5df3eeafcef74934534eb26dd1 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -34,9 +34,11 @@ static struct nft_af_info nft_af_bridge __read_mostly = { .owner = THIS_MODULE, .nops = 1, .hooks = { + [NF_BR_PRE_ROUTING] = nft_do_chain_bridge, [NF_BR_LOCAL_IN] = nft_do_chain_bridge, [NF_BR_FORWARD] = nft_do_chain_bridge, [NF_BR_LOCAL_OUT] = nft_do_chain_bridge, + [NF_BR_POST_ROUTING] = nft_do_chain_bridge, }, }; diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index ee3ffe93e14ec73e330da1c1fcbdd6b18cc74be6..a76479535df2a5005968c18422d999aa897b3556 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -14,21 +14,106 @@ #include #include #include +#include +#include static void nft_reject_bridge_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], const struct nft_pktinfo *pkt) { + struct nft_reject *priv = nft_expr_priv(expr); + struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); + switch (eth_hdr(pkt->skb)->h_proto) { case htons(ETH_P_IP): - return nft_reject_ipv4_eval(expr, data, pkt); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + nf_send_unreach(pkt->skb, priv->icmp_code); + break; + case NFT_REJECT_TCP_RST: + nf_send_reset(pkt->skb, pkt->ops->hooknum); + break; + case NFT_REJECT_ICMPX_UNREACH: + nf_send_unreach(pkt->skb, + nft_reject_icmp_code(priv->icmp_code)); + break; + } + break; case htons(ETH_P_IPV6): - return nft_reject_ipv6_eval(expr, data, pkt); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + nf_send_unreach6(net, pkt->skb, priv->icmp_code, + pkt->ops->hooknum); + break; + case NFT_REJECT_TCP_RST: + nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); + break; + case NFT_REJECT_ICMPX_UNREACH: + nf_send_unreach6(net, pkt->skb, + nft_reject_icmpv6_code(priv->icmp_code), + pkt->ops->hooknum); + break; + } + break; default: /* No explicit way to reject this protocol, drop it. 
*/ - data[NFT_REG_VERDICT].verdict = NF_DROP; break; } + data[NFT_REG_VERDICT].verdict = NF_DROP; +} + +static int nft_reject_bridge_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_reject *priv = nft_expr_priv(expr); + int icmp_code; + + if (tb[NFTA_REJECT_TYPE] == NULL) + return -EINVAL; + + priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + case NFT_REJECT_ICMPX_UNREACH: + if (tb[NFTA_REJECT_ICMP_CODE] == NULL) + return -EINVAL; + + icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); + if (priv->type == NFT_REJECT_ICMPX_UNREACH && + icmp_code > NFT_REJECT_ICMPX_MAX) + return -EINVAL; + + priv->icmp_code = icmp_code; + break; + case NFT_REJECT_TCP_RST: + break; + default: + return -EINVAL; + } + return 0; +} + +static int nft_reject_bridge_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_reject *priv = nft_expr_priv(expr); + + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) + goto nla_put_failure; + + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + case NFT_REJECT_ICMPX_UNREACH: + if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) + goto nla_put_failure; + break; + } + + return 0; + +nla_put_failure: + return -1; } static struct nft_expr_type nft_reject_bridge_type; @@ -36,8 +121,8 @@ static const struct nft_expr_ops nft_reject_bridge_ops = { .type = &nft_reject_bridge_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), .eval = nft_reject_bridge_eval, - .init = nft_reject_init, - .dump = nft_reject_dump, + .init = nft_reject_bridge_init, + .dump = nft_reject_bridge_dump, }; static struct nft_expr_type nft_reject_bridge_type __read_mostly = { diff --git a/net/core/dev.c b/net/core/dev.c index 130d642202297205cd038fb2645970eab5d2b972..4699dcfdc4ab1254f106af5e4ae1451987bff7aa 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -897,23 +897,25 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) EXPORT_SYMBOL(dev_getfirstbyhwtype); /** - * dev_get_by_flags_rcu - find any device with given flags + * __dev_get_by_flags - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device * is not found or a pointer to the device. Must be called inside - * rcu_read_lock(), and result refcount is unchanged. + * rtnl_lock(), and result refcount is unchanged. 
*/ -struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, - unsigned short mask) +struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, + unsigned short mask) { struct net_device *dev, *ret; + ASSERT_RTNL(); + ret = NULL; - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { ret = dev; break; @@ -921,7 +923,7 @@ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags } return ret; } -EXPORT_SYMBOL(dev_get_by_flags_rcu); +EXPORT_SYMBOL(__dev_get_by_flags); /** * dev_valid_name - check if name is okay for network device @@ -2175,6 +2177,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) return (struct dev_kfree_skb_cb *)skb->cb; } +void netif_schedule_queue(struct netdev_queue *txq) +{ + rcu_read_lock(); + if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { + struct Qdisc *q = rcu_dereference(txq->qdisc); + + __netif_schedule(q); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(netif_schedule_queue); + +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. + */ +void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { + struct Qdisc *q; + + rcu_read_lock(); + q = rcu_dereference(txq->qdisc); + __netif_schedule(q); + rcu_read_unlock(); + } +} +EXPORT_SYMBOL(netif_wake_subqueue); + +void netif_tx_wake_queue(struct netdev_queue *dev_queue) +{ + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { + struct Qdisc *q; + + rcu_read_lock(); + q = rcu_dereference(dev_queue->qdisc); + __netif_schedule(q); + rcu_read_unlock(); + } +} +EXPORT_SYMBOL(netif_tx_wake_queue); + void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) { unsigned long flags; @@ -2371,16 +2420,6 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, rcu_read_lock(); list_for_each_entry_rcu(ptype, &offload_base, list) { if (ptype->type == type && ptype->callbacks.gso_segment) { - if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { - int err; - - err = ptype->callbacks.gso_send_check(skb); - segs = ERR_PTR(err); - if (err || skb_gso_ok(skb, features)) - break; - __skb_push(skb, (skb->data - - skb_network_header(skb))); - } segs = ptype->callbacks.gso_segment(skb, features); break; } @@ -2483,52 +2522,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) return 0; } -struct dev_gso_cb { - void (*destructor)(struct sk_buff *skb); -}; - -#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) - -static void dev_gso_skb_destructor(struct sk_buff *skb) -{ - struct dev_gso_cb *cb; - - kfree_skb_list(skb->next); - skb->next = NULL; - - cb = DEV_GSO_CB(skb); - if (cb->destructor) - cb->destructor(skb); -} - -/** - * dev_gso_segment - Perform emulated hardware segmentation on skb. - * @skb: buffer to segment - * @features: device features as applicable to this skb - * - * This function segments the given skb and stores the list of segments - * in skb->next. - */ -static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) -{ - struct sk_buff *segs; - - segs = skb_gso_segment(skb, features); - - /* Verifying header integrity only. 
*/ - if (!segs) - return 0; - - if (IS_ERR(segs)) - return PTR_ERR(segs); - - skb->next = segs; - DEV_GSO_CB(skb)->destructor = skb->destructor; - skb->destructor = dev_gso_skb_destructor; - - return 0; -} - /* If MPLS offload request, verify we are testing hardware MPLS features * instead of standard features for the netdev. */ @@ -2572,10 +2565,12 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, netdev_features_t netif_skb_features(struct sk_buff *skb) { + const struct net_device *dev = skb->dev; + netdev_features_t features = dev->features; + u16 gso_segs = skb_shinfo(skb)->gso_segs; __be16 protocol = skb->protocol; - netdev_features_t features = skb->dev->features; - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) features &= ~NETIF_F_GSO_MASK; if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { @@ -2586,7 +2581,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) } features = netdev_intersect_features(features, - skb->dev->vlan_features | + dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); @@ -2603,119 +2598,149 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) } EXPORT_SYMBOL(netif_skb_features); -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq) +static int xmit_one(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) { - const struct net_device_ops *ops = dev->netdev_ops; - int rc = NETDEV_TX_OK; - unsigned int skb_len; + unsigned int len; + int rc; - if (likely(!skb->next)) { - netdev_features_t features; + if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(skb, dev); - /* - * If device doesn't need skb->dst, release it right now while - * its hot in this cpu cache - */ - if (dev->priv_flags & IFF_XMIT_DST_RELEASE) - skb_dst_drop(skb); + len = skb->len; + trace_net_dev_start_xmit(skb, dev); + rc = netdev_start_xmit(skb, dev, txq, more); + trace_net_dev_xmit(skb, rc, dev, len); - features = netif_skb_features(skb); - - if (vlan_tx_tag_present(skb) && - !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); - if (unlikely(!skb)) - goto out; + return rc; +} - skb->vlan_tci = 0; - } +struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, + struct netdev_queue *txq, int *ret) +{ + struct sk_buff *skb = first; + int rc = NETDEV_TX_OK; - /* If encapsulation offload request, verify we are testing - * hardware encapsulation features instead of standard - * features for the netdev - */ - if (skb->encapsulation) - features &= dev->hw_enc_features; + while (skb) { + struct sk_buff *next = skb->next; - if (netif_needs_gso(skb, features)) { - if (unlikely(dev_gso_segment(skb, features))) - goto out_kfree_skb; - if (skb->next) - goto gso; - } else { - if (skb_needs_linearize(skb, features) && - __skb_linearize(skb)) - goto out_kfree_skb; + skb->next = NULL; + rc = xmit_one(skb, dev, txq, next != NULL); + if (unlikely(!dev_xmit_complete(rc))) { + skb->next = next; + goto out; + } - /* If packet is not checksummed and device does not - * support checksumming for this protocol, complete - * checksumming here. 
- */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb->encapsulation) - skb_set_inner_transport_header(skb, - skb_checksum_start_offset(skb)); - else - skb_set_transport_header(skb, - skb_checksum_start_offset(skb)); - if (!(features & NETIF_F_ALL_CSUM) && - skb_checksum_help(skb)) - goto out_kfree_skb; - } + skb = next; + if (netif_xmit_stopped(txq) && skb) { + rc = NETDEV_TX_BUSY; + break; } + } - if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(skb, dev); +out: + *ret = rc; + return skb; +} - skb_len = skb->len; - trace_net_dev_start_xmit(skb, dev); - rc = ops->ndo_start_xmit(skb, dev); - trace_net_dev_xmit(skb, rc, dev, skb_len); - if (rc == NETDEV_TX_OK) - txq_trans_update(txq); - return rc; +static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, + netdev_features_t features) +{ + if (vlan_tx_tag_present(skb) && + !vlan_hw_offload_capable(features, skb->vlan_proto)) { + skb = __vlan_put_tag(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (skb) + skb->vlan_tci = 0; } + return skb; +} -gso: - do { - struct sk_buff *nskb = skb->next; +static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) +{ + netdev_features_t features; - skb->next = nskb->next; - nskb->next = NULL; + if (skb->next) + return skb; - if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(nskb, dev); - - skb_len = nskb->len; - trace_net_dev_start_xmit(nskb, dev); - rc = ops->ndo_start_xmit(nskb, dev); - trace_net_dev_xmit(nskb, rc, dev, skb_len); - if (unlikely(rc != NETDEV_TX_OK)) { - if (rc & ~NETDEV_TX_MASK) - goto out_kfree_gso_skb; - nskb->next = skb->next; - skb->next = nskb; - return rc; + features = netif_skb_features(skb); + skb = validate_xmit_vlan(skb, features); + if (unlikely(!skb)) + goto out_null; + + /* If encapsulation offload request, verify we are testing + * hardware encapsulation features instead of standard + * features for the netdev + */ + if (skb->encapsulation) + features &= dev->hw_enc_features; + + if (netif_needs_gso(skb, features)) { + struct sk_buff *segs; + + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs)) { + segs = NULL; + } else if (segs) { + consume_skb(skb); + skb = segs; } - txq_trans_update(txq); - if (unlikely(netif_xmit_stopped(txq) && skb->next)) - return NETDEV_TX_BUSY; - } while (skb->next); + } else { + if (skb_needs_linearize(skb, features) && + __skb_linearize(skb)) + goto out_kfree_skb; -out_kfree_gso_skb: - if (likely(skb->next == NULL)) { - skb->destructor = DEV_GSO_CB(skb)->destructor; - consume_skb(skb); - return rc; + /* If packet is not checksummed and device does not + * support checksumming for this protocol, complete + * checksumming here. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) + skb_set_inner_transport_header(skb, + skb_checksum_start_offset(skb)); + else + skb_set_transport_header(skb, + skb_checksum_start_offset(skb)); + if (!(features & NETIF_F_ALL_CSUM) && + skb_checksum_help(skb)) + goto out_kfree_skb; + } } + + return skb; + out_kfree_skb: kfree_skb(skb); -out: - return rc; +out_null: + return NULL; +} + +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) +{ + struct sk_buff *next, *head = NULL, *tail; + + for (; skb != NULL; skb = next) { + next = skb->next; + skb->next = NULL; + + /* in case skb wont be segmented, point to itself */ + skb->prev = skb; + + skb = validate_xmit_skb(skb, dev); + if (!skb) + continue; + + if (!head) + head = skb; + else + tail->next = skb; + /* If skb was segmented, skb->prev points to + * the last segment. 
If not, it still contains skb. + */ + tail = skb->prev; + } + return head; } -EXPORT_SYMBOL_GPL(dev_hard_start_xmit); static void qdisc_pkt_len_init(struct sk_buff *skb) { @@ -2778,12 +2803,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * waiting to be sent out; and the qdisc is not running - * xmit the skb directly. */ - if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) - skb_dst_force(skb); qdisc_bstats_update(q, skb); - if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { + if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; @@ -2794,7 +2817,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, rc = NET_XMIT_SUCCESS; } else { - skb_dst_force(skb); rc = q->enqueue(skb, q) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { @@ -2891,6 +2913,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) skb_update_prio(skb); + /* If device/qdisc don't need skb->dst, release it right now while + * its hot in this cpu cache. + */ + if (dev->priv_flags & IFF_XMIT_DST_RELEASE) + skb_dst_drop(skb); + else + skb_dst_force(skb); + txq = netdev_pick_tx(dev, skb, accel_priv); q = rcu_dereference_bh(txq->qdisc); @@ -2923,11 +2953,15 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) goto recursion_alert; + skb = validate_xmit_skb(skb, dev); + if (!skb) + goto drop; + HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { __this_cpu_inc(xmit_recursion); - rc = dev_hard_start_xmit(skb, dev, txq); + skb = dev_hard_start_xmit(skb, dev, txq, &rc); __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); @@ -2948,10 +2982,11 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) } rc = -ENETDOWN; +drop: rcu_read_unlock_bh(); atomic_long_inc(&dev->tx_dropped); - kfree_skb(skb); + kfree_skb_list(skb); return rc; out: rcu_read_unlock_bh(); @@ -3128,8 +3163,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, } if (map) { - tcpu = map->cpus[((u64) hash * map->len) >> 32]; - + tcpu = map->cpus[reciprocal_scale(hash, map->len)]; if (cpu_online(tcpu)) { cpu = tcpu; goto done; @@ -3465,7 +3499,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); - q = rxq->qdisc; + q = rcu_dereference(rxq->qdisc); if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) @@ -3482,7 +3516,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, { struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); - if (!rxq || rxq->qdisc == &noop_qdisc) + if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc) goto out; if (*pt_prev) { @@ -3963,11 +3997,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (!(skb->dev->features & NETIF_F_GRO)) goto normal; - if (skb_is_gso(skb) || skb_has_frag_list(skb)) + if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad) goto normal; gro_list_prepare(napi, skb); - NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */ rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { @@ -3981,6 +4014,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->udp_mark = 0; + /* Setup for 
GRO checksum validation */ + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + NAPI_GRO_CB(skb)->csum = skb->csum; + NAPI_GRO_CB(skb)->csum_valid = 1; + NAPI_GRO_CB(skb)->csum_cnt = 0; + break; + case CHECKSUM_UNNECESSARY: + NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; + NAPI_GRO_CB(skb)->csum_valid = 0; + break; + default: + NAPI_GRO_CB(skb)->csum_cnt = 0; + NAPI_GRO_CB(skb)->csum_valid = 0; + } + pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); break; } @@ -4210,6 +4259,31 @@ gro_result_t napi_gro_frags(struct napi_struct *napi) } EXPORT_SYMBOL(napi_gro_frags); +/* Compute the checksum from gro_offset and return the folded value + * after adding in any pseudo checksum. + */ +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) +{ + __wsum wsum; + __sum16 sum; + + wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); + + /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ + sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); + if (likely(!sum)) { + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev); + } + + NAPI_GRO_CB(skb)->csum = wsum; + NAPI_GRO_CB(skb)->csum_valid = 1; + + return sum; +} +EXPORT_SYMBOL(__skb_gro_checksum_complete); + /* * net_rps_action_and_irq_enable sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. @@ -6579,6 +6653,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->gso_max_size = GSO_MAX_SIZE; dev->gso_max_segs = GSO_MAX_SEGS; + dev->gso_min_segs = 0; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); @@ -6588,7 +6663,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, INIT_LIST_HEAD(&dev->adj_list.lower); INIT_LIST_HEAD(&dev->all_adj_list.upper); INIT_LIST_HEAD(&dev->all_adj_list.lower); - dev->priv_flags = IFF_XMIT_DST_RELEASE; + dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); dev->num_tx_queues = txqs; @@ -7010,53 +7085,45 @@ const char *netdev_drivername(const struct net_device *dev) return empty; } -static int __netdev_printk(const char *level, const struct net_device *dev, - struct va_format *vaf) +static void __netdev_printk(const char *level, const struct net_device *dev, + struct va_format *vaf) { - int r; - if (dev && dev->dev.parent) { - r = dev_printk_emit(level[1] - '0', - dev->dev.parent, - "%s %s %s%s: %pV", - dev_driver_string(dev->dev.parent), - dev_name(dev->dev.parent), - netdev_name(dev), netdev_reg_state(dev), - vaf); + dev_printk_emit(level[1] - '0', + dev->dev.parent, + "%s %s %s%s: %pV", + dev_driver_string(dev->dev.parent), + dev_name(dev->dev.parent), + netdev_name(dev), netdev_reg_state(dev), + vaf); } else if (dev) { - r = printk("%s%s%s: %pV", level, netdev_name(dev), - netdev_reg_state(dev), vaf); + printk("%s%s%s: %pV", + level, netdev_name(dev), netdev_reg_state(dev), vaf); } else { - r = printk("%s(NULL net_device): %pV", level, vaf); + printk("%s(NULL net_device): %pV", level, vaf); } - - return r; } -int netdev_printk(const char *level, const struct net_device *dev, - const char *format, ...) +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...) 
{ struct va_format vaf; va_list args; - int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; - r = __netdev_printk(level, dev, &vaf); + __netdev_printk(level, dev, &vaf); va_end(args); - - return r; } EXPORT_SYMBOL(netdev_printk); #define define_netdev_printk_level(func, level) \ -int func(const struct net_device *dev, const char *fmt, ...) \ +void func(const struct net_device *dev, const char *fmt, ...) \ { \ - int r; \ struct va_format vaf; \ va_list args; \ \ @@ -7065,11 +7132,9 @@ int func(const struct net_device *dev, const char *fmt, ...) \ vaf.fmt = fmt; \ vaf.va = &args; \ \ - r = __netdev_printk(level, dev, &vaf); \ + __netdev_printk(level, dev, &vaf); \ \ va_end(args); \ - \ - return r; \ } \ EXPORT_SYMBOL(func); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index cf999e09bcd2c23654826b8c7e7b3d3dde2a7a59..72e899a3efdaadd82877550d4991b70424cff5a2 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -365,11 +365,8 @@ void dev_load(struct net *net, const char *name) no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); - if (no_module && capable(CAP_SYS_MODULE)) { - if (!request_module("%s", name)) - pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n", - name); - } + if (no_module && capable(CAP_SYS_MODULE)) + request_module("%s", name); } EXPORT_SYMBOL(dev_load); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 17cb912793fa5ef0d221abda0dfb447b216b4268..1600aa24d36bcaa653dff0b0c34f4d700eb4fbb9 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1621,6 +1621,81 @@ static int ethtool_get_module_eeprom(struct net_device *dev, modinfo.eeprom_len); } +static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) +{ + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + case ETHTOOL_TX_COPYBREAK: + if (tuna->len != sizeof(u32) || + tuna->type_id != ETHTOOL_TUNABLE_U32) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->get_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + data = kmalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + ret = ops->get_tunable(dev, &tuna, data); + if (ret) + goto out; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_to_user(useraddr, data, tuna.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->set_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + data = kmalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_from_user(data, useraddr, tuna.len)) + goto out; + ret = ops->set_tunable(dev, &tuna, data); + +out: + kfree(data); + return ret; +} + /* The main entry point in this file. 
Called from net/core/dev_ioctl.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1670,6 +1745,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GCHANNELS: case ETHTOOL_GET_TS_INFO: case ETHTOOL_GEEE: + case ETHTOOL_GTUNABLE: break; default: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -1857,6 +1933,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GMODULEEEPROM: rc = ethtool_get_module_eeprom(dev, useraddr); break; + case ETHTOOL_GTUNABLE: + rc = ethtool_get_tunable(dev, useraddr); + break; + case ETHTOOL_STUNABLE: + rc = ethtool_set_tunable(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } diff --git a/net/core/filter.c b/net/core/filter.c index d814b8a89d0f2f65efb95858f2e4fe26bf5a4ca9..fcd3f6742a6aa192efaf179b0a656377c98c31ce 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -87,33 +87,9 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(sk_filter); -/* Helper to find the offset of pkt_type in sk_buff structure. We want - * to make sure its still a 3bit field starting at a byte boundary; - * taken from arch/x86/net/bpf_jit_comp.c. - */ -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_TYPE_MAX (7 << 5) -#else -#define PKT_TYPE_MAX 7 -#endif -static unsigned int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { .pkt_type = ~0, }; - u8 *ct = (u8 *) &skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - - pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__); - return -1; -} - static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - return __skb_get_poff((struct sk_buff *)(unsigned long) ctx); + return skb_get_poff((struct sk_buff *)(unsigned long) ctx); } static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) @@ -190,11 +166,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_PKTTYPE: - *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, - pkt_type_offset()); - if (insn->off < 0) - return false; - insn++; + *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, + PKT_TYPE_OFFSET()); *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD insn++; @@ -933,7 +906,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) /* Expand fp for appending the new filter representation. */ old_fp = fp; - fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL); + fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); if (!fp) { /* The old_fp is still around in case we couldn't * allocate new memory, so uncharge on that one. 
@@ -972,7 +945,7 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp) int err; fp->bpf_func = NULL; - fp->jited = 0; + fp->jited = false; err = bpf_check_classic(fp->insns, fp->len); if (err) { @@ -1013,7 +986,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) if (fprog->filter == NULL) return -EINVAL; - fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL); + fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; @@ -1069,12 +1042,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (fprog->filter == NULL) return -EINVAL; - prog = kmalloc(bpf_fsize, GFP_KERNEL); + prog = bpf_prog_alloc(bpf_fsize, 0); if (!prog) return -ENOMEM; if (copy_from_user(prog->insns, fprog->filter, fsize)) { - kfree(prog); + __bpf_prog_free(prog); return -EFAULT; } @@ -1082,7 +1055,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) err = bpf_prog_store_orig_filter(prog, fprog); if (err) { - kfree(prog); + __bpf_prog_free(prog); return -ENOMEM; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5f362c1d03322692da59509c7d594f72255330b8..8560dea58803f947842e0be44d10464fb54127f6 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -13,6 +13,7 @@ #include #include #include +#include /* copy saddr & daddr, possibly using 64bit load/store * Equivalent to : flow->src = iph->saddr; @@ -26,36 +27,61 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i } /** - * skb_flow_get_ports - extract the upper layer ports and return them - * @skb: buffer to extract the ports from + * __skb_flow_get_ports - extract the upper layer ports and return them + * @skb: sk_buff to extract the ports from * @thoff: transport header offset * @ip_proto: protocol for which to get port offset + * @data: raw buffer pointer to the packet, if NULL use skb->data + * @hlen: packet header length, if @data is NULL use skb_headlen(skb) * * The function will try to retrieve the ports at offset thoff + poff where poff * is the protocol port offset returned from proto_ports_offset */ -__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen) { int poff = proto_ports_offset(ip_proto); + if (!data) { + data = skb->data; + hlen = skb_headlen(skb); + } + if (poff >= 0) { __be32 *ports, _ports; - ports = skb_header_pointer(skb, thoff + poff, - sizeof(_ports), &_ports); + ports = __skb_header_pointer(skb, thoff + poff, + sizeof(_ports), data, hlen, &_ports); if (ports) return *ports; } return 0; } -EXPORT_SYMBOL(skb_flow_get_ports); +EXPORT_SYMBOL(__skb_flow_get_ports); -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +/** + * __skb_flow_dissect - extract the flow_keys struct and return it + * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified + * @data: raw buffer pointer to the packet, if NULL use skb->data + * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol + * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb) + * @hlen: packet header length, if @data is NULL use skb_headlen(skb) + * + * The function will try to retrieve the struct flow_keys from either the skbuff + * or a raw buffer specified by the rest parameters + */ +bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, + void *data, __be16 proto, int nhoff, int hlen) { - int nhoff = 
skb_network_offset(skb); u8 ip_proto; - __be16 proto = skb->protocol; + + if (!data) { + data = skb->data; + proto = skb->protocol; + nhoff = skb_network_offset(skb); + hlen = skb_headlen(skb); + } memset(flow, 0, sizeof(*flow)); @@ -65,7 +91,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) const struct iphdr *iph; struct iphdr _iph; ip: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); if (!iph || iph->ihl < 5) return false; nhoff += iph->ihl * 4; @@ -83,7 +109,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) __be32 flow_label; ipv6: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); if (!iph) return false; @@ -92,6 +118,13 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); nhoff += sizeof(struct ipv6hdr); + /* skip the flow label processing if skb is NULL. The + * assumption here is that if there is no skb we are not + * looking for flow info as much as we are length. + */ + if (!skb) + break; + flow_label = ip6_flowlabel(iph); if (flow_label) { /* Awesome, IPv6 packet has a flow label so we can @@ -113,7 +146,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) const struct vlan_hdr *vlan; struct vlan_hdr _vlan; - vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan); + vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); if (!vlan) return false; @@ -126,7 +159,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) struct pppoe_hdr hdr; __be16 proto; } *hdr, _hdr; - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); if (!hdr) return false; proto = hdr->proto; @@ -140,6 +173,9 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) return false; } } + case htons(ETH_P_FCOE): + flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN); + /* fall through */ default: return false; } @@ -151,7 +187,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) __be16 proto; } *hdr, _hdr; - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); if (!hdr) return false; /* @@ -171,8 +207,9 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) const struct ethhdr *eth; struct ethhdr _eth; - eth = skb_header_pointer(skb, nhoff, - sizeof(_eth), &_eth); + eth = __skb_header_pointer(skb, nhoff, + sizeof(_eth), + data, hlen, &_eth); if (!eth) return false; proto = eth->h_proto; @@ -194,12 +231,12 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) flow->n_proto = proto; flow->ip_proto = ip_proto; - flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto); + flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); flow->thoff = (u16) nhoff; return true; } -EXPORT_SYMBOL(skb_flow_dissect); +EXPORT_SYMBOL(__skb_flow_dissect); static u32 hashrnd __read_mostly; static __always_inline void __flow_hash_secret_init(void) @@ -286,30 +323,22 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, qcount = dev->tc_to_txq[tc].count; } - return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset; + return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; } 
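
The hunk just above (and the matching get_xps_queue() and get_rps_cpu() hunks elsewhere in this series) replaces the open-coded "((u64)hash * len) >> 32" scaling with reciprocal_scale(). A minimal, self-contained userspace sketch of that scaling follows, assuming the helper reduces to the same multiply-shift the removed expression used, which is what these one-for-one conversions imply; sketch_reciprocal_scale() and pick_queue() are illustrative names, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Map a full-range 32-bit value (e.g. a flow hash) into [0, ep_ro)
 * with one multiply and a shift instead of a modulo or divide.
 */
static uint32_t sketch_reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

/* Pick a tx queue index from a hash, mirroring __skb_tx_hash() above. */
static uint16_t pick_queue(uint32_t hash, uint16_t qcount, uint16_t qoffset)
{
	return (uint16_t)sketch_reciprocal_scale(hash, qcount) + qoffset;
}

int main(void)
{
	/* arbitrary example hash; any 32-bit flow hash behaves the same way */
	printf("queue = %u\n", (unsigned int)pick_queue(0x9e3779b9u, 8, 0));
	return 0;
}

For a well-mixed hash the result is spread roughly uniformly over [qoffset, qoffset + qcount), which is why the same helper also serves the RPS cpu-map lookup converted earlier in net/core/dev.c.
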
EXPORT_SYMBOL(__skb_tx_hash); -/* __skb_get_poff() returns the offset to the payload as far as it could - * be dissected. The main user is currently BPF, so that we can dynamically - * truncate packets without needing to push actual payload to the user - * space and can analyze headers only, instead. - */ -u32 __skb_get_poff(const struct sk_buff *skb) +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen) { - struct flow_keys keys; - u32 poff = 0; - - if (!skb_flow_dissect(skb, &keys)) - return 0; + u32 poff = keys->thoff; - poff += keys.thoff; - switch (keys.ip_proto) { + switch (keys->ip_proto) { case IPPROTO_TCP: { const struct tcphdr *tcph; struct tcphdr _tcph; - tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph); + tcph = __skb_header_pointer(skb, poff, sizeof(_tcph), + data, hlen, &_tcph); if (!tcph) return poff; @@ -343,6 +372,21 @@ u32 __skb_get_poff(const struct sk_buff *skb) return poff; } +/* skb_get_poff() returns the offset to the payload as far as it could + * be dissected. The main user is currently BPF, so that we can dynamically + * truncate packets without needing to push actual payload to the user + * space and can analyze headers only, instead. + */ +u32 skb_get_poff(const struct sk_buff *skb) +{ + struct flow_keys keys; + + if (!skb_flow_dissect(skb, &keys)) + return 0; + + return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb)); +} + static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS @@ -359,9 +403,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) if (map->len == 1) queue_index = map->queues[0]; else - queue_index = map->queues[ - ((u64)skb_get_hash(skb) * map->len) >> 32]; - + queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), + map->len)]; if (unlikely(queue_index >= dev->real_num_tx_queues)) queue_index = -1; } diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 9d33dfffca19a992baf28e07ade0c7029c413ad0..9dfb88a933e7c25ac5125570c2fd0edf4696c1cb 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -91,6 +91,8 @@ struct gen_estimator u32 avpps; struct rcu_head e_rcu; struct rb_node node; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct rcu_head head; }; struct gen_estimator_head @@ -115,9 +117,8 @@ static void est_timer(unsigned long arg) rcu_read_lock(); list_for_each_entry_rcu(e, &elist[idx].list, list) { - u64 nbytes; + struct gnet_stats_basic_packed b = {0}; u64 brate; - u32 npackets; u32 rate; spin_lock(e->stats_lock); @@ -125,15 +126,15 @@ static void est_timer(unsigned long arg) if (e->bstats == NULL) goto skip; - nbytes = e->bstats->bytes; - npackets = e->bstats->packets; - brate = (nbytes - e->last_bytes)<<(7 - idx); - e->last_bytes = nbytes; + __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats); + + brate = (b.bytes - e->last_bytes)<<(7 - idx); + e->last_bytes = b.bytes; e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); e->rate_est->bps = (e->avbps+0xF)>>5; - rate = (npackets - e->last_packets)<<(12 - idx); - e->last_packets = npackets; + rate = (b.packets - e->last_packets)<<(12 - idx); + e->last_packets = b.packets; e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); e->rate_est->pps = (e->avpps+0x1FF)>>10; skip: @@ -203,12 +204,14 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats * */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, 
struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { struct gen_estimator *est; struct gnet_estimator *parm = nla_data(opt); + struct gnet_stats_basic_packed b = {0}; int idx; if (nla_len(opt) < sizeof(*parm)) @@ -221,15 +224,18 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, if (est == NULL) return -ENOBUFS; + __gnet_stats_copy_basic(&b, cpu_bstats, bstats); + idx = parm->interval + 2; est->bstats = bstats; est->rate_est = rate_est; est->stats_lock = stats_lock; est->ewma_log = parm->ewma_log; - est->last_bytes = bstats->bytes; + est->last_bytes = b.bytes; est->avbps = rate_est->bps<<5; - est->last_packets = bstats->packets; + est->last_packets = b.packets; est->avpps = rate_est->pps<<10; + est->cpu_bstats = cpu_bstats; spin_lock_bh(&est_tree_lock); if (!elist[idx].timer.function) { @@ -290,11 +296,12 @@ EXPORT_SYMBOL(gen_kill_estimator); * Returns 0 on success or a negative error code. */ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt) { gen_kill_estimator(bstats, rate_est); - return gen_new_estimator(bstats, rate_est, stats_lock, opt); + return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt); } EXPORT_SYMBOL(gen_replace_estimator); diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 2ddbce4cce144f62e8fed9dd39209fb3429b82a7..0c08062d1796337f25b9dbc2cbce68d5e3194037 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -97,6 +97,43 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, } EXPORT_SYMBOL(gnet_stats_start_copy); +static void +__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu) +{ + int i; + + for_each_possible_cpu(i) { + struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); + unsigned int start; + u64 bytes; + u32 packets; + + do { + start = u64_stats_fetch_begin_irq(&bcpu->syncp); + bytes = bcpu->bstats.bytes; + packets = bcpu->bstats.packets; + } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); + + bstats->bytes += bytes; + bstats->packets += packets; + } +} + +void +__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +{ + if (cpu) { + __gnet_stats_copy_basic_cpu(bstats, cpu); + } else { + bstats->bytes = b->bytes; + bstats->packets = b->packets; + } +} +EXPORT_SYMBOL(__gnet_stats_copy_basic); + /** * gnet_stats_copy_basic - copy basic statistics into statistic TLV * @d: dumping handle @@ -109,19 +146,25 @@ EXPORT_SYMBOL(gnet_stats_start_copy); * if the room in the socket buffer was not sufficient. 
*/ int -gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b) +gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) { + struct gnet_stats_basic_packed bstats = {0}; + + __gnet_stats_copy_basic(&bstats, cpu, b); + if (d->compat_tc_stats) { - d->tc_stats.bytes = b->bytes; - d->tc_stats.packets = b->packets; + d->tc_stats.bytes = bstats.bytes; + d->tc_stats.packets = bstats.packets; } if (d->tail) { struct gnet_stats_basic sb; memset(&sb, 0, sizeof(sb)); - sb.bytes = b->bytes; - sb.packets = b->packets; + sb.bytes = bstats.bytes; + sb.packets = bstats.packets; return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb)); } return 0; @@ -172,29 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d, } EXPORT_SYMBOL(gnet_stats_copy_rate_est); +static void +__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *q) +{ + int i; + + for_each_possible_cpu(i) { + const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); + + qstats->qlen = 0; + qstats->backlog += qcpu->backlog; + qstats->drops += qcpu->drops; + qstats->requeues += qcpu->requeues; + qstats->overlimits += qcpu->overlimits; + } +} + +static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *cpu, + const struct gnet_stats_queue *q, + __u32 qlen) +{ + if (cpu) { + __gnet_stats_copy_queue_cpu(qstats, cpu); + } else { + qstats->qlen = q->qlen; + qstats->backlog = q->backlog; + qstats->drops = q->drops; + qstats->requeues = q->requeues; + qstats->overlimits = q->overlimits; + } + + qstats->qlen = qlen; +} + /** * gnet_stats_copy_queue - copy queue statistics into statistics TLV * @d: dumping handle + * @cpu_q: per cpu queue statistics * @q: queue statistics + * @qlen: queue length statistics * * Appends the queue statistics to the top level TLV created by - * gnet_stats_start_copy(). + * gnet_stats_start_copy(). Using per cpu queue statistics if + * they are available. * * Returns 0 on success or -1 with the statistic lock released * if the room in the socket buffer was not sufficient. 
*/ int -gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q) +gnet_stats_copy_queue(struct gnet_dump *d, + struct gnet_stats_queue __percpu *cpu_q, + struct gnet_stats_queue *q, __u32 qlen) { + struct gnet_stats_queue qstats = {0}; + + __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen); + if (d->compat_tc_stats) { - d->tc_stats.drops = q->drops; - d->tc_stats.qlen = q->qlen; - d->tc_stats.backlog = q->backlog; - d->tc_stats.overlimits = q->overlimits; + d->tc_stats.drops = qstats.drops; + d->tc_stats.qlen = qstats.qlen; + d->tc_stats.backlog = qstats.backlog; + d->tc_stats.overlimits = qstats.overlimits; } if (d->tail) - return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q)); + return gnet_stats_copy(d, TCA_STATS_QUEUE, + &qstats, sizeof(qstats)); return 0; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 7c6b51a58968613010576f3bd35a3cb6c527db08..7f155175bba83a0c8083008ece1a56e265174984 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -224,7 +224,7 @@ static void net_free(struct net *net) return; } #endif - kfree(net->gen); + kfree(rcu_access_pointer(net->gen)); kmem_cache_free(net_cachep, net); } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 907fb5e36c02e54794734abe4dc1225bf9af9c7d..e6645b4f330af1d16607ca0d6d9c9c95fd163dc6 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -72,7 +72,6 @@ module_param(carrier_timeout, uint, 0644); static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { - const struct net_device_ops *ops = dev->netdev_ops; int status = NETDEV_TX_OK; netdev_features_t features; @@ -92,9 +91,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, skb->vlan_tci = 0; } - status = ops->ndo_start_xmit(skb, dev); - if (status == NETDEV_TX_OK) - txq_trans_update(txq); + status = netdev_start_xmit(skb, dev, txq, false); out: return status; @@ -116,7 +113,7 @@ static void queue_process(struct work_struct *work) continue; } - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(dev, skb); local_irq_save(flags); HARD_TX_LOCK(dev, txq, smp_processor_id()); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8b849ddfef2e743676fe35642cd49d36188dd74d..443256bdcddc8e01c2ba0f0b01c0b0bf78a6608b 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -202,6 +202,7 @@ #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ #define F_NODE (1<<15) /* Node memory alloc*/ #define F_UDPCSUM (1<<16) /* Include UDP checksum */ +#define F_NO_TIMESTAMP (1<<17) /* Don't timestamp packets (default TS) */ /* Thread control flag bits */ #define T_STOP (1<<0) /* Stop run */ @@ -386,6 +387,7 @@ struct pktgen_dev { u16 queue_map_min; u16 queue_map_max; __u32 skb_priority; /* skb priority field */ + unsigned int burst; /* number of duplicated packets to burst */ int node; /* Memory node */ #ifdef CONFIG_XFRM @@ -505,7 +507,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf, pktgen_reset_all_threads(pn); else - pr_warning("Unknown command: %s\n", data); + pr_warn("Unknown command: %s\n", data); return count; } @@ -612,6 +614,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->traffic_class) seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); + if (pkt_dev->burst > 1) + seq_printf(seq, " burst: %d\n", pkt_dev->burst); + if (pkt_dev->node >= 0) seq_printf(seq, " node: %d\n", pkt_dev->node); @@ -638,6 +643,9 @@ static int 
pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_UDPCSUM) seq_puts(seq, "UDPCSUM "); + if (pkt_dev->flags & F_NO_TIMESTAMP) + seq_puts(seq, "NO_TIMESTAMP "); + if (pkt_dev->flags & F_MPLS_RND) seq_puts(seq, "MPLS_RND "); @@ -857,14 +865,14 @@ static ssize_t pktgen_if_write(struct file *file, pg_result = &(pkt_dev->result[0]); if (count < 1) { - pr_warning("wrong command format\n"); + pr_warn("wrong command format\n"); return -EINVAL; } max = count; tmp = count_trail_chars(user_buffer, max); if (tmp < 0) { - pr_warning("illegal format\n"); + pr_warn("illegal format\n"); return tmp; } i = tmp; @@ -1120,6 +1128,16 @@ static ssize_t pktgen_if_write(struct file *file, pkt_dev->dst_mac_count); return count; } + if (!strcmp(name, "burst")) { + len = num_arg(&user_buffer[i], 10, &value); + if (len < 0) + return len; + + i += len; + pkt_dev->burst = value < 1 ? 1 : value; + sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); + return count; + } if (!strcmp(name, "node")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) @@ -1243,6 +1261,9 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!UDPCSUM") == 0) pkt_dev->flags &= ~F_UDPCSUM; + else if (strcmp(f, "NO_TIMESTAMP") == 0) + pkt_dev->flags |= F_NO_TIMESTAMP; + else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", @@ -1251,6 +1272,7 @@ static ssize_t pktgen_if_write(struct file *file, "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, " "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, " "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, " + "NO_TIMESTAMP, " #ifdef CONFIG_XFRM "IPSEC, " #endif @@ -2048,15 +2070,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ntxq = pkt_dev->odev->real_num_tx_queues; if (ntxq <= pkt_dev->queue_map_min) { - pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_min = (ntxq ?: 1) - 1; } if (pkt_dev->queue_map_max >= ntxq) { - pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_max = (ntxq ?: 1) - 1; } @@ -2685,9 +2707,14 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, pgh->pgh_magic = htonl(PKTGEN_MAGIC); pgh->seq_num = htonl(pkt_dev->seq_num); - do_gettimeofday(×tamp); - pgh->tv_sec = htonl(timestamp.tv_sec); - pgh->tv_usec = htonl(timestamp.tv_usec); + if (pkt_dev->flags & F_NO_TIMESTAMP) { + pgh->tv_sec = 0; + pgh->tv_usec = 0; + } else { + do_gettimeofday(×tamp); + pgh->tv_sec = htonl(timestamp.tv_sec); + pgh->tv_usec = htonl(timestamp.tv_usec); + } } static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, @@ -3160,8 +3187,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) int nr_frags = pkt_dev->skb ? 
skb_shinfo(pkt_dev->skb)->nr_frags : -1; if (!pkt_dev->running) { - pr_warning("interface: %s is already stopped\n", - pkt_dev->odevname); + pr_warn("interface: %s is already stopped\n", + pkt_dev->odevname); return -EINVAL; } @@ -3284,11 +3311,9 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) static void pktgen_xmit(struct pktgen_dev *pkt_dev) { + unsigned int burst = ACCESS_ONCE(pkt_dev->burst); struct net_device *odev = pkt_dev->odev; - netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *) - = odev->netdev_ops->ndo_start_xmit; struct netdev_queue *txq; - u16 queue_map; int ret; /* If device is offline, then don't send */ @@ -3326,8 +3351,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) if (pkt_dev->delay && pkt_dev->last_ok) spin(pkt_dev, pkt_dev->next_tx); - queue_map = skb_get_queue_mapping(pkt_dev->skb); - txq = netdev_get_tx_queue(odev, queue_map); + txq = skb_get_tx_queue(odev, pkt_dev->skb); local_bh_disable(); @@ -3338,16 +3362,19 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) pkt_dev->last_ok = 0; goto unlock; } - atomic_inc(&(pkt_dev->skb->users)); - ret = (*xmit)(pkt_dev->skb, odev); + atomic_add(burst, &pkt_dev->skb->users); + +xmit_more: + ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); switch (ret) { case NETDEV_TX_OK: - txq_trans_update(txq); pkt_dev->last_ok = 1; pkt_dev->sofar++; pkt_dev->seq_num++; pkt_dev->tx_bytes += pkt_dev->last_pkt_size; + if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) + goto xmit_more; break; case NET_XMIT_DROP: case NET_XMIT_CN: @@ -3366,6 +3393,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) atomic_dec(&(pkt_dev->skb->users)); pkt_dev->last_ok = 0; } + if (unlikely(burst)) + atomic_sub(burst, &pkt_dev->skb->users); unlock: HARD_TX_UNLOCK(odev, txq); @@ -3564,6 +3593,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) pkt_dev->svlan_p = 0; pkt_dev->svlan_cfi = 0; pkt_dev->svlan_id = 0xffff; + pkt_dev->burst = 1; pkt_dev->node = -1; err = pktgen_setup_dev(t->net, pkt_dev, ifname); @@ -3684,7 +3714,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, pr_debug("remove_device pkt_dev=%p\n", pkt_dev); if (pkt_dev->running) { - pr_warning("WARNING: trying to remove a running interface, stopping it now\n"); + pr_warn("WARNING: trying to remove a running interface, stopping it now\n"); pktgen_stop_device(pkt_dev); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f0493e3b7471099f0245b0ea0c4b52de4a03034f..a6882686ca3a10fc3be7ced6299dc7385ffd239d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1481,9 +1481,12 @@ static int do_set_master(struct net_device *dev, int ifindex) return 0; } +#define DO_SETLINK_MODIFIED 0x01 +/* notify flag means notify + modified. 
*/ +#define DO_SETLINK_NOTIFY 0x03 static int do_setlink(const struct sk_buff *skb, struct net_device *dev, struct ifinfomsg *ifm, - struct nlattr **tb, char *ifname, int modified) + struct nlattr **tb, char *ifname, int status) { const struct net_device_ops *ops = dev->netdev_ops; int err; @@ -1502,7 +1505,7 @@ static int do_setlink(const struct sk_buff *skb, put_net(net); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_MAP]) { @@ -1531,7 +1534,7 @@ static int do_setlink(const struct sk_buff *skb, if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_ADDRESS]) { @@ -1551,19 +1554,19 @@ static int do_setlink(const struct sk_buff *skb, kfree(sa); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_GROUP]) { dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); - modified = 1; + status |= DO_SETLINK_NOTIFY; } /* @@ -1575,7 +1578,7 @@ static int do_setlink(const struct sk_buff *skb, err = dev_change_name(dev, ifname); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_IFALIAS]) { @@ -1583,7 +1586,7 @@ static int do_setlink(const struct sk_buff *skb, nla_len(tb[IFLA_IFALIAS])); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_BROADCAST]) { @@ -1601,25 +1604,35 @@ static int do_setlink(const struct sk_buff *skb, err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER])); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_CARRIER]) { err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } - if (tb[IFLA_TXQLEN]) - dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); + if (tb[IFLA_TXQLEN]) { + unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); + + if (dev->tx_queue_len ^ value) + status |= DO_SETLINK_NOTIFY; + + dev->tx_queue_len = value; + } if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) { + unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); + write_lock_bh(&dev_base_lock); - dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); + if (dev->link_mode ^ value) + status |= DO_SETLINK_NOTIFY; + dev->link_mode = value; write_unlock_bh(&dev_base_lock); } @@ -1634,7 +1647,7 @@ static int do_setlink(const struct sk_buff *skb, err = do_setvfinfo(dev, attr); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; @@ -1664,7 +1677,7 @@ static int do_setlink(const struct sk_buff *skb, err = ops->ndo_set_vf_port(dev, vf, port); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; @@ -1682,7 +1695,7 @@ static int do_setlink(const struct sk_buff *skb, err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_AF_SPEC]) { @@ -1699,15 +1712,20 @@ static int do_setlink(const struct sk_buff *skb, if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; errout: - if (err < 0 && modified) - net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", - dev->name); + if (status & DO_SETLINK_MODIFIED) { + if (status & DO_SETLINK_NOTIFY) + netdev_state_change(dev); + + if (err < 0) + net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", + dev->name); + } return err; } @@ -1989,7 +2007,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) } if (dev) { - int modified = 0; + int status = 0; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; @@ -2004,7 +2022,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) err = ops->changelink(dev, tb, data); if (err < 0) return err; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (linkinfo[IFLA_INFO_SLAVE_DATA]) { @@ -2015,10 +2033,10 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) tb, slave_data); if (err < 0) return err; - modified = 1; + status |= DO_SETLINK_NOTIFY; } - return do_setlink(skb, dev, ifm, tb, ifname, modified); + return do_setlink(skb, dev, ifm, tb, ifname, status); } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index ba71212f0251218c3c41599838e36aa8e169fea7..51dd3193a33ebb26ea3c008e752d1f2832f791d8 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq) * overlaps less than one time per MSL (2 minutes). * Choosing a clock of 64 ns period is OK. (period of 274 s) */ - return seq + (ktime_to_ns(ktime_get_real()) >> 6); + return seq + (ktime_get_real_ns() >> 6); } #endif @@ -135,7 +135,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, md5_transform(hash, net_secret); seq = hash[0] | (((u64)hash[1]) << 32); - seq += ktime_to_ns(ktime_get_real()); + seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; return seq; @@ -163,7 +163,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, md5_transform(hash, secret); seq = hash[0] | (((u64)hash[1]) << 32); - seq += ktime_to_ns(ktime_get_real()); + seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; return seq; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8d289697cc7adcb2ce28c9e793e49c25fbbba5b5..7b3df0d518abfa5da3440d614ce5fa9e65bb06e6 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -257,16 +257,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, kmemcheck_annotate_variable(shinfo->destructor_arg); if (flags & SKB_ALLOC_FCLONE) { - struct sk_buff *child = skb + 1; - atomic_t *fclone_ref = (atomic_t *) (child + 1); + struct sk_buff_fclones *fclones; - kmemcheck_annotate_bitfield(child, flags1); - kmemcheck_annotate_bitfield(child, flags2); + fclones = container_of(skb, struct sk_buff_fclones, skb1); + + kmemcheck_annotate_bitfield(&fclones->skb2, flags1); skb->fclone = SKB_FCLONE_ORIG; - atomic_set(fclone_ref, 1); + atomic_set(&fclones->fclone_ref, 1); - child->fclone = SKB_FCLONE_UNAVAILABLE; - child->pfmemalloc = pfmemalloc; + fclones->skb2.fclone = SKB_FCLONE_FREE; + fclones->skb2.pfmemalloc = pfmemalloc; } out: return skb; @@ -491,32 +491,33 @@ static void skb_free_head(struct sk_buff *skb) static void skb_release_data(struct sk_buff *skb) { - if (!skb->cloned || - !atomic_sub_return(skb->nohdr ? 
(1 << SKB_DATAREF_SHIFT) + 1 : 1, - &skb_shinfo(skb)->dataref)) { - if (skb_shinfo(skb)->nr_frags) { - int i; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - skb_frag_unref(skb, i); - } + struct skb_shared_info *shinfo = skb_shinfo(skb); + int i; - /* - * If skb buf is from userspace, we need to notify the caller - * the lower device DMA has done; - */ - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - struct ubuf_info *uarg; + if (skb->cloned && + atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, + &shinfo->dataref)) + return; - uarg = skb_shinfo(skb)->destructor_arg; - if (uarg->callback) - uarg->callback(uarg, true); - } + for (i = 0; i < shinfo->nr_frags; i++) + __skb_frag_unref(&shinfo->frags[i]); - if (skb_has_frag_list(skb)) - skb_drop_fraglist(skb); + /* + * If skb buf is from userspace, we need to notify the caller + * the lower device DMA has done; + */ + if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) { + struct ubuf_info *uarg; - skb_free_head(skb); + uarg = shinfo->destructor_arg; + if (uarg->callback) + uarg->callback(uarg, true); } + + if (shinfo->frag_list) + kfree_skb_list(shinfo->frag_list); + + skb_free_head(skb); } /* @@ -524,8 +525,7 @@ static void skb_release_data(struct sk_buff *skb) */ static void kfree_skbmem(struct sk_buff *skb) { - struct sk_buff *other; - atomic_t *fclone_ref; + struct sk_buff_fclones *fclones; switch (skb->fclone) { case SKB_FCLONE_UNAVAILABLE: @@ -533,22 +533,28 @@ static void kfree_skbmem(struct sk_buff *skb) break; case SKB_FCLONE_ORIG: - fclone_ref = (atomic_t *) (skb + 2); - if (atomic_dec_and_test(fclone_ref)) - kmem_cache_free(skbuff_fclone_cache, skb); + fclones = container_of(skb, struct sk_buff_fclones, skb1); + if (atomic_dec_and_test(&fclones->fclone_ref)) + kmem_cache_free(skbuff_fclone_cache, fclones); break; case SKB_FCLONE_CLONE: - fclone_ref = (atomic_t *) (skb + 1); - other = skb - 1; + fclones = container_of(skb, struct sk_buff_fclones, skb2); - /* The clone portion is available for - * fast-cloning again. + /* Warning : We must perform the atomic_dec_and_test() before + * setting skb->fclone back to SKB_FCLONE_FREE, otherwise + * skb_clone() could set clone_ref to 2 before our decrement. + * Anyway, if we are going to free the structure, no need to + * rewrite skb->fclone. */ - skb->fclone = SKB_FCLONE_UNAVAILABLE; - - if (atomic_dec_and_test(fclone_ref)) - kmem_cache_free(skbuff_fclone_cache, other); + if (atomic_dec_and_test(&fclones->fclone_ref)) { + kmem_cache_free(skbuff_fclone_cache, fclones); + } else { + /* The clone portion is available for + * fast-cloning again. + */ + skb->fclone = SKB_FCLONE_FREE; + } break; } } @@ -566,7 +572,7 @@ static void skb_release_head_state(struct sk_buff *skb) #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_conntrack_put(skb->nfct); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); #endif /* XXX: IS this still necessary? 
- JHS */ @@ -674,57 +680,61 @@ void consume_skb(struct sk_buff *skb) } EXPORT_SYMBOL(consume_skb); +/* Make sure a field is enclosed inside headers_start/headers_end section */ +#define CHECK_SKB_FIELD(field) \ + BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ + offsetof(struct sk_buff, headers_start)); \ + BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ + offsetof(struct sk_buff, headers_end)); \ + static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { new->tstamp = old->tstamp; + /* We do not copy old->sk */ new->dev = old->dev; - new->transport_header = old->transport_header; - new->network_header = old->network_header; - new->mac_header = old->mac_header; - new->inner_protocol = old->inner_protocol; - new->inner_transport_header = old->inner_transport_header; - new->inner_network_header = old->inner_network_header; - new->inner_mac_header = old->inner_mac_header; + memcpy(new->cb, old->cb, sizeof(old->cb)); skb_dst_copy(new, old); - skb_copy_hash(new, old); - new->ooo_okay = old->ooo_okay; - new->no_fcs = old->no_fcs; - new->encapsulation = old->encapsulation; - new->encap_hdr_csum = old->encap_hdr_csum; - new->csum_valid = old->csum_valid; - new->csum_complete_sw = old->csum_complete_sw; #ifdef CONFIG_XFRM new->sp = secpath_get(old->sp); #endif - memcpy(new->cb, old->cb, sizeof(old->cb)); - new->csum = old->csum; - new->ignore_df = old->ignore_df; - new->pkt_type = old->pkt_type; - new->ip_summed = old->ip_summed; - skb_copy_queue_mapping(new, old); - new->priority = old->priority; -#if IS_ENABLED(CONFIG_IP_VS) - new->ipvs_property = old->ipvs_property; + __nf_copy(new, old, false); + + /* Note : this field could be in headers_start/headers_end section + * It is not yet because we do not want to have a 16 bit hole + */ + new->queue_mapping = old->queue_mapping; + + memcpy(&new->headers_start, &old->headers_start, + offsetof(struct sk_buff, headers_end) - + offsetof(struct sk_buff, headers_start)); + CHECK_SKB_FIELD(protocol); + CHECK_SKB_FIELD(csum); + CHECK_SKB_FIELD(hash); + CHECK_SKB_FIELD(priority); + CHECK_SKB_FIELD(skb_iif); + CHECK_SKB_FIELD(vlan_proto); + CHECK_SKB_FIELD(vlan_tci); + CHECK_SKB_FIELD(transport_header); + CHECK_SKB_FIELD(network_header); + CHECK_SKB_FIELD(mac_header); + CHECK_SKB_FIELD(inner_protocol); + CHECK_SKB_FIELD(inner_transport_header); + CHECK_SKB_FIELD(inner_network_header); + CHECK_SKB_FIELD(inner_mac_header); + CHECK_SKB_FIELD(mark); +#ifdef CONFIG_NETWORK_SECMARK + CHECK_SKB_FIELD(secmark); +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + CHECK_SKB_FIELD(napi_id); #endif - new->pfmemalloc = old->pfmemalloc; - new->protocol = old->protocol; - new->mark = old->mark; - new->skb_iif = old->skb_iif; - __nf_copy(new, old); #ifdef CONFIG_NET_SCHED - new->tc_index = old->tc_index; + CHECK_SKB_FIELD(tc_index); #ifdef CONFIG_NET_CLS_ACT - new->tc_verd = old->tc_verd; + CHECK_SKB_FIELD(tc_verd); #endif #endif - new->vlan_proto = old->vlan_proto; - new->vlan_tci = old->vlan_tci; - skb_copy_secmark(new, old); - -#ifdef CONFIG_NET_RX_BUSY_POLL - new->napi_id = old->napi_id; -#endif } /* @@ -855,17 +865,22 @@ EXPORT_SYMBOL_GPL(skb_copy_ubufs); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { - struct sk_buff *n; + struct sk_buff_fclones *fclones = container_of(skb, + struct sk_buff_fclones, + skb1); + struct sk_buff *n = &fclones->skb2; if (skb_orphan_frags(skb, gfp_mask)) return NULL; - n = skb + 1; if (skb->fclone == SKB_FCLONE_ORIG && - n->fclone == SKB_FCLONE_UNAVAILABLE) { - atomic_t *fclone_ref = (atomic_t *) (n + 1); + 
n->fclone == SKB_FCLONE_FREE) { n->fclone = SKB_FCLONE_CLONE; - atomic_inc(fclone_ref); + /* As our fastclone was free, clone_ref must be 1 at this point. + * We could use atomic_inc() here, but it is faster + * to set the final value. + */ + atomic_set(&fclones->fclone_ref, 2); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; @@ -875,7 +890,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) return NULL; kmemcheck_annotate_bitfield(n, flags1); - kmemcheck_annotate_bitfield(n, flags2); n->fclone = SKB_FCLONE_UNAVAILABLE; } @@ -3069,6 +3083,11 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, } } while ((offset += len) < head_skb->len); + /* Some callers want to get the end of the list. + * Put it in segs->prev to avoid walking the list. + * (see validate_xmit_skb_list() for example) + */ + segs->prev = tail; return segs; err: @@ -3182,7 +3201,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) skb_shinfo(nskb)->frag_list = p; skb_shinfo(nskb)->gso_size = pinfo->gso_size; pinfo->gso_size = 0; - skb_header_release(p); + __skb_header_release(p); NAPI_GRO_CB(nskb)->last = p; nskb->data_len += p->len; @@ -3214,7 +3233,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) else NAPI_GRO_CB(p)->last->next = skb; NAPI_GRO_CB(p)->last = skb; - skb_header_release(skb); + __skb_header_release(skb); lp = p; done: @@ -3230,7 +3249,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) NAPI_GRO_CB(skb)->same_flow = 1; return 0; } -EXPORT_SYMBOL_GPL(skb_gro_receive); void __init skb_init(void) { @@ -3240,8 +3258,7 @@ void __init skb_init(void) SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", - (2*sizeof(struct sk_buff)) + - sizeof(atomic_t), + sizeof(struct sk_buff_fclones), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); @@ -3494,32 +3511,66 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(sock_queue_err_skb); -void __skb_tstamp_tx(struct sk_buff *orig_skb, - struct skb_shared_hwtstamps *hwtstamps, - struct sock *sk, int tstype) +struct sk_buff *sock_dequeue_err_skb(struct sock *sk) { - struct sock_exterr_skb *serr; - struct sk_buff *skb; - int err; + struct sk_buff_head *q = &sk->sk_error_queue; + struct sk_buff *skb, *skb_next; + int err = 0; - if (!sk) - return; + spin_lock_bh(&q->lock); + skb = __skb_dequeue(q); + if (skb && (skb_next = skb_peek(q))) + err = SKB_EXT_ERR(skb_next)->ee.ee_errno; + spin_unlock_bh(&q->lock); - if (hwtstamps) { - *skb_hwtstamps(orig_skb) = - *hwtstamps; - } else { - /* - * no hardware time stamps available, - * so keep the shared tx_flags and only - * store software time stamp - */ - orig_skb->tstamp = ktime_get_real(); + sk->sk_err = err; + if (err) + sk->sk_error_report(sk); + + return skb; +} +EXPORT_SYMBOL(sock_dequeue_err_skb); + +/** + * skb_clone_sk - create clone of skb, and take reference to socket + * @skb: the skb to clone + * + * This function creates a clone of a buffer that holds a reference on + * sk_refcnt. Buffers created via this function are meant to be + * returned using sock_queue_err_skb, or free via kfree_skb. + * + * When passing buffers allocated with this function to sock_queue_err_skb + * it is necessary to wrap the call with sock_hold/sock_put in order to + * prevent the socket from being released prior to being enqueued on + * the sk_error_queue. 
+ */ +struct sk_buff *skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; } - skb = skb_clone(orig_skb, GFP_ATOMIC); - if (!skb) - return; + clone->sk = sk; + clone->destructor = sock_efree; + + return clone; +} +EXPORT_SYMBOL(skb_clone_sk); + +static void __skb_complete_tx_timestamp(struct sk_buff *skb, + struct sock *sk, + int tstype) +{ + struct sock_exterr_skb *serr; + int err; serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); @@ -3537,6 +3588,42 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (err) kfree_skb(skb); } + +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock *sk = skb->sk; + + /* take a reference to prevent skb_orphan() from freeing the socket */ + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); + + sock_put(sk); +} +EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); + +void __skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, int tstype) +{ + struct sk_buff *skb; + + if (!sk) + return; + + if (hwtstamps) + *skb_hwtstamps(orig_skb) = *hwtstamps; + else + orig_skb->tstamp = ktime_get_real(); + + skb = skb_clone(orig_skb, GFP_ATOMIC); + if (!skb) + return; + + __skb_complete_tx_timestamp(skb, sk, tstype); +} EXPORT_SYMBOL_GPL(__skb_tstamp_tx); void skb_tstamp_tx(struct sk_buff *orig_skb, @@ -3561,9 +3648,14 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + /* take a reference to prevent skb_orphan() from freeing the socket */ + sock_hold(sk); + err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); + + sock_put(sk); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); @@ -3864,7 +3956,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, return false; if (len <= skb_tailroom(to)) { - BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); + if (len) + BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } @@ -4029,3 +4122,81 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) return NULL; } EXPORT_SYMBOL(skb_vlan_untag); + +/** + * alloc_skb_with_frags - allocate skb with page frags + * + * header_len: size of linear part + * data_len: needed length in frags + * max_page_order: max page order desired. + * errcode: pointer to error code if any + * gfp_mask: allocation mask + * + * This can be used to allocate a paged skb, given a maximal order for frags. + */ +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask) +{ + int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + unsigned long chunk; + struct sk_buff *skb; + struct page *page; + gfp_t gfp_head; + int i; + + *errcode = -EMSGSIZE; + /* Note this test could be relaxed, if we succeed to allocate + * high order pages... 
+ */ + if (npages > MAX_SKB_FRAGS) + return NULL; + + gfp_head = gfp_mask; + if (gfp_head & __GFP_WAIT) + gfp_head |= __GFP_REPEAT; + + *errcode = -ENOBUFS; + skb = alloc_skb(header_len, gfp_head); + if (!skb) + return NULL; + + skb->truesize += npages << PAGE_SHIFT; + + for (i = 0; npages > 0; i++) { + int order = max_page_order; + + while (order) { + if (npages >= 1 << order) { + page = alloc_pages(gfp_mask | + __GFP_COMP | + __GFP_NOWARN | + __GFP_NORETRY, + order); + if (page) + goto fill_page; + /* Do not retry other high order allocations */ + order = 1; + max_page_order = 0; + } + order--; + } + page = alloc_page(gfp_mask); + if (!page) + goto failure; +fill_page: + chunk = min_t(unsigned long, data_len, + PAGE_SIZE << order); + skb_fill_page_desc(skb, i, page, 0, chunk); + data_len -= chunk; + npages -= 1 << order; + } + return skb; + +failure: + kfree_skb(skb); + return NULL; +} +EXPORT_SYMBOL(alloc_skb_with_frags); diff --git a/net/core/sock.c b/net/core/sock.c index 611f424fb76b9349944e4dd3cf722ff33396ba61..b4f3ea2fce60cbb72d90434ad97ca26d682135a9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -437,7 +437,6 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags) int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; - int skb_len; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; @@ -459,13 +458,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) skb->dev = NULL; skb_set_owner_r(skb, sk); - /* Cache the SKB length before we tack it onto the receive - * queue. Once it is added it no longer belongs to us and - * may be freed by other threads of control pulling packets - * from the queue. - */ - skb_len = skb->len; - /* we escape from rcu protected region, make sure we dont leak * a norefcounted dst */ @@ -1642,18 +1634,24 @@ void sock_rfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_rfree); +void sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} +EXPORT_SYMBOL(sock_efree); + +#ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb) { struct sock *sk = skb->sk; -#ifdef CONFIG_INET if (sk->sk_state == TCP_TIME_WAIT) inet_twsk_put(inet_twsk(sk)); else -#endif sock_put(sk); } EXPORT_SYMBOL(sock_edemux); +#endif kuid_t sock_i_uid(struct sock *sk) { @@ -1761,21 +1759,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, int *errcode, int max_page_order) { - struct sk_buff *skb = NULL; - unsigned long chunk; - gfp_t gfp_mask; + struct sk_buff *skb; long timeo; int err; - int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - struct page *page; - int i; - - err = -EMSGSIZE; - if (npages > MAX_SKB_FRAGS) - goto failure; timeo = sock_sndtimeo(sk, noblock); - while (!skb) { + for (;;) { err = sock_error(sk); if (err != 0) goto failure; @@ -1784,66 +1773,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (sk->sk_shutdown & SEND_SHUTDOWN) goto failure; - if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) { - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - err = -EAGAIN; - if (!timeo) - goto failure; - if (signal_pending(current)) - goto interrupted; - timeo = sock_wait_for_wmem(sk, timeo); - continue; - } - - err = -ENOBUFS; - gfp_mask = sk->sk_allocation; - if (gfp_mask & __GFP_WAIT) - gfp_mask |= __GFP_REPEAT; + if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) + break; - skb = alloc_skb(header_len, gfp_mask); - if (!skb) + 
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + err = -EAGAIN; + if (!timeo) goto failure; - - skb->truesize += data_len; - - for (i = 0; npages > 0; i++) { - int order = max_page_order; - - while (order) { - if (npages >= 1 << order) { - page = alloc_pages(sk->sk_allocation | - __GFP_COMP | - __GFP_NOWARN | - __GFP_NORETRY, - order); - if (page) - goto fill_page; - /* Do not retry other high order allocations */ - order = 1; - max_page_order = 0; - } - order--; - } - page = alloc_page(sk->sk_allocation); - if (!page) - goto failure; -fill_page: - chunk = min_t(unsigned long, data_len, - PAGE_SIZE << order); - skb_fill_page_desc(skb, i, page, 0, chunk); - data_len -= chunk; - npages -= 1 << order; - } + if (signal_pending(current)) + goto interrupted; + timeo = sock_wait_for_wmem(sk, timeo); } - - skb_set_owner_w(skb, sk); + skb = alloc_skb_with_frags(header_len, data_len, max_page_order, + errcode, sk->sk_allocation); + if (skb) + skb_set_owner_w(skb, sk); return skb; interrupted: err = sock_intr_errno(timeo); failure: - kfree_skb(skb); *errcode = err; return NULL; } @@ -2492,11 +2442,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type) { struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; int copied, err; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -2517,16 +2467,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else - spin_unlock_bh(&sk->sk_error_queue.lock); - out_free_skb: kfree_skb(skb); out: diff --git a/net/core/timestamping.c b/net/core/timestamping.c index a8770391ea5bfc0db85135cf913c84d09f55cedb..43d3dd62fcc8eccd95a4618f68b0553cf7309c01 100644 --- a/net/core/timestamping.c +++ b/net/core/timestamping.c @@ -36,10 +36,9 @@ void skb_clone_tx_timestamp(struct sk_buff *skb) { struct phy_device *phydev; struct sk_buff *clone; - struct sock *sk = skb->sk; unsigned int type; - if (!sk) + if (!skb->sk) return; type = classify(skb); @@ -48,50 +47,14 @@ void skb_clone_tx_timestamp(struct sk_buff *skb) phydev = skb->dev->phydev; if (likely(phydev->drv->txtstamp)) { - if (!atomic_inc_not_zero(&sk->sk_refcnt)) + clone = skb_clone_sk(skb); + if (!clone) return; - - clone = skb_clone(skb, GFP_ATOMIC); - if (!clone) { - sock_put(sk); - return; - } - - clone->sk = sk; phydev->drv->txtstamp(phydev, clone, type); } } EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp); -void skb_complete_tx_timestamp(struct sk_buff *skb, - struct skb_shared_hwtstamps *hwtstamps) -{ - struct sock *sk = skb->sk; - struct sock_exterr_skb *serr; - int err; - - if (!hwtstamps) { - sock_put(sk); - kfree_skb(skb); - return; - } - - *skb_hwtstamps(skb) = *hwtstamps; - - serr = SKB_EXT_ERR(skb); - memset(serr, 0, sizeof(*serr)); - serr->ee.ee_errno = ENOMSG; - serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; - skb->sk = NULL; - - err = sock_queue_err_skb(sk, skb); - - sock_put(sk); - if (err) - kfree_skb(skb); -} -EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); - bool skb_defer_rx_timestamp(struct sk_buff *skb) { struct phy_device *phydev; diff --git a/net/core/utils.c b/net/core/utils.c 
index eed34338736c275aa02bfa40448801d46dda736b..efc76dd9dcd160aaf31a37454fc980fbb7e9d5b4 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -306,16 +306,14 @@ EXPORT_SYMBOL(in6_pton); void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, int pseudohdr) { - __be32 diff[] = { ~from, to }; if (skb->ip_summed != CHECKSUM_PARTIAL) { - *sum = csum_fold(csum_partial(diff, sizeof(diff), - ~csum_unfold(*sum))); + *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), + to)); if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) - skb->csum = ~csum_partial(diff, sizeof(diff), - ~skb->csum); + skb->csum = ~csum_add(csum_sub(~(skb->csum), from), to); } else if (pseudohdr) - *sum = ~csum_fold(csum_partial(diff, sizeof(diff), - csum_unfold(*sum))); + *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), from), + to)); } EXPORT_SYMBOL(inet_proto_csum_replace4); diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 597557254ddb42dd9e58e8cb5b3594f07fc34d10..83498975165f9906b80349d58559eb36d81e0fae 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -99,7 +99,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab) kmem_cache_destroy(slab); } -static int ccid_activate(struct ccid_operations *ccid_ops) +static int __init ccid_activate(struct ccid_operations *ccid_ops) { int err = -ENOBUFS; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 04cb17d4b0cea9e9a0eac62f7e7f7135d7b9337c..ad2acfe1ca61004e1276e388fdde95c93716d26a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -404,7 +404,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; - if (ipv6_opt_accepted(sk, skb) || + if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { atomic_inc(&skb->users); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index f440cc7c9f721b6db5d31b37b5fa863a50aad2fc..97b0fcc79547aa48476feda4e0c6ef95054bd5e7 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1082,7 +1082,7 @@ void dccp_shutdown(struct sock *sk, int how) EXPORT_SYMBOL_GPL(dccp_shutdown); -static inline int dccp_mib_init(void) +static inline int __init dccp_mib_init(void) { dccp_statistics = alloc_percpu(struct dccp_mib); if (!dccp_statistics) diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index ae011b46c0710fc96204deda6bddb1d912d78f22..25733d53814763c85e49a612df3bbcdb202992d6 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -127,6 +127,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include #include #include +#include #include #include #include @@ -598,7 +599,7 @@ int dn_destroy_timer(struct sock *sk) if (sk->sk_socket) return 0; - if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) { + if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) { dn_unhash_sock(sk); sock_put(sk); return 1; diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 3b726f31c64c0b88efcfacd4d64df7177260ad5c..4400da7739dafb3c3d42e52829086b9d3e418505 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -875,7 +876,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) { /* First check time since device went up */ - if ((jiffies - dn_db->uptime) < 
DRDELAY) + if (time_before(jiffies, dn_db->uptime + DRDELAY)) return 0; /* If there is no router, then yes... */ diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c index d9c150cc59a952ac86585b6c600acc398b80bc14..1d330fd43dc7e177d162f77796d2142a4e563fdb 100644 --- a/net/decnet/dn_timer.c +++ b/net/decnet/dn_timer.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -91,7 +92,7 @@ static void dn_slow_timer(unsigned long arg) * since the last successful transmission. */ if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) { - if ((jiffies - scp->stamp) >= scp->keepalive) + if (time_after_eq(jiffies, scp->stamp + scp->keepalive)) scp->keepalive_fxn(sk); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index f5eede1d6cb8eb92cbf97b1b37de1362f13ac095..a585fd6352ebaf8e8bdf0e9772653857def08811 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -12,6 +12,9 @@ config NET_DSA if NET_DSA # tagging formats +config NET_DSA_TAG_BRCM + bool + config NET_DSA_TAG_DSA bool diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 7b9fcbbeda5d0d80678ba909f76b0b30d8436a2d..da06ed1df620fc620a90ab06cfadad29ce26f3bf 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += dsa.o slave.o # tagging formats +dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 0a49632fac478f1ac17b3f0159cbdf748fe25110..22f34cf4cb27d1aad06dcb41a1f6d84d4dffd688 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include @@ -44,7 +43,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * -dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) +dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) { struct dsa_switch_driver *ret; struct list_head *list; @@ -59,7 +58,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) drv = list_entry(list, struct dsa_switch_driver, list); - name = drv->probe(bus, sw_addr); + name = drv->probe(host_dev, sw_addr); if (name != NULL) { ret = drv; break; @@ -76,7 +75,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) /* basic switch operations **************************************************/ static struct dsa_switch * dsa_switch_setup(struct dsa_switch_tree *dst, int index, - struct device *parent, struct mii_bus *bus) + struct device *parent, struct device *host_dev) { struct dsa_chip_data *pd = dst->pd->chip + index; struct dsa_switch_driver *drv; @@ -89,7 +88,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Probe for switch model. */ - drv = dsa_switch_probe(bus, pd->sw_addr, &name); + drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); if (drv == NULL) { printk(KERN_ERR "%s[%d]: could not detect attached switch\n", dst->master_netdev->name, index); @@ -110,8 +109,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->index = index; ds->pd = dst->pd->chip + index; ds->drv = drv; - ds->master_mii_bus = bus; - + ds->master_dev = host_dev; /* * Validate supplied switch configuration. 
@@ -144,14 +142,44 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, goto out; } + /* Make the built-in MII bus mask match the number of ports, + * switch drivers can override this later + */ + ds->phys_mii_mask = ds->phys_port_mask; + /* * If the CPU connects to this switch, set the switch tree * tagging protocol to the preferred tagging format of this * switch. */ - if (ds->dst->cpu_switch == index) - ds->dst->tag_protocol = drv->tag_protocol; + if (dst->cpu_switch == index) { + switch (drv->tag_protocol) { +#ifdef CONFIG_NET_DSA_TAG_DSA + case DSA_TAG_PROTO_DSA: + dst->rcv = dsa_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_EDSA + case DSA_TAG_PROTO_EDSA: + dst->rcv = edsa_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_TRAILER + case DSA_TAG_PROTO_TRAILER: + dst->rcv = trailer_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_BRCM + case DSA_TAG_PROTO_BRCM: + dst->rcv = brcm_netdev_ops.rcv; + break; +#endif + default: + break; + } + dst->tag_protocol = drv->tag_protocol; + } /* * Do basic register setup. @@ -210,6 +238,51 @@ static void dsa_switch_destroy(struct dsa_switch *ds) { } +#ifdef CONFIG_PM_SLEEP +static int dsa_switch_suspend(struct dsa_switch *ds) +{ + int i, ret = 0; + + /* Suspend slave network devices */ + for (i = 0; i < DSA_MAX_PORTS; i++) { + if (!(ds->phys_port_mask & (1 << i))) + continue; + + ret = dsa_slave_suspend(ds->ports[i]); + if (ret) + return ret; + } + + if (ds->drv->suspend) + ret = ds->drv->suspend(ds); + + return ret; +} + +static int dsa_switch_resume(struct dsa_switch *ds) +{ + int i, ret = 0; + + if (ds->drv->resume) + ret = ds->drv->resume(ds); + + if (ret) + return ret; + + /* Resume slave network devices */ + for (i = 0; i < DSA_MAX_PORTS; i++) { + if (!(ds->phys_port_mask & (1 << i))) + continue; + + ret = dsa_slave_resume(ds->ports[i]); + if (ret) + return ret; + } + + return 0; +} +#endif + /* link polling *************************************************************/ static void dsa_link_poll_work(struct work_struct *ugly) @@ -256,7 +329,7 @@ static struct device *dev_find_class(struct device *parent, char *class) return device_find_child(parent, class, dev_is_class); } -static struct mii_bus *dev_to_mii_bus(struct device *dev) +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) { struct device *d; @@ -272,6 +345,7 @@ static struct mii_bus *dev_to_mii_bus(struct device *dev) return NULL; } +EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus); static struct net_device *dev_to_net_device(struct device *dev) { @@ -410,7 +484,8 @@ static int dsa_of_probe(struct platform_device *pdev) chip_index++; cd = &pd->chip[chip_index]; - cd->mii_bus = &mdio_bus->dev; + cd->of_node = child; + cd->host_dev = &mdio_bus->dev; sw_addr = of_get_property(child, "reg", NULL); if (!sw_addr) @@ -431,6 +506,8 @@ static int dsa_of_probe(struct platform_device *pdev) if (!port_name) continue; + cd->port_dn[port_index] = port; + cd->port_names[port_index] = kstrdup(port_name, GFP_KERNEL); if (!cd->port_names[port_index]) { @@ -534,17 +611,9 @@ static int dsa_probe(struct platform_device *pdev) dst->cpu_port = -1; for (i = 0; i < pd->nr_chips; i++) { - struct mii_bus *bus; struct dsa_switch *ds; - bus = dev_to_mii_bus(pd->chip[i].mii_bus); - if (bus == NULL) { - printk(KERN_ERR "%s[%d]: no mii bus found for " - "dsa switch\n", dev->name, i); - continue; - } - - ds = dsa_switch_setup(dst, i, &pdev->dev, bus); + ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev); if (IS_ERR(ds)) { printk(KERN_ERR "%s[%d]: couldn't 
create dsa switch " "instance (error %ld)\n", dev->name, i, @@ -608,7 +677,62 @@ static void dsa_shutdown(struct platform_device *pdev) { } +static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + + if (unlikely(dst == NULL)) { + kfree_skb(skb); + return 0; + } + + return dst->rcv(skb, dev, pt, orig_dev); +} + +static struct packet_type dsa_pack_type __read_mostly = { + .type = cpu_to_be16(ETH_P_XDSA), + .func = dsa_switch_rcv, +}; + +#ifdef CONFIG_PM_SLEEP +static int dsa_suspend(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_suspend(ds); + } + + return ret; +} + +static int dsa_resume(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_resume(ds); + } + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); + static const struct of_device_id dsa_of_match_table[] = { + { .compatible = "brcm,bcm7445-switch-v4.0" }, { .compatible = "marvell,dsa", }, {} }; @@ -622,6 +746,7 @@ static struct platform_driver dsa_driver = { .name = "dsa", .owner = THIS_MODULE, .of_match_table = dsa_of_match_table, + .pm = &dsa_pm_ops, }, }; @@ -633,30 +758,15 @@ static int __init dsa_init_module(void) if (rc) return rc; -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_add_pack(&dsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_add_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_add_pack(&trailer_packet_type); -#endif + dev_add_pack(&dsa_pack_type); + return 0; } module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_remove_pack(&trailer_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_remove_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_remove_pack(&dsa_packet_type); -#endif + dev_remove_pack(&dsa_pack_type); platform_driver_unregister(&dsa_driver); } module_exit(dsa_cleanup_module); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index d4cf5cc747e3569f4d41855d9894b7a4fa98253d..dc9756d3154c7c81a1f8bd2b3124de36f0a97963 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -12,7 +12,13 @@ #define __DSA_PRIV_H #include -#include +#include + +struct dsa_device_ops { + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + int (*rcv)(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); +}; struct dsa_slave_priv { /* @@ -20,6 +26,8 @@ struct dsa_slave_priv { * switch port. */ struct net_device *dev; + netdev_tx_t (*xmit)(struct sk_buff *skb, + struct net_device *dev); /* * Which switch this port is a part of, and the port index @@ -33,28 +41,35 @@ struct dsa_slave_priv { * to this port. 
*/ struct phy_device *phy; + phy_interface_t phy_interface; + int old_link; + int old_pause; + int old_duplex; }; /* dsa.c */ extern char dsa_driver_version[]; /* slave.c */ +extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); struct net_device *dsa_slave_create(struct dsa_switch *ds, struct device *parent, int port, char *name); +int dsa_slave_suspend(struct net_device *slave_dev); +int dsa_slave_resume(struct net_device *slave_dev); /* tag_dsa.c */ -netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type dsa_packet_type; +extern const struct dsa_device_ops dsa_netdev_ops; /* tag_edsa.c */ -netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type edsa_packet_type; +extern const struct dsa_device_ops edsa_netdev_ops; /* tag_trailer.c */ -netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type trailer_packet_type; +extern const struct dsa_device_ops trailer_netdev_ops; + +/* tag_brcm.c */ +extern const struct dsa_device_ops brcm_netdev_ops; #endif diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 45a1e34c89e0d975dd9f361a73a5617d69a10301..8030489d9cbe84f73cfd5ebbae82f60383f707b1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -9,9 +9,10 @@ */ #include -#include #include #include +#include +#include #include "dsa_priv.h" /* slave mii_bus handling ***************************************************/ @@ -19,7 +20,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) { struct dsa_switch *ds = bus->priv; - if (ds->phys_port_mask & (1 << addr)) + if (ds->phys_mii_mask & (1 << addr)) return ds->drv->phy_read(ds, addr, reg); return 0xffff; @@ -29,7 +30,7 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val) { struct dsa_switch *ds = bus->priv; - if (ds->phys_port_mask & (1 << addr)) + if (ds->phys_mii_mask & (1 << addr)) return ds->drv->phy_write(ds, addr, reg, val); return 0; @@ -43,7 +44,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) ds->slave_mii_bus->write = dsa_slave_phy_write; snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", ds->index, ds->pd->sw_addr); - ds->slave_mii_bus->parent = &ds->master_mii_bus->dev; + ds->slave_mii_bus->parent = ds->master_dev; } @@ -61,6 +62,7 @@ static int dsa_slave_open(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct net_device *master = p->parent->dst->master_netdev; + struct dsa_switch *ds = p->parent; int err; if (!(master->flags & IFF_UP)) @@ -83,8 +85,20 @@ static int dsa_slave_open(struct net_device *dev) goto clear_allmulti; } + if (ds->drv->port_enable) { + err = ds->drv->port_enable(ds, p->port, p->phy); + if (err) + goto clear_promisc; + } + + if (p->phy) + phy_start(p->phy); + return 0; +clear_promisc: + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(master, 0); clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(master, -1); @@ -99,6 +113,10 @@ static int dsa_slave_close(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct net_device *master = p->parent->dst->master_netdev; + struct dsa_switch *ds = p->parent; + + if (p->phy) + phy_stop(p->phy); dev_mc_unsync(master, dev); dev_uc_unsync(master, dev); @@ -110,6 +128,9 @@ static int dsa_slave_close(struct net_device *dev) if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) dev_uc_del(master, dev->dev_addr); + if (ds->drv->port_disable) + ds->drv->port_disable(ds, p->port, p->phy); 
+ return 0; } @@ -171,6 +192,24 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + return p->xmit(skb, dev); +} + +static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + skb->dev = p->parent->dst->master_netdev; + dev_queue_xmit(skb); + + return NETDEV_TX_OK; +} + /* ethtool operations *******************************************************/ static int @@ -282,6 +321,65 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } +static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_wol) + ds->drv->get_wol(ds, p->port, w); +} + +static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + if (ds->drv->set_wol) + ret = ds->drv->set_wol(ds, p->port, w); + + return ret; +} + +static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; + + if (!ds->drv->set_eee) + return -EOPNOTSUPP; + + ret = ds->drv->set_eee(ds, p->port, p->phy, e); + if (ret) + return ret; + + if (p->phy) + ret = phy_ethtool_set_eee(p->phy, e); + + return ret; +} + +static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; + + if (!ds->drv->get_eee) + return -EOPNOTSUPP; + + ret = ds->drv->get_eee(ds, p->port, e); + if (ret) + return ret; + + if (p->phy) + ret = phy_ethtool_get_eee(p->phy, e); + + return ret; +} + static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_settings = dsa_slave_get_settings, .set_settings = dsa_slave_set_settings, @@ -291,46 +389,143 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_strings = dsa_slave_get_strings, .get_ethtool_stats = dsa_slave_get_ethtool_stats, .get_sset_count = dsa_slave_get_sset_count, + .set_wol = dsa_slave_set_wol, + .get_wol = dsa_slave_get_wol, + .set_eee = dsa_slave_set_eee, + .get_eee = dsa_slave_get_eee, }; -#ifdef CONFIG_NET_DSA_TAG_DSA -static const struct net_device_ops dsa_netdev_ops = { +static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_init = dsa_slave_init, .ndo_open = dsa_slave_open, .ndo_stop = dsa_slave_close, - .ndo_start_xmit = dsa_xmit, + .ndo_start_xmit = dsa_slave_xmit, .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, }; -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA -static const struct net_device_ops edsa_netdev_ops = { - .ndo_init = dsa_slave_init, - .ndo_open = dsa_slave_open, - .ndo_stop = dsa_slave_close, - .ndo_start_xmit = edsa_xmit, - .ndo_change_rx_flags = dsa_slave_change_rx_flags, - .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_do_ioctl = dsa_slave_ioctl, -}; -#endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER -static const struct net_device_ops trailer_netdev_ops = { - .ndo_init = dsa_slave_init, - .ndo_open = dsa_slave_open, - .ndo_stop = 
dsa_slave_close, - .ndo_start_xmit = trailer_xmit, - .ndo_change_rx_flags = dsa_slave_change_rx_flags, - .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_do_ioctl = dsa_slave_ioctl, -}; -#endif + +static void dsa_slave_adjust_link(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + unsigned int status_changed = 0; + + if (p->old_link != p->phy->link) { + status_changed = 1; + p->old_link = p->phy->link; + } + + if (p->old_duplex != p->phy->duplex) { + status_changed = 1; + p->old_duplex = p->phy->duplex; + } + + if (p->old_pause != p->phy->pause) { + status_changed = 1; + p->old_pause = p->phy->pause; + } + + if (ds->drv->adjust_link && status_changed) + ds->drv->adjust_link(ds, p->port, p->phy); + + if (status_changed) + phy_print_status(p->phy); +} + +static int dsa_slave_fixed_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->fixed_link_update) + ds->drv->fixed_link_update(ds, p->port, status); + + return 0; +} /* slave device setup *******************************************************/ +static void dsa_slave_phy_setup(struct dsa_slave_priv *p, + struct net_device *slave_dev) +{ + struct dsa_switch *ds = p->parent; + struct dsa_chip_data *cd = ds->pd; + struct device_node *phy_dn, *port_dn; + bool phy_is_fixed = false; + u32 phy_flags = 0; + int ret; + + port_dn = cd->port_dn[p->port]; + p->phy_interface = of_get_phy_mode(port_dn); + + phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); + if (of_phy_is_fixed_link(port_dn)) { + /* In the case of a fixed PHY, the DT node associated + * to the fixed PHY is the Port DT node + */ + ret = of_phy_register_fixed_link(port_dn); + if (ret) { + pr_err("failed to register fixed PHY\n"); + return; + } + phy_is_fixed = true; + phy_dn = port_dn; + } + + if (ds->drv->get_phy_flags) + phy_flags = ds->drv->get_phy_flags(ds, p->port); + + if (phy_dn) + p->phy = of_phy_connect(slave_dev, phy_dn, + dsa_slave_adjust_link, phy_flags, + p->phy_interface); + + if (p->phy && phy_is_fixed) + fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update); + + /* We could not connect to a designated PHY, so use the switch internal + * MDIO bus instead + */ + if (!p->phy) + p->phy = ds->slave_mii_bus->phy_map[p->port]; + else + pr_info("attached PHY at address %d [%s]\n", + p->phy->addr, p->phy->drv->name); +} + +int dsa_slave_suspend(struct net_device *slave_dev) +{ + struct dsa_slave_priv *p = netdev_priv(slave_dev); + + netif_device_detach(slave_dev); + + if (p->phy) { + phy_stop(p->phy); + p->old_pause = -1; + p->old_link = -1; + p->old_duplex = -1; + phy_suspend(p->phy); + } + + return 0; +} + +int dsa_slave_resume(struct net_device *slave_dev) +{ + struct dsa_slave_priv *p = netdev_priv(slave_dev); + + netif_device_attach(slave_dev); + + if (p->phy) { + phy_resume(p->phy); + phy_start(p->phy); + } + + return 0; +} + struct net_device * dsa_slave_create(struct dsa_switch *ds, struct device *parent, int port, char *name) @@ -349,35 +544,48 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; eth_hw_addr_inherit(slave_dev, master); slave_dev->tx_queue_len = 0; + slave_dev->netdev_ops = &dsa_slave_netdev_ops; + + SET_NETDEV_DEV(slave_dev, parent); + slave_dev->dev.of_node = ds->pd->port_dn[port]; + slave_dev->vlan_features = master->vlan_features; + + p = 
netdev_priv(slave_dev); + p->dev = slave_dev; + p->parent = ds; + p->port = port; switch (ds->dst->tag_protocol) { #ifdef CONFIG_NET_DSA_TAG_DSA - case htons(ETH_P_DSA): - slave_dev->netdev_ops = &dsa_netdev_ops; + case DSA_TAG_PROTO_DSA: + p->xmit = dsa_netdev_ops.xmit; break; #endif #ifdef CONFIG_NET_DSA_TAG_EDSA - case htons(ETH_P_EDSA): - slave_dev->netdev_ops = &edsa_netdev_ops; + case DSA_TAG_PROTO_EDSA: + p->xmit = edsa_netdev_ops.xmit; break; #endif #ifdef CONFIG_NET_DSA_TAG_TRAILER - case htons(ETH_P_TRAILER): - slave_dev->netdev_ops = &trailer_netdev_ops; + case DSA_TAG_PROTO_TRAILER: + p->xmit = trailer_netdev_ops.xmit; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_BRCM + case DSA_TAG_PROTO_BRCM: + p->xmit = brcm_netdev_ops.xmit; break; #endif default: - BUG(); + p->xmit = dsa_slave_notag_xmit; + break; } - SET_NETDEV_DEV(slave_dev, parent); - slave_dev->vlan_features = master->vlan_features; + p->old_pause = -1; + p->old_link = -1; + p->old_duplex = -1; - p = netdev_priv(slave_dev); - p->dev = slave_dev; - p->parent = ds; - p->port = port; - p->phy = ds->slave_mii_bus->phy_map[port]; + dsa_slave_phy_setup(p, slave_dev); ret = register_netdev(slave_dev); if (ret) { @@ -390,6 +598,9 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, netif_carrier_off(slave_dev); if (p->phy != NULL) { + if (ds->drv->get_phy_flags(ds, port)) + p->phy->dev_flags |= ds->drv->get_phy_flags(ds, port); + phy_attach(slave_dev, dev_name(&p->phy->dev), PHY_INTERFACE_MODE_GMII); @@ -397,7 +608,6 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, p->phy->speed = 0; p->phy->duplex = 0; p->phy->advertising = p->phy->supported | ADVERTISED_Autoneg; - phy_start_aneg(p->phy); } return slave_dev; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c new file mode 100644 index 0000000000000000000000000000000000000000..83d3572cdb205934e3099c258244f347d7351517 --- /dev/null +++ b/net/dsa/tag_brcm.c @@ -0,0 +1,171 @@ +/* + * Broadcom tag support + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include "dsa_priv.h" + +/* This tag length is 4 bytes, older ones were 6 bytes, we do not + * handle them + */ +#define BRCM_TAG_LEN 4 + +/* Tag is constructed and desconstructed using byte by byte access + * because the tag is placed after the MAC Source Address, which does + * not make it 4-bytes aligned, so this might cause unaligned accesses + * on most systems where this is used. 
+ */ + +/* Ingress and egress opcodes */ +#define BRCM_OPCODE_SHIFT 5 +#define BRCM_OPCODE_MASK 0x7 + +/* Ingress fields */ +/* 1st byte in the tag */ +#define BRCM_IG_TC_SHIFT 2 +#define BRCM_IG_TC_MASK 0x7 +/* 2nd byte in the tag */ +#define BRCM_IG_TE_MASK 0x3 +#define BRCM_IG_TS_SHIFT 7 +/* 3rd byte in the tag */ +#define BRCM_IG_DSTMAP2_MASK 1 +#define BRCM_IG_DSTMAP1_MASK 0xff + +/* Egress fields */ + +/* 2nd byte in the tag */ +#define BRCM_EG_CID_MASK 0xff + +/* 3rd byte in the tag */ +#define BRCM_EG_RC_MASK 0xff +#define BRCM_EG_RC_RSVD (3 << 6) +#define BRCM_EG_RC_EXCEPTION (1 << 5) +#define BRCM_EG_RC_PROT_SNOOP (1 << 4) +#define BRCM_EG_RC_PROT_TERM (1 << 3) +#define BRCM_EG_RC_SWITCH (1 << 2) +#define BRCM_EG_RC_MAC_LEARN (1 << 1) +#define BRCM_EG_RC_MIRROR (1 << 0) +#define BRCM_EG_TC_SHIFT 5 +#define BRCM_EG_TC_MASK 0x7 +#define BRCM_EG_PID_MASK 0x1f + +static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + u8 *brcm_tag; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (skb_cow_head(skb, BRCM_TAG_LEN) < 0) + goto out_free; + + skb_push(skb, BRCM_TAG_LEN); + + memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN); + + /* Build the tag after the MAC Source Address */ + brcm_tag = skb->data + 2 * ETH_ALEN; + + /* Set the ingress opcode, traffic class, tag enforcment is + * deprecated + */ + brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) | + ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK); + brcm_tag[1] = 0; + brcm_tag[2] = 0; + if (p->port == 8) + brcm_tag[2] = BRCM_IG_DSTMAP2_MASK; + brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK; + + /* Queue the SKB for transmission on the parent interface, but + * do not modify its EtherType + */ + skb->dev = p->parent->dst->master_netdev; + dev_queue_xmit(skb); + + return NETDEV_TX_OK; + +out_free: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds; + int source_port; + u8 *brcm_tag; + + if (unlikely(dst == NULL)) + goto out_drop; + + ds = dst->ds[0]; + + skb = skb_unshare(skb, GFP_ATOMIC); + if (skb == NULL) + goto out; + + if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN))) + goto out_drop; + + /* skb->data points to the EtherType, the tag is right before it */ + brcm_tag = skb->data - 2; + + /* The opcode should never be different than 0b000 */ + if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK)) + goto out_drop; + + /* We should never see a reserved reason code without knowing how to + * handle it + */ + WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD); + + /* Locate which port this is coming from */ + source_port = brcm_tag[3] & BRCM_EG_PID_MASK; + + /* Validate port against switch setup, either the port is totally */ + if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL) + goto out_drop; + + /* Remove Broadcom tag and update checksum */ + skb_pull_rcsum(skb, BRCM_TAG_LEN); + + /* Move the Ethernet DA and SA */ + memmove(skb->data - ETH_HLEN, + skb->data - ETH_HLEN - BRCM_TAG_LEN, + 2 * ETH_ALEN); + + skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; + skb->dev = ds->ports[source_port]; + skb->protocol = eth_type_trans(skb, skb->dev); + + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return 0; + +out_drop: + kfree_skb(skb); +out: + return 0; +} + +const 
struct dsa_device_ops brcm_netdev_ops = { + .xmit = brcm_tag_xmit, + .rcv = brcm_tag_rcv, +}; diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index cacce1e22f9caa029c2374cd93c2e071932f365c..ce90c8bdc65864576164f5b431e1d1ea15c573f4 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -10,13 +10,12 @@ #include #include -#include #include #include "dsa_priv.h" #define DSA_HLEN 4 -netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); u8 *dsa_header; @@ -186,7 +185,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type dsa_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_DSA), - .func = dsa_rcv, +const struct dsa_device_ops dsa_netdev_ops = { + .xmit = dsa_xmit, + .rcv = dsa_rcv, }; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index e70c43c25e64c9310d3d5a61b407d8eeb76402d2..94fcce7786796a8d534dd188ce8a6562e0543c9c 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -10,14 +10,13 @@ #include #include -#include #include #include "dsa_priv.h" #define DSA_HLEN 4 #define EDSA_HLEN 8 -netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); u8 *edsa_header; @@ -205,7 +204,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type edsa_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_EDSA), - .func = edsa_rcv, +const struct dsa_device_ops edsa_netdev_ops = { + .xmit = edsa_xmit, + .rcv = edsa_rcv, }; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 94bc260d015d11f1a321ca3f3876c7b901716302..115fdca34077d420290941e6a264df42255f4136 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -10,11 +10,10 @@ #include #include -#include #include #include "dsa_priv.h" -netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct sk_buff *nskb; @@ -114,7 +113,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } -struct packet_type trailer_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_TRAILER), - .func = trailer_rcv, +const struct dsa_device_ops trailer_netdev_ops = { + .xmit = trailer_xmit, + .rcv = trailer_rcv, }; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index f405e05924078b2d30f45139cd698843a16256da..33a140e1583449bcf2816bb91792e810f7c1734f 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -145,6 +145,33 @@ int eth_rebuild_header(struct sk_buff *skb) } EXPORT_SYMBOL(eth_rebuild_header); +/** + * eth_get_headlen - determine the the length of header for an ethernet frame + * @data: pointer to start of frame + * @len: total length of frame + * + * Make a best effort attempt to pull the length for all of the headers for + * a given frame in a linear buffer. 
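A hypothetical caller of the helper defined just below (sketch only; the function name and the surrounding buffer handling are invented to show the intended use in a header-split receive path, and attaching the remaining payload as a page fragment is left out):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *rx_copy_headers(struct net_device *dev,
					       void *frame, unsigned int len)
	{
		/* size the linear copy to just the protocol headers */
		unsigned int hlen = eth_get_headlen(frame, len);
		struct sk_buff *skb = netdev_alloc_skb(dev, hlen);

		if (!skb)
			return NULL;

		memcpy(skb_put(skb, hlen), frame, hlen);
		return skb;
	}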
+ */ +u32 eth_get_headlen(void *data, unsigned int len) +{ + const struct ethhdr *eth = (const struct ethhdr *)data; + struct flow_keys keys; + + /* this should never happen, but better safe than sorry */ + if (len < sizeof(*eth)) + return len; + + /* parse any remaining L2/L3 headers, check for L4 */ + if (!__skb_flow_dissect(NULL, &keys, data, + eth->h_proto, sizeof(*eth), len)) + return max_t(u32, keys.thoff, sizeof(*eth)); + + /* parse for any L4 headers */ + return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len); +} +EXPORT_SYMBOL(eth_get_headlen); + /** * eth_type_trans - determine the packet's protocol ID. * @skb: received socket data @@ -181,11 +208,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) * variants has been configured on the receiving interface, * and if so, set skb->protocol without looking at the packet. */ - if (unlikely(netdev_uses_dsa_tags(dev))) - return htons(ETH_P_DSA); - - if (unlikely(netdev_uses_trailer_tags(dev))) - return htons(ETH_P_TRAILER); + if (unlikely(netdev_uses_dsa(dev))) + return htons(ETH_P_XDSA); if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN)) return eth->h_proto; diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c index 6591d27e53a43413656a18a6eedb25d7bb5fe034..44136297b673a2b2645f481fb723222fa97b951a 100644 --- a/net/ieee802154/6lowpan_rtnl.c +++ b/net/ieee802154/6lowpan_rtnl.c @@ -71,18 +71,33 @@ struct lowpan_dev_record { struct list_head list; }; +/* don't save pan id, it's intra pan */ +struct lowpan_addr { + u8 mode; + union { + /* IPv6 needs big endian here */ + __be64 extended_addr; + __be16 short_addr; + } u; +}; + +struct lowpan_addr_info { + struct lowpan_addr daddr; + struct lowpan_addr saddr; +}; + static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { return netdev_priv(dev); } -static inline void lowpan_address_flip(u8 *src, u8 *dest) +static inline struct +lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb) { - int i; - - for (i = 0; i < IEEE802154_ADDR_LEN; i++) - (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; + WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info)); + return (struct lowpan_addr_info *)(skb->data - + sizeof(struct lowpan_addr_info)); } static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, @@ -91,8 +106,7 @@ static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, { const u8 *saddr = _saddr; const u8 *daddr = _daddr; - struct ieee802154_addr sa, da; - struct ieee802154_mac_cb *cb = mac_cb_init(skb); + struct lowpan_addr_info *info; /* TODO: * if this package isn't ipv6 one, where should it be routed? @@ -106,41 +120,17 @@ static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); - lowpan_header_compress(skb, dev, type, daddr, saddr, len); - - /* NOTE1: I'm still unsure about the fact that compression and WPAN - * header are created here and not later in the xmit. So wait for - * an opinion of net maintainers. 
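The headroom stash used by lowpan_skb_priv() above, reduced to a flat-buffer model (illustrative only; the struct name is a stand-in): the addressing data is parked immediately below skb->data at header_create time and read back by lowpan_header() further down at transmit time, which is why the real helper warns when the headroom is smaller than the stash.

	struct lowpan_addr_stash {	/* stand-in for lowpan_addr_info */
		unsigned char daddr[8];
		unsigned char saddr[8];
	};

	static struct lowpan_addr_stash *stash_of(unsigned char *data)
	{
		/* the stash lives in the headroom, just below 'data' */
		return (struct lowpan_addr_stash *)
		       (data - sizeof(struct lowpan_addr_stash));
	}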
- */ - /* NOTE2: to be absolutely correct, we must derive PANid information - * from MAC subif of the 'dev' and 'real_dev' network devices, but - * this isn't implemented in mainline yet, so currently we assign 0xff - */ - cb->type = IEEE802154_FC_TYPE_DATA; + info = lowpan_skb_priv(skb); - /* prepare wpan address data */ - sa.mode = IEEE802154_ADDR_LONG; - sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); - sa.extended_addr = ieee802154_devaddr_from_raw(saddr); + /* TODO: Currently we only support extended_addr */ + info->daddr.mode = IEEE802154_ADDR_LONG; + memcpy(&info->daddr.u.extended_addr, daddr, + sizeof(info->daddr.u.extended_addr)); + info->saddr.mode = IEEE802154_ADDR_LONG; + memcpy(&info->saddr.u.extended_addr, saddr, + sizeof(info->daddr.u.extended_addr)); - /* intra-PAN communications */ - da.pan_id = sa.pan_id; - - /* if the destination address is the broadcast address, use the - * corresponding short address - */ - if (lowpan_is_addr_broadcast(daddr)) { - da.mode = IEEE802154_ADDR_SHORT; - da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); - } else { - da.mode = IEEE802154_ADDR_LONG; - da.extended_addr = ieee802154_devaddr_from_raw(daddr); - } - - cb->ackreq = !lowpan_is_addr_broadcast(daddr); - - return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, - type, (void *)&da, (void *)&sa, 0); + return 0; } static int lowpan_give_skb_to_devices(struct sk_buff *skb, @@ -338,13 +328,68 @@ lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev, return rc; } +static int lowpan_header(struct sk_buff *skb, struct net_device *dev) +{ + struct ieee802154_addr sa, da; + struct ieee802154_mac_cb *cb = mac_cb_init(skb); + struct lowpan_addr_info info; + void *daddr, *saddr; + + memcpy(&info, lowpan_skb_priv(skb), sizeof(info)); + + /* TODO: Currently we only support extended_addr */ + daddr = &info.daddr.u.extended_addr; + saddr = &info.saddr.u.extended_addr; + + lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len); + + cb->type = IEEE802154_FC_TYPE_DATA; + + /* prepare wpan address data */ + sa.mode = IEEE802154_ADDR_LONG; + sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + sa.extended_addr = ieee802154_devaddr_from_raw(saddr); + + /* intra-PAN communications */ + da.pan_id = sa.pan_id; + + /* if the destination address is the broadcast address, use the + * corresponding short address + */ + if (lowpan_is_addr_broadcast((const u8 *)daddr)) { + da.mode = IEEE802154_ADDR_SHORT; + da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); + cb->ackreq = false; + } else { + da.mode = IEEE802154_ADDR_LONG; + da.extended_addr = ieee802154_devaddr_from_raw(daddr); + cb->ackreq = true; + } + + return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, + ETH_P_IPV6, (void *)&da, (void *)&sa, 0); +} + static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee802154_hdr wpan_hdr; - int max_single; + int max_single, ret; pr_debug("package xmit\n"); + /* We must take a copy of the skb before we modify/replace the ipv6 + * header as the header could be used elsewhere + */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + + ret = lowpan_header(skb, dev); + if (ret < 0) { + kfree_skb(skb); + return NET_XMIT_DROP; + } + if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) { kfree_skb(skb); return NET_XMIT_DROP; diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c index 32755cb7e64e3c02f77c85bdad6982d31311bcc5..7cfcd6885225b57cb94e1ac51a25baeb76d83380 100644 --- a/net/ieee802154/reassembly.c 
+++ b/net/ieee802154/reassembly.c @@ -485,7 +485,7 @@ static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net) static struct ctl_table_header *lowpan_ctl_header; -static int lowpan_frags_sysctl_register(void) +static int __init lowpan_frags_sysctl_register(void) { lowpan_ctl_header = register_net_sysctl(&init_net, "net/ieee802154/6lowpan", @@ -507,7 +507,7 @@ static inline void lowpan_frags_ns_sysctl_unregister(struct net *net) { } -static inline int lowpan_frags_sysctl_register(void) +static inline int __init lowpan_frags_sysctl_register(void) { return 0; } diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index dbc10d84161fc59047ee01feeb9f860227c3754a..e682b48e070993e4221a4400a8c86c3fa0c02684 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -309,8 +309,33 @@ config NET_IPVTI config NET_UDP_TUNNEL tristate + select NET_IP_TUNNEL default n +config NET_FOU + tristate "IP: Foo (IP protocols) over UDP" + select XFRM + select NET_UDP_TUNNEL + ---help--- + Foo over UDP allows any IP protocol to be directly encapsulated + over UDP include tunnels (IPIP, GRE, SIT). By encapsulating in UDP + network mechanisms and optimizations for UDP (such as ECMP + and RSS) can be leveraged to provide better service. + +config GENEVE + tristate "Generic Network Virtualization Encapsulation (Geneve)" + depends on INET + select NET_UDP_TUNNEL + ---help--- + This allows one to create Geneve virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. Geneve is often used + to tunnel virtual network infrastructure in virtualized environments. + For more information see: + http://tools.ietf.org/html/draft-gross-geneve-01 + + To compile this driver as a module, choose M here: the module + + config INET_AH tristate "IP: AH transformation" select XFRM_ALGO @@ -560,6 +585,27 @@ config TCP_CONG_ILLINOIS For further details see: http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html +config TCP_CONG_DCTCP + tristate "DataCenter TCP (DCTCP)" + default n + ---help--- + DCTCP leverages Explicit Congestion Notification (ECN) in the network to + provide multi-bit feedback to the end hosts. It is designed to provide: + + - High burst tolerance (incast due to partition/aggregate), + - Low latency (short flows, queries), + - High throughput (continuous data updates, large file transfers) with + commodity, shallow-buffered switches. + + All switches in the data center network running DCTCP must support + ECN marking and be configured for marking when reaching defined switch + buffer thresholds. The default ECN marking threshold heuristic for + DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets + (~100KB) at 10Gbps, but might need further careful tweaking. 
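As a sanity check on those numbers, with roughly 1500-byte frames 20 packets is 20 x 1500 B = 30 KB and 65 packets is 65 x 1500 B = 97.5 KB, i.e. about 100 KB. Per the referenced paper, the sender keeps a moving average of the marked fraction F once per RTT, alpha <- (1 - g) * alpha + g * F with a small gain g (1/16 in the paper), and on congestion shrinks cwnd <- cwnd * (1 - alpha/2) rather than halving it.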
+ + For further details see: + http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf + choice prompt "Default TCP congestion control" default DEFAULT_CUBIC @@ -588,9 +634,11 @@ choice config DEFAULT_WESTWOOD bool "Westwood" if TCP_CONG_WESTWOOD=y + config DEFAULT_DCTCP + bool "DCTCP" if TCP_CONG_DCTCP=y + config DEFAULT_RENO bool "Reno" - endchoice endif @@ -610,6 +658,7 @@ config DEFAULT_TCP_CONG default "westwood" if DEFAULT_WESTWOOD default "veno" if DEFAULT_VENO default "reno" if DEFAULT_RENO + default "dctcp" if DEFAULT_DCTCP default "cubic" config TCP_MD5SIG diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 8ee1cd4053ee742d723e1cca7c3cdee7c3527b5f..518c04ed666eee00f436872b97b43d224c31e111 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o obj-$(CONFIG_NET_IPIP) += ipip.o gre-y := gre_demux.o +obj-$(CONFIG_NET_FOU) += fou.o obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o obj-$(CONFIG_NET_IPGRE) += ip_gre.o obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o @@ -42,6 +43,7 @@ obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o +obj-$(CONFIG_TCP_CONG_DCTCP) += tcp_dctcp.o obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o obj-$(CONFIG_TCP_CONG_HYBLA) += tcp_hybla.o @@ -54,6 +56,7 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o obj-$(CONFIG_NETLABEL) += cipso_ipv4.o +obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ xfrm4_output.o xfrm4_protocol.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d156b3c5f3631f169f179bfdd534b69ce5070b82..92db7a69f2b9d106d7d235388719d3a476eb91d5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -418,10 +418,6 @@ int inet_release(struct socket *sock) } EXPORT_SYMBOL(inet_release); -/* It is off by default, see below. 
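The global flag removed in the next few lines moves into the per-namespace ipv4 state; the bind() check below now reads net->ipv4.sysctl_ip_nonlocal_bind. A per-netns sysctl of this kind is typically exposed roughly as follows (sketch, not a hunk of this patch; the table name is invented and only the init_net entry is shown):

	static struct ctl_table ipv4_net_table_sketch[] = {
		{
			.procname	= "ip_nonlocal_bind",
			.data		= &init_net.ipv4.sysctl_ip_nonlocal_bind,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};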
*/ -int sysctl_ip_nonlocal_bind __read_mostly; -EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); - int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; @@ -461,7 +457,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) * is temporarily down) */ err = -EADDRNOTAVAIL; - if (!sysctl_ip_nonlocal_bind && + if (!net->ipv4.sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && addr->sin_addr.s_addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && @@ -1201,40 +1197,6 @@ int inet_sk_rebuild_header(struct sock *sk) } EXPORT_SYMBOL(inet_sk_rebuild_header); -static int inet_gso_send_check(struct sk_buff *skb) -{ - const struct net_offload *ops; - const struct iphdr *iph; - int proto; - int ihl; - int err = -EINVAL; - - if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) - goto out; - - iph = ip_hdr(skb); - ihl = iph->ihl * 4; - if (ihl < sizeof(*iph)) - goto out; - - proto = iph->protocol; - - /* Warning: after this point, iph might be no longer valid */ - if (unlikely(!pskb_may_pull(skb, ihl))) - goto out; - __skb_pull(skb, ihl); - - skb_reset_transport_header(skb); - err = -EPROTONOSUPPORT; - - ops = rcu_dereference(inet_offloads[proto]); - if (likely(ops && ops->callbacks.gso_send_check)) - err = ops->callbacks.gso_send_check(skb); - -out: - return err; -} - static struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { @@ -1407,6 +1369,9 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, * immediately following this IP hdr. */ + /* Note : No need to call skb_gro_postpull_rcsum() here, + * as we already checked checksum over ipv4 header was 0 + */ skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); @@ -1659,7 +1624,6 @@ static int ipv4_proc_init(void); static struct packet_offload ip_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IP), .callbacks = { - .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, .gro_receive = inet_gro_receive, .gro_complete = inet_gro_complete, @@ -1668,8 +1632,9 @@ static struct packet_offload ip_packet_offload __read_mostly = { static const struct net_offload ipip_offload = { .callbacks = { - .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, + .gro_receive = inet_gro_receive, + .gro_complete = inet_gro_complete, }, }; diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index a2afa89513a06d43ecdac6033dfcf89c16ea8ffc..ac9a32ec3ee465a3ad59798dae19d3a307e77cd4 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -505,8 +505,6 @@ static int ah_init_state(struct xfrm_state *x) ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; ahp->icv_trunc_len = x->aalg->alg_trunc_len/8; - BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); - if (x->props.flags & XFRM_STATE_ALIGN4) x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 1a9b99e04465a1eb7f8a197bdc7ca6f1bd6978f0..16acb59d665e32e321a583b2178fcba5b7f7b88b 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -953,10 +953,11 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, { const struct arphdr *arp; + /* do not tweak dropwatch on an ARP we will ignore */ if (dev->flags & IFF_NOARP || skb->pkt_type == PACKET_OTHERHOST || skb->pkt_type == PACKET_LOOPBACK) - goto freeskb; + goto consumeskb; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) @@ -974,6 +975,9 @@ static int arp_rcv(struct sk_buff *skb, struct 
net_device *dev, return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); +consumeskb: + consume_skb(skb); + return 0; freeskb: kfree_skb(skb); out_of_mem: diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 05b708bbdb0dedd34a04d99b16391a6b2ca4a30c..4715f25dfe0351a2d219f8c2aa7756da75e51dcb 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -246,7 +246,7 @@ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) * success, negative values on error. * */ -static int cipso_v4_cache_init(void) +static int __init cipso_v4_cache_init(void) { u32 iter; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 255aa9946fe785a2577fbdb23560288599c1ba0a..23104a3f29245abeb682089185bd103ae4794cb2 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -243,7 +243,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, int rpf, struct in_device *idev, u32 *itag) { - int ret, no_addr, accept_local; + int ret, no_addr; struct fib_result res; struct flowi4 fl4; struct net *net; @@ -258,16 +258,17 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, no_addr = idev->ifa_list == NULL; - accept_local = IN_DEV_ACCEPT_LOCAL(idev); fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; net = dev_net(dev); if (fib_lookup(net, &fl4, &res)) goto last_resort; - if (res.type != RTN_UNICAST) { - if (res.type != RTN_LOCAL || !accept_local) - goto e_inval; - } + if (res.type != RTN_UNICAST && + (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) + goto e_inval; + if (!rpf && !fib_num_tclassid_users(dev_net(dev)) && + (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) + goto last_resort; fib_combine_itag(itag, &res); dev_match = false; @@ -321,6 +322,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, int r = secpath_exists(skb) ? 
0 : IN_DEV_RPFILTER(idev); if (!r && !fib_num_tclassid_users(dev_net(dev)) && + IN_DEV_ACCEPT_LOCAL(idev) && (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) { *itag = 0; return 0; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index b10cd43a4722730205272d7822d699bc49ea71d3..5b6efb3d2308b0b40b22da7274ad611c4f3d0158 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -157,9 +157,12 @@ static void rt_fibinfo_free(struct rtable __rcu **rtp) static void free_nh_exceptions(struct fib_nh *nh) { - struct fnhe_hash_bucket *hash = nh->nh_exceptions; + struct fnhe_hash_bucket *hash; int i; + hash = rcu_dereference_protected(nh->nh_exceptions, 1); + if (!hash) + return; for (i = 0; i < FNHE_HASH_SIZE; i++) { struct fib_nh_exception *fnhe; @@ -205,8 +208,7 @@ static void free_fib_info_rcu(struct rcu_head *head) change_nexthops(fi) { if (nexthop_nh->nh_dev) dev_put(nexthop_nh->nh_dev); - if (nexthop_nh->nh_exceptions) - free_nh_exceptions(nexthop_nh); + free_nh_exceptions(nexthop_nh); rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output); rt_fibinfo_free(&nexthop_nh->nh_rth_input); } endfor_nexthops(fi); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c new file mode 100644 index 0000000000000000000000000000000000000000..efa70ad44906aac3204cd90e76b35c3159caa2ac --- /dev/null +++ b/net/ipv4/fou.c @@ -0,0 +1,514 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(fou_lock); +static LIST_HEAD(fou_list); + +struct fou { + struct socket *sock; + u8 protocol; + u16 port; + struct udp_offload udp_offloads; + struct list_head list; +}; + +struct fou_cfg { + u16 type; + u8 protocol; + struct udp_port_cfg udp_config; +}; + +static inline struct fou *fou_from_sock(struct sock *sk) +{ + return sk->sk_user_data; +} + +static int fou_udp_encap_recv_deliver(struct sk_buff *skb, + u8 protocol, size_t len) +{ + struct iphdr *iph = ip_hdr(skb); + + /* Remove 'len' bytes from the packet (UDP header and + * FOU header if present), modify the protocol to the one + * we found, and then call rcv_encap. + */ + iph->tot_len = htons(ntohs(iph->tot_len) - len); + __skb_pull(skb, len); + skb_postpull_rcsum(skb, udp_hdr(skb), len); + skb_reset_transport_header(skb); + + return -protocol; +} + +static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) +{ + struct fou *fou = fou_from_sock(sk); + + if (!fou) + return 1; + + return fou_udp_encap_recv_deliver(skb, fou->protocol, + sizeof(struct udphdr)); +} + +static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) +{ + struct fou *fou = fou_from_sock(sk); + size_t len; + struct guehdr *guehdr; + struct udphdr *uh; + + if (!fou) + return 1; + + len = sizeof(struct udphdr) + sizeof(struct guehdr); + if (!pskb_may_pull(skb, len)) + goto drop; + + uh = udp_hdr(skb); + guehdr = (struct guehdr *)&uh[1]; + + len += guehdr->hlen << 2; + if (!pskb_may_pull(skb, len)) + goto drop; + + if (guehdr->version != 0) + goto drop; + + if (guehdr->flags) { + /* No support yet */ + goto drop; + } + + return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len); +drop: + kfree_skb(skb); + return 0; +} + +static struct sk_buff **fou_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + const struct net_offload *ops; + struct sk_buff **pp = NULL; + u8 proto = NAPI_GRO_CB(skb)->proto; + const struct net_offload **offloads; + + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); + if (!ops || !ops->callbacks.gro_receive) + goto out_unlock; + + pp = ops->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); + + return pp; +} + +static int fou_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct net_offload *ops; + u8 proto = NAPI_GRO_CB(skb)->proto; + int err = -ENOSYS; + const struct net_offload **offloads; + + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) + goto out_unlock; + + err = ops->callbacks.gro_complete(skb, nhoff); + +out_unlock: + rcu_read_unlock(); + + return err; +} + +static struct sk_buff **gue_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + const struct net_offload **offloads; + const struct net_offload *ops; + struct sk_buff **pp = NULL; + struct sk_buff *p; + u8 proto; + struct guehdr *guehdr; + unsigned int hlen, guehlen; + unsigned int off; + int flush = 1; + + off = skb_gro_offset(skb); + hlen = off + sizeof(*guehdr); + guehdr = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, hlen)) { + guehdr = skb_gro_header_slow(skb, hlen, off); + if (unlikely(!guehdr)) + goto out; + } + + proto = guehdr->next_hdr; + + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); + if (WARN_ON(!ops || !ops->callbacks.gro_receive)) + goto out_unlock; + + guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); + + hlen = off + guehlen; + if (skb_gro_header_hard(skb, hlen)) { + guehdr = skb_gro_header_slow(skb, hlen, off); + if (unlikely(!guehdr)) + goto out_unlock; + } + + flush = 0; + + for (p = *head; p; p = p->next) { + const struct guehdr *guehdr2; + + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + guehdr2 = (struct guehdr *)(p->data + off); + + /* Compare base GUE header to be equal (covers + * hlen, version, next_hdr, and flags. + */ + if (guehdr->word != guehdr2->word) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + + /* Compare optional fields are the same. */ + if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1], + guehdr->hlen << 2)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + skb_gro_pull(skb, guehlen); + + /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ + skb_gro_postpull_rcsum(skb, guehdr, guehlen); + + pp = ops->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} + +static int gue_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct net_offload **offloads; + struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff); + const struct net_offload *ops; + unsigned int guehlen; + u8 proto; + int err = -ENOENT; + + proto = guehdr->next_hdr; + + guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); + + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[proto]); + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) + goto out_unlock; + + err = ops->callbacks.gro_complete(skb, nhoff + guehlen); + +out_unlock: + rcu_read_unlock(); + return err; +} + +static int fou_add_to_port_list(struct fou *fou) +{ + struct fou *fout; + + spin_lock(&fou_lock); + list_for_each_entry(fout, &fou_list, list) { + if (fou->port == fout->port) { + spin_unlock(&fou_lock); + return -EALREADY; + } + } + + list_add(&fou->list, &fou_list); + spin_unlock(&fou_lock); + + return 0; +} + +static void fou_release(struct fou *fou) +{ + struct socket *sock = fou->sock; + struct sock *sk = sock->sk; + + udp_del_offload(&fou->udp_offloads); + + list_del(&fou->list); + + /* Remove hooks into tunnel socket */ + sk->sk_user_data = NULL; + + sock_release(sock); + + kfree(fou); +} + +static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) +{ + udp_sk(sk)->encap_rcv = fou_udp_recv; + fou->protocol = cfg->protocol; + fou->udp_offloads.callbacks.gro_receive = fou_gro_receive; + fou->udp_offloads.callbacks.gro_complete = fou_gro_complete; + fou->udp_offloads.port = cfg->udp_config.local_udp_port; + fou->udp_offloads.ipproto = cfg->protocol; + + return 0; +} + +static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) +{ + udp_sk(sk)->encap_rcv = gue_udp_recv; + fou->udp_offloads.callbacks.gro_receive = gue_gro_receive; + fou->udp_offloads.callbacks.gro_complete = gue_gro_complete; + fou->udp_offloads.port = cfg->udp_config.local_udp_port; + + return 0; +} + +static int fou_create(struct net *net, struct fou_cfg *cfg, + struct socket **sockp) +{ + struct fou *fou = NULL; + int err; + struct socket *sock = NULL; + struct sock *sk; + + /* Open UDP socket */ + err = udp_sock_create(net, &cfg->udp_config, &sock); + if (err < 0) + goto error; + + /* Allocate FOU port structure */ + fou = kzalloc(sizeof(*fou), GFP_KERNEL); + if (!fou) { + err = -ENOMEM; + goto error; + } + + sk = sock->sk; + + fou->port = cfg->udp_config.local_udp_port; + + /* Initial for fou type */ + switch (cfg->type) { + case FOU_ENCAP_DIRECT: + err = fou_encap_init(sk, fou, cfg); + if (err) + goto error; + break; + case FOU_ENCAP_GUE: + err = gue_encap_init(sk, fou, cfg); + if (err) + goto error; + break; + default: + err = -EINVAL; + goto error; + } + + udp_sk(sk)->encap_type = 1; + udp_encap_enable(); + + sk->sk_user_data = fou; + fou->sock = sock; + + udp_set_convert_csum(sk, true); + + sk->sk_allocation = GFP_ATOMIC; + + if (cfg->udp_config.family == AF_INET) { + err = udp_add_offload(&fou->udp_offloads); + if (err) + goto error; + } + + err = fou_add_to_port_list(fou); + if (err) + goto error; + + if (sockp) + *sockp = sock; + + return 0; + +error: + kfree(fou); + if (sock) + sock_release(sock); + + return err; +} + +static int fou_destroy(struct net *net, struct fou_cfg *cfg) +{ + struct fou *fou; + u16 port = cfg->udp_config.local_udp_port; + int err = -EINVAL; + + spin_lock(&fou_lock); + list_for_each_entry(fou, &fou_list, list) { + if (fou->port == port) { + udp_del_offload(&fou->udp_offloads); + fou_release(fou); + err = 0; + break; + } + } + spin_unlock(&fou_lock); + + return err; +} + +static struct genl_family fou_nl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = 0, + .name = FOU_GENL_NAME, + .version = FOU_GENL_VERSION, + .maxattr = FOU_ATTR_MAX, + .netnsok = true, +}; + +static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = { + [FOU_ATTR_PORT] = { .type = NLA_U16, }, + [FOU_ATTR_AF] = { 
.type = NLA_U8, }, + [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, + [FOU_ATTR_TYPE] = { .type = NLA_U8, }, +}; + +static int parse_nl_config(struct genl_info *info, + struct fou_cfg *cfg) +{ + memset(cfg, 0, sizeof(*cfg)); + + cfg->udp_config.family = AF_INET; + + if (info->attrs[FOU_ATTR_AF]) { + u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]); + + if (family != AF_INET && family != AF_INET6) + return -EINVAL; + + cfg->udp_config.family = family; + } + + if (info->attrs[FOU_ATTR_PORT]) { + u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]); + + cfg->udp_config.local_udp_port = port; + } + + if (info->attrs[FOU_ATTR_IPPROTO]) + cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]); + + if (info->attrs[FOU_ATTR_TYPE]) + cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]); + + return 0; +} + +static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info) +{ + struct fou_cfg cfg; + int err; + + err = parse_nl_config(info, &cfg); + if (err) + return err; + + return fou_create(&init_net, &cfg, NULL); +} + +static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info) +{ + struct fou_cfg cfg; + + parse_nl_config(info, &cfg); + + return fou_destroy(&init_net, &cfg); +} + +static const struct genl_ops fou_nl_ops[] = { + { + .cmd = FOU_CMD_ADD, + .doit = fou_nl_cmd_add_port, + .policy = fou_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = FOU_CMD_DEL, + .doit = fou_nl_cmd_rm_port, + .policy = fou_nl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static int __init fou_init(void) +{ + int ret; + + ret = genl_register_family_with_ops(&fou_nl_family, + fou_nl_ops); + + return ret; +} + +static void __exit fou_fini(void) +{ + struct fou *fou, *next; + + genl_unregister_family(&fou_nl_family); + + /* Close all the FOU sockets */ + + spin_lock(&fou_lock); + list_for_each_entry_safe(fou, next, &fou_list, list) + fou_release(fou); + spin_unlock(&fou_lock); +} + +module_init(fou_init); +module_exit(fou_fini); +MODULE_AUTHOR("Tom Herbert "); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c new file mode 100644 index 0000000000000000000000000000000000000000..065cd94c640c0c2a20cbf99c31cba588db54897e --- /dev/null +++ b/net/ipv4/geneve.c @@ -0,0 +1,373 @@ +/* + * Geneve: Generic Network Virtualization Encapsulation + * + * Copyright (c) 2014 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
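Spelling out the length math used by gue_udp_recv() and gue_gro_receive() in fou.c above (sketch; it assumes struct guehdr comes from net/gue.h and that hlen counts 32-bit words, as the hlen << 2 above implies):

	#include <linux/udp.h>
	#include <net/gue.h>

	/* UDP header + the one-word GUE base header (version, hlen,
	 * next_hdr, flags) + hlen words of optional fields.
	 */
	static size_t gue_encap_len(unsigned int hlen_words)
	{
		return sizeof(struct udphdr) + sizeof(struct guehdr) +
		       (hlen_words << 2);
	}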
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_IPV6) +#include +#include +#include +#include +#endif + +#define PORT_HASH_BITS 8 +#define PORT_HASH_SIZE (1<sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; +} + +/* Find geneve socket based on network namespace and UDP port */ +static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port) +{ + struct geneve_sock *gs; + + hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) { + if (inet_sk(gs->sock->sk)->inet_sport == port) + return gs; + } + + return NULL; +} + +static void geneve_build_header(struct genevehdr *geneveh, + __be16 tun_flags, u8 vni[3], + u8 options_len, u8 *options) +{ + geneveh->ver = GENEVE_VER; + geneveh->opt_len = options_len / 4; + geneveh->oam = !!(tun_flags & TUNNEL_OAM); + geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT); + geneveh->rsvd1 = 0; + memcpy(geneveh->vni, vni, 3); + geneveh->proto_type = htons(ETH_P_TEB); + geneveh->rsvd2 = 0; + + memcpy(geneveh->options, options, options_len); +} + +/* Transmit a fully formated Geneve frame. + * + * When calling this function. The skb->data should point + * to the geneve header which is fully formed. + * + * This function will add other UDP tunnel headers. + */ +int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, + __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, + __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, + bool xnet) +{ + struct genevehdr *gnvh; + int min_headroom; + int err; + + skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx); + + min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) + + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); + + err = skb_cow_head(skb, min_headroom); + if (unlikely(err)) + return err; + + if (vlan_tx_tag_present(skb)) { + if (unlikely(!__vlan_put_tag(skb, + skb->vlan_proto, + vlan_tx_tag_get(skb)))) { + err = -ENOMEM; + return err; + } + skb->vlan_tci = 0; + } + + gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); + geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); + + return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, + tos, ttl, df, src_port, dst_port, xnet); +} +EXPORT_SYMBOL_GPL(geneve_xmit_skb); + +static void geneve_notify_add_rx_port(struct geneve_sock *gs) +{ + struct sock *sk = gs->sock->sk; + sa_family_t sa_family = sk->sk_family; + int err; + + if (sa_family == AF_INET) { + err = udp_add_offload(&gs->udp_offloads); + if (err) + pr_warn("geneve: udp_add_offload failed with status %d\n", + err); + } +} + +/* Callback from net/ipv4/udp.c to receive packets */ +static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) +{ + struct genevehdr *geneveh; + struct geneve_sock *gs; + int opts_len; + + /* Need Geneve and inner Ethernet header to be present */ + if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) + goto error; + + /* Return packets with reserved bits set */ + geneveh = geneve_hdr(skb); + + if (unlikely(geneveh->ver != GENEVE_VER)) + goto error; + + if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) + goto error; + + opts_len = geneveh->opt_len * 4; + if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, + htons(ETH_P_TEB))) + goto drop; + + gs = rcu_dereference_sk_user_data(sk); + if (!gs) + goto drop; + + gs->rcv(gs, skb); + return 0; + +drop: + /* Consume bad packet */ + kfree_skb(skb); + return 0; + +error: + /* Let the UDP layer deal with the skb */ + return 1; +} + +static void geneve_del_work(struct work_struct *work) +{ + struct geneve_sock *gs = container_of(work, struct geneve_sock, + del_work); + + udp_tunnel_sock_release(gs->sock); + kfree_rcu(gs, rcu); +} + +static struct socket *geneve_create_sock(struct net *net, bool ipv6, + __be16 port) +{ + struct socket *sock; + struct udp_port_cfg udp_conf; + int err; + + memset(&udp_conf, 0, sizeof(udp_conf)); + + if (ipv6) { + udp_conf.family = AF_INET6; + } else { + udp_conf.family = AF_INET; + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + } + + udp_conf.local_udp_port = port; + + /* Open UDP socket */ + err = udp_sock_create(net, &udp_conf, &sock); + if (err < 0) + return ERR_PTR(err); + + return sock; +} + +/* Create new listen socket if needed */ +static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, + geneve_rcv_t *rcv, void *data, + bool ipv6) +{ + struct geneve_net *gn = net_generic(net, geneve_net_id); + struct geneve_sock *gs; + struct socket *sock; + struct udp_tunnel_sock_cfg tunnel_cfg; + + gs = kzalloc(sizeof(*gs), GFP_KERNEL); + if (!gs) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&gs->del_work, geneve_del_work); + + sock = geneve_create_sock(net, ipv6, port); + if (IS_ERR(sock)) { + kfree(gs); + return ERR_CAST(sock); + } + + gs->sock = sock; + atomic_set(&gs->refcnt, 1); + gs->rcv = rcv; + gs->rcv_data = data; + + /* Initialize the geneve udp offloads structure */ + gs->udp_offloads.port = port; + gs->udp_offloads.callbacks.gro_receive = NULL; + gs->udp_offloads.callbacks.gro_complete = NULL; + + spin_lock(&gn->sock_lock); + hlist_add_head_rcu(&gs->hlist, gs_head(net, port)); + geneve_notify_add_rx_port(gs); + spin_unlock(&gn->sock_lock); + + /* Mark socket as an encapsulation socket */ + tunnel_cfg.sk_user_data = gs; 
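A hypothetical consumer of the socket API this file provides through geneve_sock_add()/geneve_sock_release() just below (sketch only; the callback body, port number and flag choices are made up, and the callback signature is the one implied by the gs->rcv(gs, skb) call above):

	static void my_geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
	{
		/* skb starts at the decapsulated inner Ethernet frame;
		 * a real user would forward it, this stub just drops it.
		 */
		kfree_skb(skb);
	}

	static struct geneve_sock *my_geneve_open(struct net *net)
	{
		/* 6081 is the IANA-assigned Geneve port, any port works */
		return geneve_sock_add(net, htons(6081), my_geneve_rcv,
				       NULL /* rcv_data */,
				       false /* no_share */,
				       false /* ipv6 */);
	}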
+ tunnel_cfg.encap_type = 1; + tunnel_cfg.encap_rcv = geneve_udp_encap_recv; + tunnel_cfg.encap_destroy = NULL; + setup_udp_tunnel_sock(net, sock, &tunnel_cfg); + + return gs; +} + +struct geneve_sock *geneve_sock_add(struct net *net, __be16 port, + geneve_rcv_t *rcv, void *data, + bool no_share, bool ipv6) +{ + struct geneve_sock *gs; + + gs = geneve_socket_create(net, port, rcv, data, ipv6); + if (!IS_ERR(gs)) + return gs; + + if (no_share) /* Return error if sharing is not allowed. */ + return ERR_PTR(-EINVAL); + + gs = geneve_find_sock(net, port); + if (gs) { + if (gs->rcv == rcv) + atomic_inc(&gs->refcnt); + else + gs = ERR_PTR(-EBUSY); + } else { + gs = ERR_PTR(-EINVAL); + } + + return gs; +} +EXPORT_SYMBOL_GPL(geneve_sock_add); + +void geneve_sock_release(struct geneve_sock *gs) +{ + if (!atomic_dec_and_test(&gs->refcnt)) + return; + + queue_work(geneve_wq, &gs->del_work); +} +EXPORT_SYMBOL_GPL(geneve_sock_release); + +static __net_init int geneve_init_net(struct net *net) +{ + struct geneve_net *gn = net_generic(net, geneve_net_id); + unsigned int h; + + spin_lock_init(&gn->sock_lock); + + for (h = 0; h < PORT_HASH_SIZE; ++h) + INIT_HLIST_HEAD(&gn->sock_list[h]); + + return 0; +} + +static struct pernet_operations geneve_net_ops = { + .init = geneve_init_net, + .exit = NULL, + .id = &geneve_net_id, + .size = sizeof(struct geneve_net), +}; + +static int __init geneve_init_module(void) +{ + int rc; + + geneve_wq = alloc_workqueue("geneve", 0, 0); + if (!geneve_wq) + return -ENOMEM; + + rc = register_pernet_subsys(&geneve_net_ops); + if (rc) + return rc; + + pr_info("Geneve driver\n"); + + return 0; +} +late_initcall(geneve_init_module); + +static void __exit geneve_cleanup_module(void) +{ + destroy_workqueue(geneve_wq); +} +module_exit(geneve_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jesse Gross "); +MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic"); +MODULE_ALIAS_RTNL_LINK("geneve"); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 0485bf7f8f030d59bc6e9ee499051e99d9ab53d6..4a7b5b2a1ce3ddbd79e9282051020a4509dd7fe5 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -98,7 +98,6 @@ EXPORT_SYMBOL_GPL(gre_build_header); static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err) { - unsigned int ip_hlen = ip_hdrlen(skb); const struct gre_base_hdr *greh; __be32 *options; int hdr_len; @@ -106,7 +105,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) return -EINVAL; - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + greh = (struct gre_base_hdr *)skb_transport_header(skb); if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) return -EINVAL; @@ -116,7 +115,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, if (!pskb_may_pull(skb, hdr_len)) return -EINVAL; - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + greh = (struct gre_base_hdr *)skb_transport_header(skb); tpi->proto = greh->protocol; options = (__be32 *)(greh + 1); @@ -125,6 +124,10 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, *csum_err = true; return -EINVAL; } + + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + options++; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6556263c8fa52330efd6e4d9d4f5b491ac9dbe41..a777295030716930a8ec6ef90f121f363f859683 100644 --- a/net/ipv4/gre_offload.c +++ 
b/net/ipv4/gre_offload.c @@ -15,13 +15,6 @@ #include #include -static int gre_gso_send_check(struct sk_buff *skb) -{ - if (!skb->encapsulation) - return -EINVAL; - return 0; -} - static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { @@ -46,6 +39,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, SKB_GSO_IPIP))) goto out; + if (!skb->encapsulation) + goto out; + if (unlikely(!pskb_may_pull(skb, sizeof(*greh)))) goto out; @@ -119,28 +115,6 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, return segs; } -/* Compute the whole skb csum in s/w and store it, then verify GRO csum - * starting from gro_offset. - */ -static __sum16 gro_skb_checksum(struct sk_buff *skb) -{ - __sum16 sum; - - skb->csum = skb_checksum(skb, 0, skb->len, 0); - NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum, - csum_partial(skb->data, skb_gro_offset(skb), 0)); - sum = csum_fold(NAPI_GRO_CB(skb)->csum); - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) { - if (unlikely(!sum) && !skb->csum_complete_sw) - netdev_rx_csum_fault(skb->dev); - } else { - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum_complete_sw = 1; - } - - return sum; -} - static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) { @@ -192,22 +166,16 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, if (unlikely(!greh)) goto out_unlock; } - if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */ - __sum16 csum = 0; - - if (skb->ip_summed == CHECKSUM_COMPLETE) - csum = csum_fold(NAPI_GRO_CB(skb)->csum); - /* Don't trust csum error calculated/reported by h/w */ - if (skb->ip_summed == CHECKSUM_NONE || csum != 0) - csum = gro_skb_checksum(skb); - - /* GRE CSUM is the 1's complement of the 1's complement sum - * of the GRE hdr plus payload so it should add up to 0xffff - * (and 0 after csum_fold()) just like the IPv4 hdr csum. - */ - if (csum) + + /* Don't bother verifying checksum if we're going to flush anyway. */ + if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { + if (skb_gro_checksum_simple_validate(skb)) goto out_unlock; + + skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); } + flush = 0; for (p = *head; p; p = p->next) { @@ -284,7 +252,6 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload gre_offload = { .callbacks = { - .gso_send_check = gre_gso_send_check, .gso_segment = gre_gso_segment, .gro_receive = gre_gro_receive, .gro_complete = gre_gro_complete, diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ea7d4afe8205860ca4917f5403821c7afa4aa227..5882f584910edcd38affd3be200df9d0ddf1ee14 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -231,12 +231,62 @@ static inline void icmp_xmit_unlock(struct sock *sk) spin_unlock_bh(&sk->sk_lock.slock); } +int sysctl_icmp_msgs_per_sec __read_mostly = 1000; +int sysctl_icmp_msgs_burst __read_mostly = 50; + +static struct { + spinlock_t lock; + u32 credit; + u32 stamp; +} icmp_global = { + .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock), +}; + +/** + * icmp_global_allow - Are we allowed to send one more ICMP message ? + * + * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec. + * Returns false if we reached the limit and can not send another packet. + * Note: called with BH disabled + */ +bool icmp_global_allow(void) +{ + u32 credit, delta, incr = 0, now = (u32)jiffies; + bool rc = false; + + /* Check if token bucket is empty and cannot be refilled + * without taking the spinlock. 
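Worked example, assuming HZ = 1000 (one jiffy per millisecond) and the defaults above: the refill arithmetic just below refuses to top up the bucket until at least HZ / 50 = 20 jiffies (20 ms) have elapsed; a 20 ms gap then adds sysctl_icmp_msgs_per_sec * 20 / HZ = 20 tokens, and the credit is always clamped to sysctl_icmp_msgs_burst = 50, so an idle host can emit a burst of at most 50 ICMP messages before settling back to roughly 1000 per second.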
+ */ + if (!icmp_global.credit) { + delta = min_t(u32, now - icmp_global.stamp, HZ); + if (delta < HZ / 50) + return false; + } + + spin_lock(&icmp_global.lock); + delta = min_t(u32, now - icmp_global.stamp, HZ); + if (delta >= HZ / 50) { + incr = sysctl_icmp_msgs_per_sec * delta / HZ ; + if (incr) + icmp_global.stamp = now; + } + credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); + if (credit) { + credit--; + rc = true; + } + icmp_global.credit = credit; + spin_unlock(&icmp_global.lock); + return rc; +} +EXPORT_SYMBOL(icmp_global_allow); + /* * Send an ICMP frame. */ -static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, - struct flowi4 *fl4, int type, int code) +static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, + struct flowi4 *fl4, int type, int code) { struct dst_entry *dst = &rt->dst; bool rc = true; @@ -253,8 +303,14 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, goto out; /* Limit if icmp type is enabled in ratemask. */ - if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { - struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1); + if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) + goto out; + + rc = false; + if (icmp_global_allow()) { + struct inet_peer *peer; + + peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1); rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); if (peer) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index f10eab462282b1afa80bcc3e7bac7f862ab9709f..fb70e3ecc3e4d58da5db9756db9b6da027369288 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -117,7 +117,7 @@ #define IGMP_V2_Unsolicited_Report_Interval (10*HZ) #define IGMP_V3_Unsolicited_Report_Interval (1*HZ) #define IGMP_Query_Response_Interval (10*HZ) -#define IGMP_Unsolicited_Report_Count 2 +#define IGMP_Query_Robustness_Variable 2 #define IGMP_Initial_Report_Delay (1) @@ -756,8 +756,7 @@ static void igmp_ifc_event(struct in_device *in_dev) { if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) return; - in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + in_dev->mr_ifc_count = in_dev->mr_qrv ?: sysctl_igmp_qrv; igmp_ifc_start_timer(in_dev, 1); } @@ -932,7 +931,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, in_dev->mr_qrv = ih3->qrv; if (!group) { /* general query */ if (ih3->nsrcs) - return false; /* no sources allowed */ + return true; /* no sources allowed */ igmp_gq_start_timer(in_dev); return false; } @@ -1086,8 +1085,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc->interface = im->interface; in_dev_hold(in_dev); pmc->multiaddr = im->multiaddr; - pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; pmc->sfmode = im->sfmode; if (pmc->sfmode == MCAST_INCLUDE) { struct ip_sf_list *psf; @@ -1226,8 +1224,7 @@ static void igmp_group_added(struct ip_mc_list *im) } /* else, v3 */ - im->crcount = in_dev->mr_qrv ? 
in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + im->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; igmp_ifc_event(in_dev); #endif } @@ -1322,7 +1319,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im); - im->unsolicit_count = IGMP_Unsolicited_Report_Count; + im->unsolicit_count = sysctl_igmp_qrv; #endif im->next_rcu = in_dev->mc_list; @@ -1460,7 +1457,7 @@ void ip_mc_init_dev(struct in_device *in_dev) (unsigned long)in_dev); setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, (unsigned long)in_dev); - in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; + in_dev->mr_qrv = sysctl_igmp_qrv; #endif spin_lock_init(&in_dev->mc_tomb_lock); @@ -1474,6 +1471,9 @@ void ip_mc_up(struct in_device *in_dev) ASSERT_RTNL(); +#ifdef CONFIG_IP_MULTICAST + in_dev->mr_qrv = sysctl_igmp_qrv; +#endif ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); for_each_pmc_rtnl(in_dev, pmc) @@ -1540,7 +1540,9 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) */ int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS; int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF; - +#ifdef CONFIG_IP_MULTICAST +int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable; +#endif static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, __be32 *psfsrc) @@ -1575,8 +1577,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, #ifdef CONFIG_IP_MULTICAST if (psf->sf_oldin && !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { - psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + psf->sf_crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; psf->sf_next = pmc->tomb; pmc->tomb = psf; rv = 1; @@ -1639,8 +1640,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, /* filter mode change */ pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST - pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -1818,8 +1818,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, #ifdef CONFIG_IP_MULTICAST /* else no filters; keep old mode for reports */ - pmc->crcount = in_dev->mr_qrv ? 
in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -2539,7 +2538,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v) querier = "NONE"; #endif - if (rcu_dereference(state->in_dev->mc_list) == im) { + if (rcu_access_pointer(state->in_dev->mc_list) == im) { seq_printf(seq, "%d\t%-10s: %5d %7s\n", state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 43116e8c8e1323cdd8d902c5b88e42187c8df991..9111a4e221557173df0ce08e95632ee059d00b61 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -229,7 +229,7 @@ struct sock *__inet_lookup_listener(struct net *net, } } else if (score == hiscore && reuseport) { matches++; - if (((u64)phash * matches) >> 32 == 0) + if (reciprocal_scale(phash, matches) == 0) result = sk; phash = next_pseudo_random32(phash); } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index bd5f5928167dbd44c72fbd1a5eefd475bd29f260..241afd743d2ccfda93bfb42b928020f2d9dc0486 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -72,29 +72,10 @@ void inet_peer_base_init(struct inet_peer_base *bp) { bp->root = peer_avl_empty_rcu; seqlock_init(&bp->lock); - bp->flush_seq = ~0U; bp->total = 0; } EXPORT_SYMBOL_GPL(inet_peer_base_init); -static atomic_t v4_seq = ATOMIC_INIT(0); -static atomic_t v6_seq = ATOMIC_INIT(0); - -static atomic_t *inetpeer_seq_ptr(int family) -{ - return (family == AF_INET ? &v4_seq : &v6_seq); -} - -static inline void flush_check(struct inet_peer_base *base, int family) -{ - atomic_t *fp = inetpeer_seq_ptr(family); - - if (unlikely(base->flush_seq != atomic_read(fp))) { - inetpeer_invalidate_tree(base); - base->flush_seq = atomic_read(fp); - } -} - #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ /* Exported for sysctl_net_ipv4. */ @@ -444,8 +425,6 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, unsigned int sequence; int invalidated, gccnt = 0; - flush_check(base, daddr->family); - /* Attempt a lockless lookup first. * Because of a concurrent writer, we might not find an existing entry. */ diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 15f0e2bad7ad10989ece456fd17cae589e1493b3..2811cc18701a2f49499286fd1a5c4f891abf6144 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -790,7 +790,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) kfree(table); } -static void ip4_frags_ctl_register(void) +static void __init ip4_frags_ctl_register(void) { register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } @@ -804,7 +804,7 @@ static inline void ip4_frags_ns_ctl_unregister(struct net *net) { } -static inline void ip4_frags_ctl_register(void) +static inline void __init ip4_frags_ctl_register(void) { } #endif diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 9b842544aea3d8155a28a4b9f8030901bb712a9c..12055fdbe716f0f1d905f288cbe36415f0e4f592 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -239,7 +239,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, tpi.seq = htonl(tunnel->o_seqno); /* Push GRE header. 
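For the __inet_lookup_listener() hunk above: reciprocal_scale(val, N) is equivalent to the open-coded expression it replaces, mapping a uniformly distributed 32-bit hash onto the interval [0, N) without a division:

	/* equivalent of the removed ((u64)phash * matches) >> 32 */
	static inline u32 reciprocal_scale_sketch(u32 val, u32 ep_ro)
	{
		return (u32)(((u64)val * ep_ro) >> 32);
	}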
*/ - gre_build_header(skb, &tpi, tunnel->hlen); + gre_build_header(skb, &tpi, tunnel->tun_hlen); + + skb_set_inner_protocol(skb, tpi.proto); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } @@ -310,7 +312,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, static int ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - int err = 0; + int err; struct ip_tunnel_parm p; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) @@ -470,13 +472,18 @@ static void ipgre_tunnel_setup(struct net_device *dev) static void __gre_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel; + int t_hlen; tunnel = netdev_priv(dev); - tunnel->hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); + tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); tunnel->parms.iph.protocol = IPPROTO_GRE; - dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4; - dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4; + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; + + t_hlen = tunnel->hlen + sizeof(struct iphdr); + + dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; + dev->mtu = ETH_DATA_LEN - t_hlen - 4; dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; @@ -503,7 +510,7 @@ static int ipgre_tunnel_init(struct net_device *dev) memcpy(dev->broadcast, &iph->daddr, 4); dev->flags = IFF_NOARP; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); dev->addr_len = 4; if (iph->daddr) { @@ -628,6 +635,40 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], parms->iph.frag_off = htons(IP_DF); } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipgre_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_GRE_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); + } + + if (data[IFLA_GRE_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); + } + + if (data[IFLA_GRE_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_GRE_ENCAP_SPORT]); + } + + if (data[IFLA_GRE_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_GRE_ENCAP_DPORT]); + } + + return ret; +} + static int gre_tap_init(struct net_device *dev) { __gre_tunnel_init(dev); @@ -657,6 +698,15 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipgre_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_newlink(dev, tb, &p); @@ -666,6 +716,15 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipgre_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_changelink(dev, tb, &p); @@ -694,6 +753,14 @@ static size_t ipgre_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_GRE_PMTUDISC */ nla_total_size(1) + + /* IFLA_GRE_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_GRE_ENCAP_FLAGS */ + nla_total_size(2) + + 
/* IFLA_GRE_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_GRE_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -714,6 +781,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)))) goto nla_put_failure; + + if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, + t->encap.type) || + nla_put_u16(skb, IFLA_GRE_ENCAP_SPORT, + t->encap.sport) || + nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT, + t->encap.dport) || + nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, + t->encap.dport)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -731,6 +809,10 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_TTL] = { .type = NLA_U8 }, [IFLA_GRE_TOS] = { .type = NLA_U8 }, [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 }, + [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index ad382499bace4ce3852c2953bb262f2796d9a416..5b3d91be2db0c8f1a78606727c703475dd61b598 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -87,17 +87,15 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, * NOTE: dopt cannot point to skb. */ -int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) +int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, + const struct ip_options *sopt) { - const struct ip_options *sopt; unsigned char *sptr, *dptr; int soffset, doffset; int optlen; memset(dopt, 0, sizeof(struct ip_options)); - sopt = &(IPCB(skb)->opt); - if (sopt->optlen == 0) return 0; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 215af2b155cb1946ff5f4b4d7d0fff19291573b8..e35b712891563d443fedde951f927b5068552bae 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -516,7 +516,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) hlen = iph->ihl * 4; mtu = mtu - hlen; /* Size of data space */ -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (skb->nf_bridge) mtu -= nf_bridge_mtu_reduction(skb); #endif @@ -1522,8 +1522,10 @@ static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { .uc_ttl = -1, }; -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, - __be32 saddr, const struct ip_reply_arg *arg, +void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, + const struct ip_options *sopt, + __be32 daddr, __be32 saddr, + const struct ip_reply_arg *arg, unsigned int len) { struct ip_options_data replyopts; @@ -1534,7 +1536,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, struct sock *sk; struct inet_sock *inet; - if (ip_options_echo(&replyopts.opt.opt, skb)) + if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) return; ipc.addr = daddr; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5cb830c78990a8642aed4df591b7109260de685c..c373a9ad45556815d1d95f4faf71b8f8a3f61996 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -303,7 +303,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, } /* dont let ip_call_ra_chain() use sk again */ ra->sk = NULL; - rcu_assign_pointer(*rap, ra->next); + RCU_INIT_POINTER(*rap, ra->next); spin_unlock_bh(&ip_ra_lock); if (ra->destructor) @@ -325,7 +325,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, new_ra->sk = sk; 
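
Illustrative sketch (not part of the patch): each nla_total_size(2) term added to ipgre_get_size() above reserves room for one u16 attribute, i.e. a 4-byte attribute header plus the 2-byte payload rounded up to a 4-byte boundary. A quick userspace check of that arithmetic, using the usual netlink constants:

#include <stdio.h>

#define NLA_HDRLEN              4                       /* sizeof(struct nlattr) */
#define NLA_ALIGN(len)          (((len) + 3) & ~3)
#define NLA_TOTAL_SIZE(payload) NLA_ALIGN(NLA_HDRLEN + (payload))

int main(void)
{
        /* IFLA_GRE_ENCAP_TYPE/FLAGS/SPORT/DPORT are u16 attributes */
        printf("one u16 attribute : %d bytes\n", NLA_TOTAL_SIZE(2));
        printf("all four together : %d bytes\n", 4 * NLA_TOTAL_SIZE(2));
        return 0;
}
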
new_ra->destructor = destructor; - new_ra->next = ra; + RCU_INIT_POINTER(new_ra->next, ra); rcu_assign_pointer(*rap, new_ra); sock_hold(sk); spin_unlock_bh(&ip_ra_lock); @@ -405,7 +405,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct { struct sock_extended_err ee; @@ -415,7 +415,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int copied; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -462,17 +462,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - skb2 = skb_peek(&sk->sk_error_queue); - if (skb2 != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else - spin_unlock_bh(&sk->sk_error_queue.lock); - out_free_skb: kfree_skb(skb); out: diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index bda4bb8ae2608e4543a505d8ae039fa301a8a9e6..0bb8e141eaccd66e9865385a9c71b33d1bbfc65f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -55,6 +55,8 @@ #include #include #include +#include +#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -487,6 +489,103 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(ip_tunnel_rcv); +static int ip_encap_hlen(struct ip_tunnel_encap *e) +{ + switch (e->type) { + case TUNNEL_ENCAP_NONE: + return 0; + case TUNNEL_ENCAP_FOU: + return sizeof(struct udphdr); + case TUNNEL_ENCAP_GUE: + return sizeof(struct udphdr) + sizeof(struct guehdr); + default: + return -EINVAL; + } +} + +int ip_tunnel_encap_setup(struct ip_tunnel *t, + struct ip_tunnel_encap *ipencap) +{ + int hlen; + + memset(&t->encap, 0, sizeof(t->encap)); + + hlen = ip_encap_hlen(ipencap); + if (hlen < 0) + return hlen; + + t->encap.type = ipencap->type; + t->encap.sport = ipencap->sport; + t->encap.dport = ipencap->dport; + t->encap.flags = ipencap->flags; + + t->encap_hlen = hlen; + t->hlen = t->encap_hlen + t->tun_hlen; + + return 0; +} +EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup); + +static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + size_t hdr_len, u8 *protocol, struct flowi4 *fl4) +{ + struct udphdr *uh; + __be16 sport; + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* Get length and hash before making space in skb */ + + sport = e->sport ? 
: udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + + skb_push(skb, hdr_len); + + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + if (e->type == TUNNEL_ENCAP_GUE) { + struct guehdr *guehdr = (struct guehdr *)&uh[1]; + + guehdr->version = 0; + guehdr->hlen = 0; + guehdr->flags = 0; + guehdr->next_hdr = *protocol; + } + + uh->dest = e->dport; + uh->source = sport; + uh->len = htons(skb->len); + uh->check = 0; + udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, + fl4->saddr, fl4->daddr, skb->len); + + *protocol = IPPROTO_UDP; + + return 0; +} + +int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4) +{ + switch (t->encap.type) { + case TUNNEL_ENCAP_NONE: + return 0; + case TUNNEL_ENCAP_FOU: + case TUNNEL_ENCAP_GUE: + return fou_build_header(skb, &t->encap, t->encap_hlen, + protocol, fl4); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL(ip_tunnel_encap); + static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, struct rtable *rt, __be16 df) { @@ -536,7 +635,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, } void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params, const u8 protocol) + const struct iphdr *tnl_params, u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; @@ -617,6 +716,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); + if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) + goto tx_error; + rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL; if (!rt) { @@ -670,7 +772,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, df |= (inner_iph->frag_off&htons(IP_DF)); max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) - + rt->dst.header_len; + + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); if (max_headroom > dev->needed_headroom) dev->needed_headroom = max_headroom; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index e453cb724a950317d5f9b96db7de8bd969b7f111..3e861011e4a31e57b21c3fa507c178d3693a7970 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -364,7 +364,7 @@ static int vti_tunnel_init(struct net_device *dev) dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_LLTX; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); return ip_tunnel_init(dev); } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 5bbef4fdcb439f7c3445278dc580b3f0aaacf907..648fa1490ea7f96d1f373b4f8e6b167ed70e82e3 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -262,7 +262,8 @@ static int __init ic_open_devs(void) /* wait for a carrier on at least one device */ start = jiffies; next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12); - while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { + while (time_before(jiffies, start + + msecs_to_jiffies(CONF_CARRIER_TIMEOUT))) { int wait, elapsed; for_each_netdev(&init_net, dev) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 62eaa005e14610237f9e8a6016c366ba6d6b09cf..37096d64730ef0c3c7c7278995f9c0c6e64896e9 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -224,6 +224,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (IS_ERR(skb)) goto out; + skb_set_inner_ipproto(skb, IPPROTO_IPIP); + ip_tunnel_xmit(skb, dev, tiph, tiph->protocol); return NETDEV_TX_OK; @@ -287,7 +289,7 @@ 
static void ipip_tunnel_setup(struct net_device *dev) dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_LLTX; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); dev->features |= IPIP_FEATURES; dev->hw_features |= IPIP_FEATURES; @@ -301,7 +303,8 @@ static int ipip_tunnel_init(struct net_device *dev) memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); - tunnel->hlen = 0; + tunnel->tun_hlen = 0; + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; tunnel->parms.iph.protocol = IPPROTO_IPIP; return ip_tunnel_init(dev); } @@ -340,10 +343,53 @@ static void ipip_netlink_parms(struct nlattr *data[], parms->iph.frag_off = htons(IP_DF); } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipip_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_IPTUN_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]); + } + + if (data[IFLA_IPTUN_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]); + } + + if (data[IFLA_IPTUN_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + } + + if (data[IFLA_IPTUN_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + } + + return ret; +} + static int ipip_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipip_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipip_netlink_parms(data, &p); return ip_tunnel_newlink(dev, tb, &p); @@ -353,6 +399,15 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipip_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipip_netlink_parms(data, &p); @@ -378,6 +433,14 @@ static size_t ipip_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_IPTUN_PMTUDISC */ nla_total_size(1) + + /* IFLA_IPTUN_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_FLAGS */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -394,6 +457,17 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, !!(parm->iph.frag_off & htons(IP_DF)))) goto nla_put_failure; + + if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, + tunnel->encap.type) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + tunnel->encap.sport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + tunnel->encap.dport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, + tunnel->encap.dport)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -407,6 +481,10 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, [IFLA_IPTUN_TOS] = { .type = NLA_U8 }, [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, + [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, + 
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, }; static struct rtnl_link_ops ipip_link_ops __read_mostly = { diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 7cbcaf4f0194f22b797f04b1fca1029a0a4e159e..4c019d5c3f5759128b8de9ac900282aaa5f014e2 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -61,18 +61,13 @@ config NFT_CHAIN_ROUTE_IPV4 fields such as the source, destination, type of service and the packet mark. -config NFT_CHAIN_NAT_IPV4 - depends on NF_TABLES_IPV4 - depends on NF_NAT_IPV4 && NFT_NAT - tristate "IPv4 nf_tables nat chain support" - help - This option enables the "nat" chain for IPv4 in nf_tables. This - chain type is used to perform Network Address Translation (NAT) - packet transformations such as the source, destination address and - source and destination ports. +config NF_REJECT_IPV4 + tristate "IPv4 packet rejection" + default m if NETFILTER_ADVANCED=n config NFT_REJECT_IPV4 depends on NF_TABLES_IPV4 + select NF_REJECT_IPV4 default NFT_REJECT tristate @@ -94,6 +89,30 @@ config NF_NAT_IPV4 if NF_NAT_IPV4 +config NFT_CHAIN_NAT_IPV4 + depends on NF_TABLES_IPV4 + tristate "IPv4 nf_tables nat chain support" + help + This option enables the "nat" chain for IPv4 in nf_tables. This + chain type is used to perform Network Address Translation (NAT) + packet transformations such as the source, destination address and + source and destination ports. + +config NF_NAT_MASQUERADE_IPV4 + tristate "IPv4 masquerade support" + help + This is the kernel functionality to provide NAT in the masquerade + flavour (automatic source address selection). + +config NFT_MASQ_IPV4 + tristate "IPv4 masquerading support for nf_tables" + depends on NF_TABLES_IPV4 + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV4 + help + This is the expression that provides IPv4 masquerading support for + nf_tables. 
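
Illustrative sketch (not part of the patch): stepping back to the ip_encap_hlen()/ip_tunnel_encap_setup() changes earlier in this series, the extra header budget is sizeof(struct udphdr) for FOU plus a small GUE header for GUE; the 12-byte GUE total below is an assumption based on the fields filled in fou_build_header(). This is how that budget feeds the MTU computation in __gre_tunnel_init():

#include <stdio.h>

enum encap_type { ENCAP_NONE, ENCAP_FOU, ENCAP_GUE };

static int encap_hlen(enum encap_type type)
{
        switch (type) {
        case ENCAP_NONE: return 0;
        case ENCAP_FOU:  return 8;       /* UDP header */
        case ENCAP_GUE:  return 8 + 4;   /* UDP header + GUE header */
        }
        return -1;
}

int main(void)
{
        int tun_hlen = 4;                                   /* plain GRE, no options */
        int t_hlen = tun_hlen + encap_hlen(ENCAP_GUE) + 20; /* + outer IPv4 header */

        /* mirrors __gre_tunnel_init(): dev->mtu = ETH_DATA_LEN - t_hlen - 4 */
        printf("GRE-over-GUE tunnel MTU: %d\n", 1500 - t_hlen - 4);
        return 0;
}
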
+ config NF_NAT_SNMP_BASIC tristate "Basic SNMP-ALG support" depends on NF_CONNTRACK_SNMP @@ -194,6 +213,7 @@ config IP_NF_FILTER config IP_NF_TARGET_REJECT tristate "REJECT target support" depends on IP_NF_FILTER + select NF_REJECT_IPV4 default m if NETFILTER_ADVANCED=n help The REJECT target allows a filtering rule to specify that an ICMP @@ -234,6 +254,7 @@ if IP_NF_NAT config IP_NF_TARGET_MASQUERADE tristate "MASQUERADE target support" + select NF_NAT_MASQUERADE_IPV4 default m if NETFILTER_ADVANCED=n help Masquerading is a special case of NAT: all outgoing connections are diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index edf4af32e9f28fcdb3a391abbb0d651b4a16af38..f4cef5af096978e813e179cfbe714862799556d4 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -23,10 +23,14 @@ obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o +# reject +obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o + # NAT helpers (nf_conntrack) obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o +obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o # NAT protocols (nf_nat) obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o @@ -35,6 +39,7 @@ obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o +obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o # generic IP tables diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 2510c02c2d2168ffcd27f286c7eab3ca75165f26..e90f83a3415b464014cecf0f072b2188f6889b7a 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -285,7 +285,7 @@ clusterip_hashfn(const struct sk_buff *skb, } /* node numbers are 1..n, not 0..n */ - return (((u64)hashval * config->num_total_nodes) >> 32) + 1; + return reciprocal_scale(hashval, config->num_total_nodes) + 1; } static inline int diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 00352ce0f0dec43f0f0d08e0e77115c97625ab89..da7f02a0b868843b01dffcaeb75181584585f81f 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -22,6 +22,7 @@ #include #include #include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team "); @@ -46,103 +47,17 @@ static int masquerade_tg_check(const struct xt_tgchk_param *par) static unsigned int masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) { - struct nf_conn *ct; - struct nf_conn_nat *nat; - enum ip_conntrack_info ctinfo; - struct nf_nat_range newrange; + struct nf_nat_range range; const struct nf_nat_ipv4_multi_range_compat *mr; - const struct rtable *rt; - __be32 newsrc, nh; - - NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); - - ct = nf_ct_get(skb, &ctinfo); - nat = nfct_nat(ct); - - NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED_REPLY)); - - /* Source address is 0.0.0.0 - locally generated packet that is - * probably not supposed to be masqueraded. 
- */ - if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) - return NF_ACCEPT; mr = par->targinfo; - rt = skb_rtable(skb); - nh = rt_nexthop(rt, ip_hdr(skb)->daddr); - newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE); - if (!newsrc) { - pr_info("%s ate my IP address\n", par->out->name); - return NF_DROP; - } - - nat->masq_index = par->out->ifindex; - - /* Transfer from original range. */ - memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); - memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); - newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; - newrange.min_addr.ip = newsrc; - newrange.max_addr.ip = newsrc; - newrange.min_proto = mr->range[0].min; - newrange.max_proto = mr->range[0].max; + range.flags = mr->range[0].flags; + range.min_proto = mr->range[0].min; + range.max_proto = mr->range[0].max; - /* Hand modified range to generic setup. */ - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); + return nf_nat_masquerade_ipv4(skb, par->hooknum, &range, par->out); } -static int -device_cmp(struct nf_conn *i, void *ifindex) -{ - const struct nf_conn_nat *nat = nfct_nat(i); - - if (!nat) - return 0; - if (nf_ct_l3num(i) != NFPROTO_IPV4) - return 0; - return nat->masq_index == (int)(long)ifindex; -} - -static int masq_device_event(struct notifier_block *this, - unsigned long event, - void *ptr) -{ - const struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct net *net = dev_net(dev); - - if (event == NETDEV_DOWN) { - /* Device was downed. Search entire table for - conntracks which were associated with that device, - and forget them. */ - NF_CT_ASSERT(dev->ifindex != 0); - - nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex, 0, 0); - } - - return NOTIFY_DONE; -} - -static int masq_inet_event(struct notifier_block *this, - unsigned long event, - void *ptr) -{ - struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; - struct netdev_notifier_info info; - - netdev_notifier_info_init(&info, dev); - return masq_device_event(this, event, &info); -} - -static struct notifier_block masq_dev_notifier = { - .notifier_call = masq_device_event, -}; - -static struct notifier_block masq_inet_notifier = { - .notifier_call = masq_inet_event, -}; - static struct xt_target masquerade_tg_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV4, @@ -160,12 +75,8 @@ static int __init masquerade_tg_init(void) ret = xt_register_target(&masquerade_tg_reg); - if (ret == 0) { - /* Register for device down reports */ - register_netdevice_notifier(&masq_dev_notifier); - /* Register IP address change reports */ - register_inetaddr_notifier(&masq_inet_notifier); - } + if (ret == 0) + nf_nat_masquerade_ipv4_register_notifier(); return ret; } @@ -173,8 +84,7 @@ static int __init masquerade_tg_init(void) static void __exit masquerade_tg_exit(void) { xt_unregister_target(&masquerade_tg_reg); - unregister_netdevice_notifier(&masq_dev_notifier); - unregister_inetaddr_notifier(&masq_inet_notifier); + nf_nat_masquerade_ipv4_unregister_notifier(); } module_init(masquerade_tg_init); diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 5b6e0df4ccffcbd712b15757a9557b3cefbbabbd..8f48f5517e336e61e3457379744fffd792816fcb 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c @@ -20,7 +20,7 @@ #include #include #include -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #include #endif diff --git a/net/ipv4/netfilter/iptable_nat.c 
b/net/ipv4/netfilter/iptable_nat.c index f1787c04a4ddfe06f70cd09c4b4c2573c434042d..6b67d7e9a75d69e95d9d25f9b8524bc58d9ef607 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -28,222 +28,57 @@ static const struct xt_table nf_nat_ipv4_table = { .af = NFPROTO_IPV4, }; -static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) -{ - /* Force range to this IP; let proto decide mapping for - * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). - */ - struct nf_nat_range range; - - range.flags = 0; - pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, - HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip : - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); - - return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); -} - -static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct nf_conn *ct) +static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { struct net *net = nf_ct_net(ct); - unsigned int ret; - ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table); - if (ret == NF_ACCEPT) { - if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) - ret = alloc_null_binding(ct, hooknum); - } - return ret; + return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table); } -static unsigned int -nf_nat_ipv4_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - struct nf_conn_nat *nat; - /* maniptype == SRC for postrouting. */ - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - - /* We never see fragments: conntrack defrags on pre-routing - * and local-out, and nf_nat_out protects post-routing. - */ - NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); - - ct = nf_ct_get(skb, &ctinfo); - /* Can't track? It's not due to stress, or conntrack would - * have dropped it. Hence it's the user's responsibilty to - * packet filter it out, or implement conntrack/NAT for that - * protocol. 8) --RR - */ - if (!ct) - return NF_ACCEPT; - - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED_REPLY: - if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { - if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, - ops->hooknum)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ - case IP_CT_NEW: - /* Seen it before? This can happen for loopback, retrans, - * or local packets. - */ - if (!nf_nat_initialized(ct, maniptype)) { - unsigned int ret; - - ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct); - if (ret != NF_ACCEPT) - return ret; - } else { - pr_debug("Already setup manip %s for ct %p\n", - maniptype == NF_NAT_MANIP_SRC ? 
"SRC" : "DST", - ct); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - break; - - default: - /* ESTABLISHED */ - NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || - ctinfo == IP_CT_ESTABLISHED_REPLY); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); - -oif_changed: - nf_ct_kill_acct(ct, ctinfo, skb); - return NF_DROP; + return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_in(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - unsigned int ret; - __be32 daddr = ip_hdr(skb)->daddr; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - daddr != ip_hdr(skb)->daddr) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_out(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { -#ifdef CONFIG_XFRM - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - int err; -#endif - unsigned int ret; - - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if ((ct->tuplehash[dir].tuple.src.u3.ip != - ct->tuplehash[!dir].tuple.dst.u3.ip) || - (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) { - err = nf_xfrm_me_harder(skb, AF_INET); - if (err < 0) - ret = NF_DROP_ERR(err); - } - } -#endif - return ret; + return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - unsigned int ret; - int err; - - /* root is playing with raw sockets. 
*/ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.dst.u3.ip != - ct->tuplehash[!dir].tuple.src.u3.ip) { - err = ip_route_me_harder(skb, RTN_UNSPEC); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#ifdef CONFIG_XFRM - else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) { - err = nf_xfrm_me_harder(skb, AF_INET); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#endif - } - return ret; + return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain); } static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv4_in, + .hook = iptable_nat_ipv4_in, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, @@ -251,7 +86,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv4_out, + .hook = iptable_nat_ipv4_out, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_POST_ROUTING, @@ -259,7 +94,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv4_local_fn, + .hook = iptable_nat_ipv4_local_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, @@ -267,7 +102,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv4_fn, + .hook = iptable_nat_ipv4_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 76bd1aef257f213d04b3f4393f3705cc4863558d..7e5ca6f2d0cd57a7084cd9cd7386c9cdd40ac076 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -50,7 +50,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, zone = nf_ct_zone((struct nf_conn *)skb->nfct); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (skb->nf_bridge && skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone; diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 14f5ccd063378a0b62162710aeba4a0dc229541c..fc37711e11f38be40252086913ff5b913a40ed68 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -254,6 +254,205 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation); +unsigned int +nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + struct nf_conn_nat *nat; + /* maniptype == SRC for postrouting. 
*/ + enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); + + /* We never see fragments: conntrack defrags on pre-routing + * and local-out, and nf_nat_out protects post-routing. + */ + NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); + + ct = nf_ct_get(skb, &ctinfo); + /* Can't track? It's not due to stress, or conntrack would + * have dropped it. Hence it's the user's responsibilty to + * packet filter it out, or implement conntrack/NAT for that + * protocol. 8) --RR + */ + if (!ct) + return NF_ACCEPT; + + /* Don't try to NAT if this packet is not conntracked */ + if (nf_ct_is_untracked(ct)) + return NF_ACCEPT; + + nat = nf_ct_nat_ext_add(ct); + if (nat == NULL) + return NF_ACCEPT; + + switch (ctinfo) { + case IP_CT_RELATED: + case IP_CT_RELATED_REPLY: + if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { + if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, + ops->hooknum)) + return NF_DROP; + else + return NF_ACCEPT; + } + /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ + case IP_CT_NEW: + /* Seen it before? This can happen for loopback, retrans, + * or local packets. + */ + if (!nf_nat_initialized(ct, maniptype)) { + unsigned int ret; + + ret = do_chain(ops, skb, in, out, ct); + if (ret != NF_ACCEPT) + return ret; + + if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum))) + break; + + ret = nf_nat_alloc_null_binding(ct, ops->hooknum); + if (ret != NF_ACCEPT) + return ret; + } else { + pr_debug("Already setup manip %s for ct %p\n", + maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", + ct); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + break; + + default: + /* ESTABLISHED */ + NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || + ctinfo == IP_CT_ESTABLISHED_REPLY); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + + return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + +oif_changed: + nf_ct_kill_acct(ct, ctinfo, skb); + return NF_DROP; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn); + +unsigned int +nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + unsigned int ret; + __be32 daddr = ip_hdr(skb)->daddr; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + daddr != ip_hdr(skb)->daddr) + skb_dst_drop(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_in); + +unsigned int +nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ +#ifdef CONFIG_XFRM + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + int err; +#endif + unsigned int ret; + + /* root is playing with raw sockets. 
*/ + if (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); +#ifdef CONFIG_XFRM + if (ret != NF_DROP && ret != NF_STOLEN && + !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if ((ct->tuplehash[dir].tuple.src.u3.ip != + ct->tuplehash[!dir].tuple.dst.u3.ip) || + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.src.u.all != + ct->tuplehash[!dir].tuple.dst.u.all)) { + err = nf_xfrm_me_harder(skb, AF_INET); + if (err < 0) + ret = NF_DROP_ERR(err); + } + } +#endif + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_out); + +unsigned int +nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + unsigned int ret; + int err; + + /* root is playing with raw sockets. */ + if (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (ct->tuplehash[dir].tuple.dst.u3.ip != + ct->tuplehash[!dir].tuple.src.u3.ip) { + err = ip_route_me_harder(skb, RTN_UNSPEC); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#ifdef CONFIG_XFRM + else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.dst.u.all != + ct->tuplehash[!dir].tuple.src.u.all) { + err = nf_xfrm_me_harder(skb, AF_INET); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#endif + } + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn); + static int __init nf_nat_l3proto_ipv4_init(void) { int err; diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c new file mode 100644 index 0000000000000000000000000000000000000000..c6eb42100e9a7d78a6a5c27434e42e3282b3af74 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c @@ -0,0 +1,153 @@ +/* (C) 1999-2001 Paul `Rusty' Russell + * (C) 2002-2006 Netfilter Core Team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int +nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, + const struct nf_nat_range *range, + const struct net_device *out) +{ + struct nf_conn *ct; + struct nf_conn_nat *nat; + enum ip_conntrack_info ctinfo; + struct nf_nat_range newrange; + const struct rtable *rt; + __be32 newsrc, nh; + + NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING); + + ct = nf_ct_get(skb, &ctinfo); + nat = nfct_nat(ct); + + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || + ctinfo == IP_CT_RELATED_REPLY)); + + /* Source address is 0.0.0.0 - locally generated packet that is + * probably not supposed to be masqueraded. 
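
Illustrative sketch (not part of the patch): the net effect of the iptable_nat.c rewrite above is that the conntrack/NAT state machine now lives once in nf_nat_l3proto_ipv4.c, and each ruleset front end only supplies its own do_chain() callback (nf_tables is converted the same way further down). The shape of that refactor, reduced to a toy:

#include <stdio.h>

typedef unsigned int (*do_chain_fn)(const char *hook);

/* shared core: run the front end's ruleset, then do the common NAT work */
static unsigned int nat_core(const char *hook, do_chain_fn do_chain)
{
        unsigned int verdict = do_chain(hook);

        if (verdict != 0)               /* anything but "accept": stop here */
                return verdict;
        /* ... allocate a null binding if needed, then mangle the packet ... */
        return 0;
}

static unsigned int iptables_do_chain(const char *hook)
{
        printf("ipt_do_table() on %s\n", hook);
        return 0;
}

static unsigned int nftables_do_chain(const char *hook)
{
        printf("nft_do_chain() on %s\n", hook);
        return 0;
}

int main(void)
{
        nat_core("POSTROUTING", iptables_do_chain);
        nat_core("POSTROUTING", nftables_do_chain);
        return 0;
}
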
+ */ + if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) + return NF_ACCEPT; + + rt = skb_rtable(skb); + nh = rt_nexthop(rt, ip_hdr(skb)->daddr); + newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); + if (!newsrc) { + pr_info("%s ate my IP address\n", out->name); + return NF_DROP; + } + + nat->masq_index = out->ifindex; + + /* Transfer from original range. */ + memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); + memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); + newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.ip = newsrc; + newrange.max_addr.ip = newsrc; + newrange.min_proto = range->min_proto; + newrange.max_proto = range->max_proto; + + /* Hand modified range to generic setup. */ + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); + +static int device_cmp(struct nf_conn *i, void *ifindex) +{ + const struct nf_conn_nat *nat = nfct_nat(i); + + if (!nat) + return 0; + if (nf_ct_l3num(i) != NFPROTO_IPV4) + return 0; + return nat->masq_index == (int)(long)ifindex; +} + +static int masq_device_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + + if (event == NETDEV_DOWN) { + /* Device was downed. Search entire table for + * conntracks which were associated with that device, + * and forget them. + */ + NF_CT_ASSERT(dev->ifindex != 0); + + nf_ct_iterate_cleanup(net, device_cmp, + (void *)(long)dev->ifindex, 0, 0); + } + + return NOTIFY_DONE; +} + +static int masq_inet_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, dev); + return masq_device_event(this, event, &info); +} + +static struct notifier_block masq_dev_notifier = { + .notifier_call = masq_device_event, +}; + +static struct notifier_block masq_inet_notifier = { + .notifier_call = masq_inet_event, +}; + +static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); + +void nf_nat_masquerade_ipv4_register_notifier(void) +{ + /* check if the notifier was already set */ + if (atomic_inc_return(&masquerade_notifier_refcount) > 1) + return; + + /* Register for device down reports */ + register_netdevice_notifier(&masq_dev_notifier); + /* Register IP address change reports */ + register_inetaddr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier); + +void nf_nat_masquerade_ipv4_unregister_notifier(void) +{ + /* check if the notifier still has clients */ + if (atomic_dec_return(&masquerade_notifier_refcount) > 0) + return; + + unregister_netdevice_notifier(&masq_dev_notifier); + unregister_inetaddr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Rusty Russell "); diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c new file mode 100644 index 0000000000000000000000000000000000000000..b023b4eb1a966415c1ae43582428566036379d7a --- /dev/null +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -0,0 +1,127 @@ +/* (C) 1999-2001 Paul `Rusty' Russell + * (C) 2002-2004 Netfilter Core Team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
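
Illustrative sketch (not part of the patch): the register/unregister helpers just above are refcounted so that the xt MASQUERADE target and the new nft masq expression can share one set of netdevice/inetaddr notifiers. The guard pattern in miniature (the kernel uses an atomic counter; a plain int is enough for this single-threaded toy):

#include <stdio.h>

static int users;

static void masq_register_notifiers(void)
{
        if (++users > 1)
                return;                 /* already registered by another module */
        printf("register netdevice + inetaddr notifiers\n");
}

static void masq_unregister_notifiers(void)
{
        if (--users > 0)
                return;                 /* still needed by another module */
        printf("unregister notifiers\n");
}

int main(void)
{
        masq_register_notifiers();      /* e.g. ipt_MASQUERADE loads */
        masq_register_notifiers();      /* e.g. nft_masq_ipv4 loads */
        masq_unregister_notifiers();    /* one of them unloads */
        masq_unregister_notifiers();    /* last user tears everything down */
        return 0;
}
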
+ */ + +#include +#include +#include +#include +#include + +/* Send RST reply */ +void nf_send_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct iphdr *oiph; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _otcph, *tcph; + + /* IP header checks: fragment. */ + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) + return; + + oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), + sizeof(_otcph), &_otcph); + if (oth == NULL) + return; + + /* No RST for RST. */ + if (oth->rst) + return; + + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) + return; + + /* Check checksum */ + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) + return; + oiph = ip_hdr(oldskb); + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + + skb_reset_network_header(nskb); + niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); + niph->version = 4; + niph->ihl = sizeof(struct iphdr) / 4; + niph->tos = 0; + niph->id = 0; + niph->frag_off = htons(IP_DF); + niph->protocol = IPPROTO_TCP; + niph->check = 0; + niph->saddr = oiph->daddr; + niph->daddr = oiph->saddr; + + skb_reset_transport_header(nskb); + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); + memset(tcph, 0, sizeof(*tcph)); + tcph->source = oth->dest; + tcph->dest = oth->source; + tcph->doff = sizeof(struct tcphdr) / 4; + + if (oth->ack) + tcph->seq = oth->ack_seq; + else { + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + oldskb->len - ip_hdrlen(oldskb) - + (oth->doff << 2)); + tcph->ack = 1; + } + + tcph->rst = 1; + tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, + niph->daddr, 0); + nskb->ip_summed = CHECKSUM_PARTIAL; + nskb->csum_start = (unsigned char *)tcph - nskb->head; + nskb->csum_offset = offsetof(struct tcphdr, check); + + /* ip_route_me_harder expects skb->dst to be set */ + skb_dst_set_noref(nskb, skb_dst(oldskb)); + + nskb->protocol = htons(ETH_P_IP); + if (ip_route_me_harder(nskb, RTN_UNSPEC)) + goto free_nskb; + + niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); + + /* "Never happens" */ + if (nskb->len > dst_mtu(skb_dst(nskb))) + goto free_nskb; + + nf_ct_attach(nskb, oldskb); + +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + /* If we use ip_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. 
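
Illustrative sketch (not part of the patch): the sequence-number selection in nf_send_reset() above follows the usual TCP reset rules: answer an ACK-bearing segment with seq equal to its ack_seq, otherwise send an ACK that covers everything the offending segment consumed (SYN, FIN and payload). Isolated:

#include <stdint.h>
#include <stdio.h>

struct tcp_seg {
        uint32_t seq;
        uint32_t ack_seq;
        unsigned int syn:1, fin:1, ack:1;
        uint32_t payload_len;
};

static void rst_numbers(const struct tcp_seg *in,
                        uint32_t *seq, uint32_t *ack_seq, int *set_ack)
{
        if (in->ack) {
                *seq = in->ack_seq;
                *ack_seq = 0;
                *set_ack = 0;
        } else {
                *seq = 0;
                *ack_seq = in->seq + in->syn + in->fin + in->payload_len;
                *set_ack = 1;
        }
}

int main(void)
{
        struct tcp_seg syn = { .seq = 1000, .syn = 1 };
        uint32_t seq, ack_seq;
        int set_ack;

        rst_numbers(&syn, &seq, &ack_seq, &set_ack);
        printf("RST for a SYN: seq=%u ack=%u ACK-flag=%d\n", seq, ack_seq, set_ack);
        return 0;
}
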
+ */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + goto free_nskb; + dev_queue_xmit(nskb); + } else +#endif + ip_local_out(nskb); + + return; + + free_nskb: + kfree_skb(nskb); +} +EXPORT_SYMBOL_GPL(nf_send_reset); diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c index 3964157d826c197e27ce0578c2da409bbc6e4dec..df547bf50078c4016c0e1924da77315cb1e6127a 100644 --- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c @@ -26,136 +26,53 @@ #include #include -/* - * NAT chains - */ - -static unsigned int nf_nat_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { - enum ip_conntrack_info ctinfo; - struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); struct nft_pktinfo pkt; - unsigned int ret; - - if (ct == NULL || nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))); - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED + IP_CT_IS_REPLY: - if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { - if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, - ops->hooknum)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall through */ - case IP_CT_NEW: - if (nf_nat_initialized(ct, maniptype)) - break; - nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); + nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); - ret = nft_do_chain(&pkt, ops); - if (ret != NF_ACCEPT) - return ret; - if (!nf_nat_initialized(ct, maniptype)) { - ret = nf_nat_alloc_null_binding(ct, ops->hooknum); - if (ret != NF_ACCEPT) - return ret; - } - default: - break; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + return nft_do_chain(&pkt, ops); } -static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - __be32 daddr = ip_hdr(skb)->daddr; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ip_hdr(skb)->daddr != daddr) { - skb_dst_drop(skb); - } - return ret; + return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo __maybe_unused; - const struct nf_conn *ct __maybe_unused; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); -#ifdef 
CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.src.u3.ip != - ct->tuplehash[!dir].tuple.dst.u3.ip || - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all) - return nf_xfrm_me_harder(skb, AF_INET) == 0 ? - ret : NF_DROP; - } -#endif - return ret; + return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_output(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo; - const struct nf_conn *ct; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain); +} - if (ct->tuplehash[dir].tuple.dst.u3.ip != - ct->tuplehash[!dir].tuple.src.u3.ip) { - if (ip_route_me_harder(skb, RTN_UNSPEC)) - ret = NF_DROP; - } -#ifdef CONFIG_XFRM - else if (ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) - if (nf_xfrm_me_harder(skb, AF_INET)) - ret = NF_DROP; -#endif - } - return ret; +static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain); } static const struct nf_chain_type nft_chain_nat_ipv4 = { @@ -168,10 +85,10 @@ static const struct nf_chain_type nft_chain_nat_ipv4 = { (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { - [NF_INET_PRE_ROUTING] = nf_nat_prerouting, - [NF_INET_POST_ROUTING] = nf_nat_postrouting, - [NF_INET_LOCAL_OUT] = nf_nat_output, - [NF_INET_LOCAL_IN] = nf_nat_fn, + [NF_INET_PRE_ROUTING] = nft_nat_ipv4_in, + [NF_INET_POST_ROUTING] = nft_nat_ipv4_out, + [NF_INET_LOCAL_OUT] = nft_nat_ipv4_local_fn, + [NF_INET_LOCAL_IN] = nft_nat_ipv4_fn, }, }; diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c new file mode 100644 index 0000000000000000000000000000000000000000..1c636d6b5b5007b597e95b7852d7f76d750a4040 --- /dev/null +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void nft_masq_ipv4_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_masq *priv = nft_expr_priv(expr); + struct nf_nat_range range; + unsigned int verdict; + + range.flags = priv->flags; + + verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, + &range, pkt->out); + + data[NFT_REG_VERDICT].verdict = verdict; +} + +static struct nft_expr_type nft_masq_ipv4_type; +static const struct nft_expr_ops nft_masq_ipv4_ops = { + .type = &nft_masq_ipv4_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), + .eval = nft_masq_ipv4_eval, + .init = nft_masq_init, + .dump = nft_masq_dump, +}; + +static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { + .family = NFPROTO_IPV4, + .name = "masq", + .ops = &nft_masq_ipv4_ops, + .policy = nft_masq_policy, + .maxattr = NFTA_MASQ_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_masq_ipv4_module_init(void) +{ + int ret; + + ret = nft_register_expr(&nft_masq_ipv4_type); + if (ret < 0) + return ret; + + nf_nat_masquerade_ipv4_register_notifier(); + + return ret; +} + +static void __exit nft_masq_ipv4_module_exit(void) +{ + nft_unregister_expr(&nft_masq_ipv4_type); + nf_nat_masquerade_ipv4_unregister_notifier(); +} + +module_init(nft_masq_ipv4_module_init); +module_exit(nft_masq_ipv4_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez "); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "masq"); diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index e79718a382f2e373890bc5fbb2be4fa2468162c8..ed33299c56d15089055f4664ce28ef3da36f9ee3 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index a3c59a077a5f0574f02584b0068de0703d5cb3dc..57f7c98041394998fe390735aa5b8cd2602b3f90 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -311,7 +311,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; - if ((sysctl_ip_nonlocal_bind == 0 && + if ((net->ipv4.sysctl_ip_nonlocal_bind == 0 && isk->freebind == 0 && isk->transparent == 0 && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 46d6a1c923a8741776fa1de1cdd727b6cba72294..4b7c0ec65251ef40577a2d5e360fcbaed391a566 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c @@ -30,6 +30,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; +EXPORT_SYMBOL(inet_offloads); int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cbadb942c3328d3585add8e791fcd2e3d2cb1d67..793c0bb8c4fd9d0f8db91bc715e842ca2edc3620 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -596,12 +596,12 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) static inline u32 fnhe_hashfun(__be32 daddr) { + static u32 fnhe_hashrnd __read_mostly; u32 hval; - hval = (__force u32) daddr; - hval ^= (hval >> 11) ^ (hval >> 22); - - return hval & (FNHE_HASH_SIZE - 1); + net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); + hval = jhash_1word((__force u32) daddr, 
fnhe_hashrnd); + return hash_32(hval, FNHE_HASH_SHIFT); } static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) @@ -628,12 +628,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, spin_lock_bh(&fnhe_lock); - hash = nh->nh_exceptions; + hash = rcu_dereference(nh->nh_exceptions); if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; - nh->nh_exceptions = hash; + rcu_assign_pointer(nh->nh_exceptions, hash); } hash += hval; @@ -1242,7 +1242,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) { - struct fnhe_hash_bucket *hash = nh->nh_exceptions; + struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); struct fib_nh_exception *fnhe; u32 hval; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index c0c75688896e06bd1b64384c94e7db758f842d0b..0431a8f3c8f458538e37543a0b37a091da0c04bb 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -25,7 +25,7 @@ extern int sysctl_tcp_syncookies; -static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; +static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index a9fde0eef77c33bf1c082564930dbbb2a859e550..b3c53c8b331efc3d5cf6437fd3ec7634a154263c 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -285,13 +285,6 @@ static struct ctl_table ipv4_table[] = { .extra1 = &ip_ttl_min, .extra2 = &ip_ttl_max, }, - { - .procname = "ip_nonlocal_bind", - .data = &sysctl_ip_nonlocal_bind, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "tcp_syn_retries", .data = &sysctl_tcp_syn_retries, @@ -450,6 +443,16 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, +#ifdef CONFIG_IP_MULTICAST + { + .procname = "igmp_qrv", + .data = &sysctl_igmp_qrv, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one + }, +#endif { .procname = "inet_peer_threshold", .data = &inet_peer_threshold, @@ -718,6 +721,22 @@ static struct ctl_table ipv4_table[] = { .extra1 = &zero, .extra2 = &one, }, + { + .procname = "icmp_msgs_per_sec", + .data = &sysctl_icmp_msgs_per_sec, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, + { + .procname = "icmp_msgs_burst", + .data = &sysctl_icmp_msgs_burst, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, { .procname = "udp_mem", .data = &sysctl_udp_mem, @@ -829,6 +848,13 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "ip_nonlocal_bind", + .data = &init_net.ipv4.sysctl_ip_nonlocal_bind, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, { .procname = "fwmark_reflect", .data = &init_net.ipv4.sysctl_fwmark_reflect, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8ee43ae90396f45f0cd08118d19f985439543cef..461003d258ba4030b1a8aa1c49930b7a1ac94b9c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -404,7 +404,7 @@ void tcp_init_sock(struct sock *sk) tp->reordering = sysctl_tcp_reordering; tcp_enable_early_retrans(tp); - icsk->icsk_ca_ops = &tcp_init_congestion_ops; + tcp_assign_congestion_control(sk); tp->tsoffset = 
0; @@ -608,7 +608,7 @@ static inline bool forced_push(const struct tcp_sock *tp) return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } -static inline void skb_entail(struct sock *sk, struct sk_buff *skb) +static void skb_entail(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); @@ -617,7 +617,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb) tcb->seq = tcb->end_seq = tp->write_seq; tcb->tcp_flags = TCPHDR_ACK; tcb->sacked = 0; - skb_header_release(skb); + __skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); @@ -962,7 +962,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, skb->ip_summed = CHECKSUM_PARTIAL; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; - skb_shinfo(skb)->gso_segs = 0; + tcp_skb_pcount_set(skb, 0); if (!copied) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; @@ -1260,7 +1260,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; - skb_shinfo(skb)->gso_segs = 0; + tcp_skb_pcount_set(skb, 0); from += copy; copied += copy; @@ -1476,9 +1476,9 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { offset = seq - TCP_SKB_CB(skb)->seq; - if (tcp_hdr(skb)->syn) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) offset--; - if (offset < skb->len || tcp_hdr(skb)->fin) { + if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { *off = offset; return skb; } @@ -1551,7 +1551,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, if (offset + 1 != skb->len) continue; } - if (tcp_hdr(skb)->fin) { + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { sk_eat_skb(sk, skb); ++seq; break; @@ -1665,11 +1665,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, break; offset = *seq - TCP_SKB_CB(skb)->seq; - if (tcp_hdr(skb)->syn) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) offset--; if (offset < skb->len) goto found_ok_skb; - if (tcp_hdr(skb)->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", @@ -1857,7 +1857,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (used + offset < skb->len) continue; - if (tcp_hdr(skb)->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) sk_eat_skb(sk, skb); @@ -2044,8 +2044,10 @@ void tcp_close(struct sock *sk, long timeout) * reader process may not have drained the data yet! */ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - - tcp_hdr(skb)->fin; + u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; + + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + len--; data_was_unread += len; __kfree_skb(skb); } @@ -2572,7 +2574,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; #endif case TCP_USER_TIMEOUT: - /* Cap the max timeout in ms TCP will retry/retrans + /* Cap the max time in ms TCP will retry or probe the window * before giving up and aborting (ETIMEDOUT) a connection. 
*/ if (val < 0) @@ -3051,7 +3053,7 @@ static int __init set_thash_entries(char *str) } __setup("thash_entries=", set_thash_entries); -static void tcp_init_mem(void) +static void __init tcp_init_mem(void) { unsigned long limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); @@ -3137,8 +3139,6 @@ void __init tcp_init(void) tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); tcp_metrics_init(); - - tcp_register_congestion_control(&tcp_reno); - + BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); tcp_tasklet_init(); } diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index d5de69bc04f581ac589ae0f6fe7fcc3646b2cc3e..bb395d46a3898136afe615c73ac311fd2832f6f1 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -17,7 +17,6 @@ #include #include - #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation * max_cwnd = snd_cwnd * beta */ @@ -46,11 +45,10 @@ MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); module_param(smooth_part, int, 0644); MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax"); - /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ - u32 last_max_cwnd; /* last maximum snd_cwnd */ + u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ @@ -103,7 +101,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* binary increase */ if (cwnd < ca->last_max_cwnd) { - __u32 dist = (ca->last_max_cwnd - cwnd) + __u32 dist = (ca->last_max_cwnd - cwnd) / BICTCP_B; if (dist > max_increment) @@ -154,7 +152,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_update(ca, tp->snd_cwnd); tcp_cong_avoid_ai(tp, ca->cnt); } - } /* @@ -177,7 +174,6 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) ca->loss_cwnd = tp->snd_cwnd; - if (tp->snd_cwnd <= low_window) return max(tp->snd_cwnd >> 1U, 2U); else @@ -188,6 +184,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct bictcp *ca = inet_csk_ca(sk); + return max(tp->snd_cwnd, ca->loss_cwnd); } @@ -206,12 +203,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt) if (icsk->icsk_ca_state == TCP_CA_Open) { struct bictcp *ca = inet_csk_ca(sk); + cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; ca->delayed_ack += cnt; } } - static struct tcp_congestion_ops bictcp __read_mostly = { .init = bictcp_init, .ssthresh = bictcp_recalc_ssthresh, diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 7b09d8b49fa51271cc0fa99a3fcda9f017c0f469..b1c5970d47a1bf65a9c9cbf6a89d13b0bb8f909f 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -74,24 +74,34 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca) EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control); /* Assign choice of congestion control. 
*/ -void tcp_init_congestion_control(struct sock *sk) +void tcp_assign_congestion_control(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_congestion_ops *ca; - /* if no choice made yet assign the current value set as default */ - if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) { - rcu_read_lock(); - list_for_each_entry_rcu(ca, &tcp_cong_list, list) { - if (try_module_get(ca->owner)) { - icsk->icsk_ca_ops = ca; - break; - } - - /* fallback to next available */ + rcu_read_lock(); + list_for_each_entry_rcu(ca, &tcp_cong_list, list) { + if (likely(try_module_get(ca->owner))) { + icsk->icsk_ca_ops = ca; + goto out; } - rcu_read_unlock(); + /* Fallback to next available. The last really + * guaranteed fallback is Reno from this list. + */ } +out: + rcu_read_unlock(); + + /* Clear out private data before diag gets it and + * the ca has not been initialized. + */ + if (ca->get_info) + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); +} + +void tcp_init_congestion_control(struct sock *sk) +{ + const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->init) icsk->icsk_ca_ops->init(sk); @@ -142,7 +152,6 @@ static int __init tcp_congestion_default(void) } late_initcall(tcp_congestion_default); - /* Build string with list of available congestion control values */ void tcp_get_available_congestion_control(char *buf, size_t maxlen) { @@ -154,7 +163,6 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); - } rcu_read_unlock(); } @@ -186,7 +194,6 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); - } rcu_read_unlock(); } @@ -230,7 +237,6 @@ int tcp_set_allowed_congestion_control(char *val) return ret; } - /* Change congestion control for socket */ int tcp_set_congestion_control(struct sock *sk, const char *name) { @@ -285,15 +291,13 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and * returns the leftover acks to adjust cwnd in congestion avoidance mode. 
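For illustration only (not part of the patch): the reworked, void tcp_slow_start() just below grows cwnd by the newly acked segments, caps it at ssthresh + 1, and respects the clamp. A minimal standalone sketch of that arithmetic; slow_start_step is an illustrative name, not a kernel function.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the capped growth of the void tcp_slow_start() below. */
static uint32_t slow_start_step(uint32_t cwnd, uint32_t ssthresh,
				uint32_t clamp, uint32_t acked)
{
	cwnd += acked;
	if (cwnd > ssthresh)
		cwnd = ssthresh + 1;		/* slow start ends here */
	return cwnd < clamp ? cwnd : clamp;	/* respect the cwnd clamp */
}

int main(void)
{
	/* cwnd 10, ssthresh 16, 8 segments acked -> cwnd 17 (ssthresh + 1) */
	printf("%u\n", slow_start_step(10, 16, 65535, 8));
	return 0;
}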
*/ -int tcp_slow_start(struct tcp_sock *tp, u32 acked) +void tcp_slow_start(struct tcp_sock *tp, u32 acked) { u32 cwnd = tp->snd_cwnd + acked; if (cwnd > tp->snd_ssthresh) cwnd = tp->snd_ssthresh + 1; - acked -= cwnd - tp->snd_cwnd; tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); - return acked; } EXPORT_SYMBOL_GPL(tcp_slow_start); @@ -337,6 +341,7 @@ EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); u32 tcp_reno_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + return max(tp->snd_cwnd >> 1U, 2U); } EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); @@ -348,15 +353,3 @@ struct tcp_congestion_ops tcp_reno = { .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_reno_cong_avoid, }; - -/* Initial congestion control used (until SYN) - * really reno under another name so we can tell difference - * during tcp_set_default_congestion_control - */ -struct tcp_congestion_ops tcp_init_congestion_ops = { - .name = "", - .owner = THIS_MODULE, - .ssthresh = tcp_reno_ssthresh, - .cong_avoid = tcp_reno_cong_avoid, -}; -EXPORT_SYMBOL_GPL(tcp_init_congestion_ops); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9bd8a4828a9e5c2da275626c11c6c953a1b3dd6..20de0118c98e455447128513610ba04b13f3cd61 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -82,12 +82,13 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ - u32 last_max_cwnd; /* last maximum snd_cwnd */ + u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ u32 bic_origin_point;/* origin point of bic function */ - u32 bic_K; /* time to origin point from the beginning of the current epoch */ + u32 bic_K; /* time to origin point + from the beginning of the current epoch */ u32 delay_min; /* min delay (msec << 3) */ u32 epoch_start; /* beginning of an epoch */ u32 ack_cnt; /* number of acks */ @@ -219,7 +220,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) ca->last_time = tcp_time_stamp; if (ca->epoch_start == 0) { - ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */ + ca->epoch_start = tcp_time_stamp; /* record beginning */ ca->ack_cnt = 1; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ @@ -263,9 +264,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* c/rtt * (t-K)^3 */ delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); - if (t < ca->bic_K) /* below origin*/ + if (t < ca->bic_K) /* below origin*/ bic_target = ca->bic_origin_point - delta; - else /* above origin*/ + else /* above origin*/ bic_target = ca->bic_origin_point + delta; /* cubic function - calc bictcp_cnt*/ @@ -285,13 +286,14 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; + delta = (cwnd * scale) >> 3; while (ca->ack_cnt > delta) { /* update tcp cwnd */ ca->ack_cnt -= delta; ca->tcp_cwnd++; } - if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ + if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ delta = ca->tcp_cwnd - cwnd; max_cnt = cwnd / delta; if (ca->cnt > max_cnt) @@ -320,7 +322,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_update(ca, tp->snd_cwnd); tcp_cong_avoid_ai(tp, ca->cnt); } - } static u32 bictcp_recalc_ssthresh(struct sock *sk) @@ -452,7 +453,8 @@ static int __init cubictcp_register(void) * based 
on SRTT of 100ms */ - beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta); + beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 + / (BICTCP_BETA_SCALE - beta); cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c new file mode 100644 index 0000000000000000000000000000000000000000..b504371af74209362e03bbae0770bcbafc3e9340 --- /dev/null +++ b/net/ipv4/tcp_dctcp.c @@ -0,0 +1,344 @@ +/* DataCenter TCP (DCTCP) congestion control. + * + * http://simula.stanford.edu/~alizade/Site/DCTCP.html + * + * This is an implementation of DCTCP over Reno, an enhancement to the + * TCP congestion control algorithm designed for data centers. DCTCP + * leverages Explicit Congestion Notification (ECN) in the network to + * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet + * the following three data center transport requirements: + * + * - High burst tolerance (incast due to partition/aggregate) + * - Low latency (short flows, queries) + * - High throughput (continuous data updates, large file transfers) + * with commodity shallow buffered switches + * + * The algorithm is described in detail in the following two papers: + * + * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye, + * Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan: + * "Data Center TCP (DCTCP)", Data Center Networks session + * Proc. ACM SIGCOMM, New Delhi, 2010. + * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf + * + * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar: + * "Analysis of DCTCP: Stability, Convergence, and Fairness" + * Proc. ACM SIGMETRICS, San Jose, 2011. + * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf + * + * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh. + * + * Authors: + * + * Daniel Borkmann + * Florian Westphal + * Glenn Judd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/mm.h> +#include <net/tcp.h> +#include <linux/inet_diag.h> + +#define DCTCP_MAX_ALPHA 1024U + +struct dctcp { + u32 acked_bytes_ecn; + u32 acked_bytes_total; + u32 prior_snd_una; + u32 prior_rcv_nxt; + u32 dctcp_alpha; + u32 next_seq; + u32 ce_state; + u32 delayed_ack_reserved; +}; + +static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ +module_param(dctcp_shift_g, uint, 0644); +MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha"); + +static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA; +module_param(dctcp_alpha_on_init, uint, 0644); +MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); + +static unsigned int dctcp_clamp_alpha_on_loss __read_mostly; +module_param(dctcp_clamp_alpha_on_loss, uint, 0644); +MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss, + "parameter for clamping alpha on loss"); + +static struct tcp_congestion_ops dctcp_reno; + +static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) +{ + ca->next_seq = tp->snd_nxt; + + ca->acked_bytes_ecn = 0; + ca->acked_bytes_total = 0; +} + +static void dctcp_init(struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + + if ((tp->ecn_flags & TCP_ECN_OK) || + (sk->sk_state == TCP_LISTEN || + sk->sk_state == TCP_CLOSE)) { + struct dctcp *ca = inet_csk_ca(sk); + + ca->prior_snd_una = tp->snd_una; + ca->prior_rcv_nxt = tp->rcv_nxt; + + ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); + + ca->delayed_ack_reserved = 0; + ca->ce_state = 0; + + dctcp_reset(tp, ca); + return; + } + + /* No ECN support? Fall back to Reno. Also need to clear + * ECT from sk since it is set during 3WHS for DCTCP. + */ + inet_csk(sk)->icsk_ca_ops = &dctcp_reno; + INET_ECN_dontxmit(sk); +} + +static u32 dctcp_ssthresh(struct sock *sk) +{ + const struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); +} + +/* Minimal DCTCP CE state machine: + * + * S: 0 <- last pkt was non-CE + * 1 <- last pkt was CE + */ + +static void dctcp_ce_state_0_to_1(struct sock *sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + /* State has changed from CE=0 to CE=1 and delayed + * ACK has not been sent yet. + */ + if (!ca->ce_state && ca->delayed_ack_reserved) { + u32 tmp_rcv_nxt; + + /* Save current rcv_nxt. */ + tmp_rcv_nxt = tp->rcv_nxt; + + /* Generate previous ack with CE=0. */ + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + tp->rcv_nxt = ca->prior_rcv_nxt; + + tcp_send_ack(sk); + + /* Recover current rcv_nxt. */ + tp->rcv_nxt = tmp_rcv_nxt; + } + + ca->prior_rcv_nxt = tp->rcv_nxt; + ca->ce_state = 1; + + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; +} + +static void dctcp_ce_state_1_to_0(struct sock *sk) +{ + struct dctcp *ca = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + /* State has changed from CE=1 to CE=0 and delayed + * ACK has not been sent yet. + */ + if (ca->ce_state && ca->delayed_ack_reserved) { + u32 tmp_rcv_nxt; + + /* Save current rcv_nxt. */ + tmp_rcv_nxt = tp->rcv_nxt; + + /* Generate previous ack with CE=1. */ + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + tp->rcv_nxt = ca->prior_rcv_nxt; + + tcp_send_ack(sk); + + /* Recover current rcv_nxt.
*/ + tp->rcv_nxt = tmp_rcv_nxt; + } + + ca->prior_rcv_nxt = tp->rcv_nxt; + ca->ce_state = 0; + + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +static void dctcp_update_alpha(struct sock *sk, u32 flags) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct dctcp *ca = inet_csk_ca(sk); + u32 acked_bytes = tp->snd_una - ca->prior_snd_una; + + /* If ack did not advance snd_una, count dupack as MSS size. + * If ack did update window, do not count it at all. + */ + if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE)) + acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; + if (acked_bytes) { + ca->acked_bytes_total += acked_bytes; + ca->prior_snd_una = tp->snd_una; + + if (flags & CA_ACK_ECE) + ca->acked_bytes_ecn += acked_bytes; + } + + /* Expired RTT */ + if (!before(tp->snd_una, ca->next_seq)) { + /* Avoid a zero denominator. */ + if (ca->acked_bytes_total == 0) + ca->acked_bytes_total = 1; + + /* alpha = (1 - g) * alpha + g * F */ + ca->dctcp_alpha = ca->dctcp_alpha - + (ca->dctcp_alpha >> dctcp_shift_g) + + (ca->acked_bytes_ecn << (10U - dctcp_shift_g)) / + ca->acked_bytes_total; + + if (ca->dctcp_alpha > DCTCP_MAX_ALPHA) + /* Clamp dctcp_alpha to max. */ + ca->dctcp_alpha = DCTCP_MAX_ALPHA; + + dctcp_reset(tp, ca); + } +} + +static void dctcp_state(struct sock *sk, u8 new_state) +{ + if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) { + struct dctcp *ca = inet_csk_ca(sk); + + /* If this extension is enabled, we clamp dctcp_alpha to + * max on packet loss; the motivation is that dctcp_alpha + * is an indicator of the extent of congestion and packet + * loss is an indicator of extreme congestion; setting + * this in practice turned out to be beneficial, and + * effectively assumes total congestion which reduces the + * window by half. + */ + ca->dctcp_alpha = DCTCP_MAX_ALPHA; + } +} + +static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev) +{ + struct dctcp *ca = inet_csk_ca(sk); + + switch (ev) { + case CA_EVENT_DELAYED_ACK: + if (!ca->delayed_ack_reserved) + ca->delayed_ack_reserved = 1; + break; + case CA_EVENT_NON_DELAYED_ACK: + if (ca->delayed_ack_reserved) + ca->delayed_ack_reserved = 0; + break; + default: + /* Don't care for the rest. */ + break; + } +} + +static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) +{ + switch (ev) { + case CA_EVENT_ECN_IS_CE: + dctcp_ce_state_0_to_1(sk); + break; + case CA_EVENT_ECN_NO_CE: + dctcp_ce_state_1_to_0(sk); + break; + case CA_EVENT_DELAYED_ACK: + case CA_EVENT_NON_DELAYED_ACK: + dctcp_update_ack_reserved(sk, ev); + break; + default: + /* Don't care for the rest. */ + break; + } +} + +static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) +{ + const struct dctcp *ca = inet_csk_ca(sk); + + /* Fill it also in case of VEGASINFO due to req struct limits. + * We can still correctly retrieve it later.
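For illustration only (not part of the patch): the fixed-point steps above keep alpha scaled to [0, DCTCP_MAX_ALPHA] with g = 1/2^dctcp_shift_g, and dctcp_ssthresh() cuts cwnd by cwnd * alpha / 2 (the >> 11 is a divide by 1024 and then by 2). A standalone sketch of that arithmetic; alpha_update and ssthresh_from_alpha are illustrative names, not kernel functions.

#include <stdio.h>
#include <stdint.h>

#define DCTCP_MAX_ALPHA 1024U

/* alpha = (1 - g) * alpha + g * F, everything scaled by 1024, g = 1/2^shift_g */
static uint32_t alpha_update(uint32_t alpha, uint32_t shift_g,
			     uint32_t bytes_ecn, uint32_t bytes_total)
{
	if (bytes_total == 0)
		bytes_total = 1;	/* avoid a zero denominator, as above */
	alpha = alpha - (alpha >> shift_g) +
		(bytes_ecn << (10U - shift_g)) / bytes_total;
	return alpha > DCTCP_MAX_ALPHA ? DCTCP_MAX_ALPHA : alpha;
}

/* cwnd is reduced by cwnd * alpha / 2, never below 2 segments */
static uint32_t ssthresh_from_alpha(uint32_t cwnd, uint32_t alpha)
{
	uint32_t reduced = cwnd - ((cwnd * alpha) >> 11U);
	return reduced > 2U ? reduced : 2U;
}

int main(void)
{
	/* half the bytes CE-marked this window: alpha 1024 -> 992 */
	printf("%u\n", alpha_update(1024, 4, 500, 1000));
	/* alpha == 1024 (persistent congestion) halves cwnd, like Reno */
	printf("%u\n", ssthresh_from_alpha(100, 1024));	/* prints 50 */
	return 0;
}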
+ */ + if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) || + ext & (1 << (INET_DIAG_VEGASINFO - 1))) { + struct tcp_dctcp_info info; + + memset(&info, 0, sizeof(info)); + if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { + info.dctcp_enabled = 1; + info.dctcp_ce_state = (u16) ca->ce_state; + info.dctcp_alpha = ca->dctcp_alpha; + info.dctcp_ab_ecn = ca->acked_bytes_ecn; + info.dctcp_ab_tot = ca->acked_bytes_total; + } + + nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); + } +} + +static struct tcp_congestion_ops dctcp __read_mostly = { + .init = dctcp_init, + .in_ack_event = dctcp_update_alpha, + .cwnd_event = dctcp_cwnd_event, + .ssthresh = dctcp_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .set_state = dctcp_state, + .get_info = dctcp_get_info, + .flags = TCP_CONG_NEEDS_ECN, + .owner = THIS_MODULE, + .name = "dctcp", +}; + +static struct tcp_congestion_ops dctcp_reno __read_mostly = { + .ssthresh = tcp_reno_ssthresh, + .cong_avoid = tcp_reno_cong_avoid, + .get_info = dctcp_get_info, + .owner = THIS_MODULE, + .name = "dctcp-reno", +}; + +static int __init dctcp_register(void) +{ + BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE); + return tcp_register_congestion_control(&dctcp); +} + +static void __exit dctcp_unregister(void) +{ + tcp_unregister_congestion_control(&dctcp); +} + +module_init(dctcp_register); +module_exit(dctcp_unregister); + +MODULE_AUTHOR("Daniel Borkmann "); +MODULE_AUTHOR("Florian Westphal "); +MODULE_AUTHOR("Glenn Judd "); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DataCenter TCP (DCTCP)"); diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index ed3f2ad42e0f0ea881e507d8c91e7962d336d824..0d73f9ddb55b8d55204643541dad651cd9d6eb06 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -9,7 +9,6 @@ * 2 of the License, or (at your option) any later version. */ - #include #include @@ -35,13 +34,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, } static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req_v2 *r, struct nlattr *bc) + struct inet_diag_req_v2 *r, struct nlattr *bc) { inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc); } static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req) + struct inet_diag_req_v2 *req) { return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 9771563ab564923e325f5eb5341d7e6ae81a499a..815c85e3b1e06483829d596dc2c4b4a1a9cc5e44 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -115,7 +115,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req, if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) { struct in6_addr *buf = (struct in6_addr *) tmp.val; - int i = 4; + int i; for (i = 0; i < 4; i++) buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i]; diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 1c4908280d921fbe9a9e21e4fd55d1a87f2db706..882c08aae2f58d02bb78212a4eba4d25d7e9c123 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -9,7 +9,6 @@ #include #include - /* From AIMD tables from RFC 3649 appendix B, * with fixed-point MD scaled <<8. 
*/ @@ -17,78 +16,78 @@ static const struct hstcp_aimd_val { unsigned int cwnd; unsigned int md; } hstcp_aimd_vals[] = { - { 38, 128, /* 0.50 */ }, - { 118, 112, /* 0.44 */ }, - { 221, 104, /* 0.41 */ }, - { 347, 98, /* 0.38 */ }, - { 495, 93, /* 0.37 */ }, - { 663, 89, /* 0.35 */ }, - { 851, 86, /* 0.34 */ }, - { 1058, 83, /* 0.33 */ }, - { 1284, 81, /* 0.32 */ }, - { 1529, 78, /* 0.31 */ }, - { 1793, 76, /* 0.30 */ }, - { 2076, 74, /* 0.29 */ }, - { 2378, 72, /* 0.28 */ }, - { 2699, 71, /* 0.28 */ }, - { 3039, 69, /* 0.27 */ }, - { 3399, 68, /* 0.27 */ }, - { 3778, 66, /* 0.26 */ }, - { 4177, 65, /* 0.26 */ }, - { 4596, 64, /* 0.25 */ }, - { 5036, 62, /* 0.25 */ }, - { 5497, 61, /* 0.24 */ }, - { 5979, 60, /* 0.24 */ }, - { 6483, 59, /* 0.23 */ }, - { 7009, 58, /* 0.23 */ }, - { 7558, 57, /* 0.22 */ }, - { 8130, 56, /* 0.22 */ }, - { 8726, 55, /* 0.22 */ }, - { 9346, 54, /* 0.21 */ }, - { 9991, 53, /* 0.21 */ }, - { 10661, 52, /* 0.21 */ }, - { 11358, 52, /* 0.20 */ }, - { 12082, 51, /* 0.20 */ }, - { 12834, 50, /* 0.20 */ }, - { 13614, 49, /* 0.19 */ }, - { 14424, 48, /* 0.19 */ }, - { 15265, 48, /* 0.19 */ }, - { 16137, 47, /* 0.19 */ }, - { 17042, 46, /* 0.18 */ }, - { 17981, 45, /* 0.18 */ }, - { 18955, 45, /* 0.18 */ }, - { 19965, 44, /* 0.17 */ }, - { 21013, 43, /* 0.17 */ }, - { 22101, 43, /* 0.17 */ }, - { 23230, 42, /* 0.17 */ }, - { 24402, 41, /* 0.16 */ }, - { 25618, 41, /* 0.16 */ }, - { 26881, 40, /* 0.16 */ }, - { 28193, 39, /* 0.16 */ }, - { 29557, 39, /* 0.15 */ }, - { 30975, 38, /* 0.15 */ }, - { 32450, 38, /* 0.15 */ }, - { 33986, 37, /* 0.15 */ }, - { 35586, 36, /* 0.14 */ }, - { 37253, 36, /* 0.14 */ }, - { 38992, 35, /* 0.14 */ }, - { 40808, 35, /* 0.14 */ }, - { 42707, 34, /* 0.13 */ }, - { 44694, 33, /* 0.13 */ }, - { 46776, 33, /* 0.13 */ }, - { 48961, 32, /* 0.13 */ }, - { 51258, 32, /* 0.13 */ }, - { 53677, 31, /* 0.12 */ }, - { 56230, 30, /* 0.12 */ }, - { 58932, 30, /* 0.12 */ }, - { 61799, 29, /* 0.12 */ }, - { 64851, 28, /* 0.11 */ }, - { 68113, 28, /* 0.11 */ }, - { 71617, 27, /* 0.11 */ }, - { 75401, 26, /* 0.10 */ }, - { 79517, 26, /* 0.10 */ }, - { 84035, 25, /* 0.10 */ }, - { 89053, 24, /* 0.10 */ }, + { 38, 128, /* 0.50 */ }, + { 118, 112, /* 0.44 */ }, + { 221, 104, /* 0.41 */ }, + { 347, 98, /* 0.38 */ }, + { 495, 93, /* 0.37 */ }, + { 663, 89, /* 0.35 */ }, + { 851, 86, /* 0.34 */ }, + { 1058, 83, /* 0.33 */ }, + { 1284, 81, /* 0.32 */ }, + { 1529, 78, /* 0.31 */ }, + { 1793, 76, /* 0.30 */ }, + { 2076, 74, /* 0.29 */ }, + { 2378, 72, /* 0.28 */ }, + { 2699, 71, /* 0.28 */ }, + { 3039, 69, /* 0.27 */ }, + { 3399, 68, /* 0.27 */ }, + { 3778, 66, /* 0.26 */ }, + { 4177, 65, /* 0.26 */ }, + { 4596, 64, /* 0.25 */ }, + { 5036, 62, /* 0.25 */ }, + { 5497, 61, /* 0.24 */ }, + { 5979, 60, /* 0.24 */ }, + { 6483, 59, /* 0.23 */ }, + { 7009, 58, /* 0.23 */ }, + { 7558, 57, /* 0.22 */ }, + { 8130, 56, /* 0.22 */ }, + { 8726, 55, /* 0.22 */ }, + { 9346, 54, /* 0.21 */ }, + { 9991, 53, /* 0.21 */ }, + { 10661, 52, /* 0.21 */ }, + { 11358, 52, /* 0.20 */ }, + { 12082, 51, /* 0.20 */ }, + { 12834, 50, /* 0.20 */ }, + { 13614, 49, /* 0.19 */ }, + { 14424, 48, /* 0.19 */ }, + { 15265, 48, /* 0.19 */ }, + { 16137, 47, /* 0.19 */ }, + { 17042, 46, /* 0.18 */ }, + { 17981, 45, /* 0.18 */ }, + { 18955, 45, /* 0.18 */ }, + { 19965, 44, /* 0.17 */ }, + { 21013, 43, /* 0.17 */ }, + { 22101, 43, /* 0.17 */ }, + { 23230, 42, /* 0.17 */ }, + { 24402, 41, /* 0.16 */ }, + { 25618, 41, /* 0.16 */ }, + { 26881, 40, /* 0.16 */ }, + { 28193, 39, /* 0.16 */ }, + { 29557, 39, /* 0.15 
*/ }, + { 30975, 38, /* 0.15 */ }, + { 32450, 38, /* 0.15 */ }, + { 33986, 37, /* 0.15 */ }, + { 35586, 36, /* 0.14 */ }, + { 37253, 36, /* 0.14 */ }, + { 38992, 35, /* 0.14 */ }, + { 40808, 35, /* 0.14 */ }, + { 42707, 34, /* 0.13 */ }, + { 44694, 33, /* 0.13 */ }, + { 46776, 33, /* 0.13 */ }, + { 48961, 32, /* 0.13 */ }, + { 51258, 32, /* 0.13 */ }, + { 53677, 31, /* 0.12 */ }, + { 56230, 30, /* 0.12 */ }, + { 58932, 30, /* 0.12 */ }, + { 61799, 29, /* 0.12 */ }, + { 64851, 28, /* 0.11 */ }, + { 68113, 28, /* 0.11 */ }, + { 71617, 27, /* 0.11 */ }, + { 75401, 26, /* 0.10 */ }, + { 79517, 26, /* 0.10 */ }, + { 84035, 25, /* 0.10 */ }, + { 89053, 24, /* 0.10 */ }, }; #define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals) diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 031361311a8b92b1f7ab1172fb00e3bbb0503129..58469fff6c18fd444c95366caa04ab60965d654a 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -98,7 +98,8 @@ static inline void measure_rtt(struct sock *sk, u32 srtt) } } -static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt) +static void measure_achieved_throughput(struct sock *sk, + u32 pkts_acked, s32 rtt) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); @@ -148,8 +149,8 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT) if (use_bandwidth_switch) { u32 maxB = ca->maxB; u32 old_maxB = ca->old_maxB; - ca->old_maxB = ca->maxB; + ca->old_maxB = ca->maxB; if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) { ca->beta = BETA_MIN; ca->modeswitch = 0; @@ -270,6 +271,7 @@ static void htcp_state(struct sock *sk, u8 new_state) case TCP_CA_Open: { struct htcp *ca = inet_csk_ca(sk); + if (ca->undo_last_cong) { ca->last_cong = jiffies; ca->undo_last_cong = 0; diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index d8f8f05a49516ec2d9213f10af77b82fbf32bff7..f963b274f2b0436755ebe8bb5586b1ec9682c336 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -29,7 +29,6 @@ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); - /* This is called to refresh values for hybla parameters */ static inline void hybla_recalc_param (struct sock *sk) { diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 5999b3972e6449d616facb628d27116ab8876ac2..1d5a30a90adf6194d3b2d2215276f52c0a3f4632 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -284,7 +284,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked) delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT; if (delta >= tp->snd_cwnd) { tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd, - (u32) tp->snd_cwnd_clamp); + (u32)tp->snd_cwnd_clamp); tp->snd_cwnd_cnt = 0; } } @@ -299,7 +299,6 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); } - /* Extract info for Tcp socket info provided via netlink. 
*/ static void tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0185eea59342a9318ba42a9eb2f77da900b53e91..00a41499d52c89c0961f6229d79f58022c25ba46 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -200,28 +200,25 @@ static inline bool tcp_in_quickack_mode(const struct sock *sk) return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; } -static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) +static void tcp_ecn_queue_cwr(struct tcp_sock *tp) { if (tp->ecn_flags & TCP_ECN_OK) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } -static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) +static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } -static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) +static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; } -static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { - if (!(tp->ecn_flags & TCP_ECN_OK)) - return; - switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, @@ -232,30 +229,43 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: + if (tcp_ca_needs_ecn((struct sock *)tp)) + tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); + if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ tcp_enter_quickack_mode((struct sock *)tp); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } - /* fallinto */ + tp->ecn_flags |= TCP_ECN_SEEN; + break; default: + if (tcp_ca_needs_ecn((struct sock *)tp)) + tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; + break; } } -static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) +static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) +{ + if (tp->ecn_flags & TCP_ECN_OK) + __tcp_ecn_check_ce(tp, skb); +} + +static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } -static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) +static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; } -static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) +static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) return true; @@ -652,7 +662,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) } icsk->icsk_ack.lrcvtime = now; - TCP_ECN_check_ce(tp, skb); + tcp_ecn_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); @@ -1294,9 +1304,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; - skb_shinfo(prev)->gso_segs += pcount; - BUG_ON(skb_shinfo(skb)->gso_segs < pcount); - skb_shinfo(skb)->gso_segs -= pcount; + tcp_skb_pcount_add(prev, pcount); + BUG_ON(tcp_skb_pcount(skb) < pcount); + tcp_skb_pcount_add(skb, -pcount); /* 
When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK @@ -1309,7 +1319,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, } /* CHECKME: To clear or not to clear? Mimics normal skb currently */ - if (skb_shinfo(skb)->gso_segs <= 1) { + if (tcp_skb_pcount(skb) <= 1) { skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; } @@ -1887,21 +1897,21 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) tp->sacked_out = 0; } -static void tcp_clear_retrans_partial(struct tcp_sock *tp) +void tcp_clear_retrans(struct tcp_sock *tp) { tp->retrans_out = 0; tp->lost_out = 0; - tp->undo_marker = 0; tp->undo_retrans = -1; + tp->fackets_out = 0; + tp->sacked_out = 0; } -void tcp_clear_retrans(struct tcp_sock *tp) +static inline void tcp_init_undo(struct tcp_sock *tp) { - tcp_clear_retrans_partial(tp); - - tp->fackets_out = 0; - tp->sacked_out = 0; + tp->undo_marker = tp->snd_una; + /* Retransmission still in flight may cause DSACKs later. */ + tp->undo_retrans = tp->retrans_out ? : -1; } /* Enter Loss state. If we detect SACK reneging, forget all SACK information @@ -1924,18 +1934,18 @@ void tcp_enter_loss(struct sock *sk) tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); + tcp_init_undo(tp); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; - tcp_clear_retrans_partial(tp); + tp->retrans_out = 0; + tp->lost_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); - tp->undo_marker = tp->snd_una; - skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { @@ -1949,9 +1959,6 @@ void tcp_enter_loss(struct sock *sk) if (skb == tcp_send_head(sk)) break; - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) - tp->undo_marker = 0; - TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; @@ -1971,7 +1978,7 @@ void tcp_enter_loss(struct sock *sk) sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; - TCP_ECN_queue_cwr(tp); + tcp_ecn_queue_cwr(tp); /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous * loss recovery is underway except recurring timeout(s) on @@ -2363,7 +2370,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) if (tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; - TCP_ECN_withdraw_cwr(tp); + tcp_ecn_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); @@ -2493,7 +2500,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk) tp->prr_delivered = 0; tp->prr_out = 0; tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); - TCP_ECN_queue_cwr(tp); + tcp_ecn_queue_cwr(tp); } static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked, @@ -2670,8 +2677,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->prior_ssthresh = 0; - tp->undo_marker = tp->snd_una; - tp->undo_retrans = tp->retrans_out ? 
: -1; + tcp_init_undo(tp); if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (!ece_ack) @@ -2970,7 +2976,8 @@ void tcp_rearm_rto(struct sock *sk) if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); - const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; + const u32 rto_time_stamp = + tcp_skb_timestamp(skb) + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. @@ -3210,9 +3217,10 @@ static void tcp_ack_probe(struct sock *sk) * This function is not for random using! */ } else { + unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), - TCP_RTO_MAX); + when, TCP_RTO_MAX); } } @@ -3363,6 +3371,14 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) } } +static inline void tcp_in_ack_event(struct sock *sk, u32 flags) +{ + const struct inet_connection_sock *icsk = inet_csk(sk); + + if (icsk->icsk_ca_ops->in_ack_event) + icsk->icsk_ca_ops->in_ack_event(sk, flags); +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@ -3422,10 +3438,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tp->snd_una = ack; flag |= FLAG_WIN_UPDATE; - tcp_ca_event(sk, CA_EVENT_FAST_ACK); + tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { + u32 ack_ev_flags = CA_ACK_SLOWPATH; + if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else @@ -3437,10 +3455,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_rtt_us); - if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) + if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { flag |= FLAG_ECE; + ack_ev_flags |= CA_ACK_ECE; + } + + if (flag & FLAG_WIN_UPDATE) + ack_ev_flags |= CA_ACK_WIN_UPDATE; - tcp_ca_event(sk, CA_EVENT_SLOW_ACK); + tcp_in_ack_event(sk, ack_ev_flags); } /* We passed data and got it acked, remove any soft error @@ -4062,6 +4085,44 @@ static void tcp_sack_remove(struct tcp_sock *tp) tp->rx_opt.num_sacks = num_sacks; } +/** + * tcp_try_coalesce - try to merge skb to prior one + * @sk: socket + * @to: prior buffer + * @from: buffer to add in queue + * @fragstolen: pointer to boolean + * + * Before queueing skb @from after @to, try to merge them + * to reduce overall memory use and queue lengths, if cost is small. + * Packets in ofo or receive queues can stay a long time. + * Better try to coalesce them right now to avoid future collapses. 
+ * Returns true if caller should free @from instead of queueing it + */ +static bool tcp_try_coalesce(struct sock *sk, + struct sk_buff *to, + struct sk_buff *from, + bool *fragstolen) +{ + int delta; + + *fragstolen = false; + + /* Its possible this segment overlaps with prior segment in queue */ + if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) + return false; + + if (!skb_try_coalesce(to, from, fragstolen, &delta)) + return false; + + atomic_add(delta, &sk->sk_rmem_alloc); + sk_mem_charge(sk, delta); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); + TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; + TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; + TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; + return true; +} + /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ @@ -4069,7 +4130,8 @@ static void tcp_ofo_queue(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; - struct sk_buff *skb; + struct sk_buff *skb, *tail; + bool fragstolen, eaten; while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) @@ -4082,9 +4144,9 @@ static void tcp_ofo_queue(struct sock *sk) tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); } + __skb_unlink(skb, &tp->out_of_order_queue); if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { SOCK_DEBUG(sk, "ofo packet was already received\n"); - __skb_unlink(skb, &tp->out_of_order_queue); __kfree_skb(skb); continue; } @@ -4092,11 +4154,15 @@ static void tcp_ofo_queue(struct sock *sk) tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); - __skb_unlink(skb, &tp->out_of_order_queue); - __skb_queue_tail(&sk->sk_receive_queue, skb); + tail = skb_peek_tail(&sk->sk_receive_queue); + eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - if (tcp_hdr(skb)->fin) + if (!eaten) + __skb_queue_tail(&sk->sk_receive_queue, skb); + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); + if (eaten) + kfree_skb_partial(skb, fragstolen); } } @@ -4123,53 +4189,13 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, return 0; } -/** - * tcp_try_coalesce - try to merge skb to prior one - * @sk: socket - * @to: prior buffer - * @from: buffer to add in queue - * @fragstolen: pointer to boolean - * - * Before queueing skb @from after @to, try to merge them - * to reduce overall memory use and queue lengths, if cost is small. - * Packets in ofo or receive queues can stay a long time. - * Better try to coalesce them right now to avoid future collapses. 
- * Returns true if caller should free @from instead of queueing it - */ -static bool tcp_try_coalesce(struct sock *sk, - struct sk_buff *to, - struct sk_buff *from, - bool *fragstolen) -{ - int delta; - - *fragstolen = false; - - if (tcp_hdr(from)->fin) - return false; - - /* Its possible this segment overlaps with prior segment in queue */ - if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) - return false; - - if (!skb_try_coalesce(to, from, fragstolen, &delta)) - return false; - - atomic_add(delta, &sk->sk_rmem_alloc); - sk_mem_charge(sk, delta); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); - TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; - TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; - return true; -} - static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb1; u32 seq, end_seq; - TCP_ECN_check_ce(tp, skb); + tcp_ecn_check_ce(tp, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); @@ -4308,24 +4334,19 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { - struct sk_buff *skb = NULL; - struct tcphdr *th; + struct sk_buff *skb; bool fragstolen; if (size == 0) return 0; - skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); + skb = alloc_skb(size, sk->sk_allocation); if (!skb) goto err; - if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th))) + if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; - th = (struct tcphdr *)skb_put(skb, sizeof(*th)); - skb_reset_transport_header(skb); - memset(th, 0, sizeof(*th)); - if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) goto err_free; @@ -4333,7 +4354,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; - if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { + if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { WARN_ON_ONCE(fragstolen); /* should not happen */ __kfree_skb(skb); } @@ -4347,7 +4368,6 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { - const struct tcphdr *th = tcp_hdr(skb); struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; bool fragstolen = false; @@ -4356,9 +4376,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) goto drop; skb_dst_drop(skb); - __skb_pull(skb, th->doff * 4); + __skb_pull(skb, tcp_hdr(skb)->doff * 4); - TCP_ECN_accept_cwr(tp, skb); + tcp_ecn_accept_cwr(tp, skb); tp->rx_opt.dsack = 0; @@ -4400,7 +4420,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; if (skb->len) tcp_event_data_recv(sk, skb); - if (th->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); if (!skb_queue_empty(&tp->out_of_order_queue)) { @@ -4515,7 +4535,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, * - bloated or contains data before "start" or * overlaps to the next one. */ - if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && + if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; @@ -4534,30 +4554,18 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, /* Decided to skip this, advance start seq. 
*/ start = TCP_SKB_CB(skb)->end_seq; } - if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) + if (end_of_skbs || + (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; while (before(start, end)) { + int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); struct sk_buff *nskb; - unsigned int header = skb_headroom(skb); - int copy = SKB_MAX_ORDER(header, 0); - /* Too big header? This can happen with IPv6. */ - if (copy < 0) - return; - if (end - start < copy) - copy = end - start; - nskb = alloc_skb(copy + header, GFP_ATOMIC); + nskb = alloc_skb(copy, GFP_ATOMIC); if (!nskb) return; - skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); - skb_set_network_header(nskb, (skb_network_header(skb) - - skb->head)); - skb_set_transport_header(nskb, (skb_transport_header(skb) - - skb->head)); - skb_reserve(nskb, header); - memcpy(nskb->head, skb->head, header); memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); @@ -4581,8 +4589,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || - tcp_hdr(skb)->syn || - tcp_hdr(skb)->fin) + (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; } } @@ -5386,7 +5393,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * state to ESTABLISHED..." */ - TCP_ECN_rcv_synack(tp, th); + tcp_ecn_rcv_synack(tp, th); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); tcp_ack(sk, skb, FLAG_SLOWPATH); @@ -5505,7 +5512,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; - TCP_ECN_rcv_syn(tp, th); + tcp_ecn_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); @@ -5835,6 +5842,40 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) #endif } +/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set + * + * If we receive a SYN packet with these bits set, it means a + * network is playing bad games with TOS bits. In order to + * avoid possible false congestion notifications, we disable + * TCP ECN negotiation. + * + * Exception: tcp_ca wants ECN. This is required for DCTCP + * congestion control; it requires setting ECT on all packets, + * including SYN. We invert the test in this case: if our + * local socket wants ECN, but peer only set ece/cwr (but not + * ECT in IP header) it's probably a non-DCTCP aware sender.
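For illustration only (not part of the patch): the negotiation rule described above, restated as a standalone predicate mirroring the cases in tcp_ecn_create_request() below; ecn_ok_for_request and its parameter names are illustrative, not kernel API.

#include <stdbool.h>

/* Whether ECN should be enabled for the incoming connection request. */
static bool ecn_ok_for_request(bool syn_has_ece_and_cwr, bool syn_ip_has_ect,
			       bool ca_needs_ecn, bool sysctl_tcp_ecn)
{
	if (!syn_has_ece_and_cwr)
		return false;		/* peer did not ask for ECN at all */
	if (!syn_ip_has_ect && !ca_needs_ecn && sysctl_tcp_ecn)
		return true;		/* classic RFC 3168 negotiation */
	if (syn_ip_has_ect && ca_needs_ecn)
		return true;		/* DCTCP-style: ECT set even on SYN */
	return false;
}

Only the two listed combinations enable ECN on the request; any other mix of bits leaves ecn_ok at zero.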
+ */ +static void tcp_ecn_create_request(struct request_sock *req, + const struct sk_buff *skb, + const struct sock *listen_sk) +{ + const struct tcphdr *th = tcp_hdr(skb); + const struct net *net = sock_net(listen_sk); + bool th_ecn = th->ece && th->cwr; + bool ect, need_ecn; + + if (!th_ecn) + return; + + ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); + need_ecn = tcp_ca_needs_ecn(listen_sk); + + if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn) + inet_rsk(req)->ecn_ok = 1; + else if (ect && need_ecn) + inet_rsk(req)->ecn_ok = 1; +} + int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) @@ -5843,7 +5884,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, struct request_sock *req; struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = NULL; - __u32 isn = TCP_SKB_CB(skb)->when; + __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; bool want_cookie = false, fastopen; struct flowi fl; struct tcp_fastopen_cookie foc = { .len = -1 }; @@ -5895,7 +5936,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_free; if (!want_cookie || tmp_opt.tstamp_ok) - TCP_ECN_create_request(req, skb, sock_net(sk)); + tcp_ecn_create_request(req, skb, sk); if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fbea536cf5c09ad56a2e0faed68c11cf0e15ce6e..552e87e3c269fccea0d48627832dd878310e2bea 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -89,7 +89,6 @@ int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; EXPORT_SYMBOL(sysctl_tcp_low_latency); - #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); @@ -430,15 +429,16 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) break; icsk->icsk_backoff--; - inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) : - TCP_TIMEOUT_INIT) << icsk->icsk_backoff; - tcp_bound_rto(sk); + icsk->icsk_rto = tp->srtt_us ? 
__tcp_set_rto(tp) : + TCP_TIMEOUT_INIT; + icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); skb = tcp_write_queue_head(sk); BUG_ON(!skb); - remaining = icsk->icsk_rto - min(icsk->icsk_rto, - tcp_time_stamp - TCP_SKB_CB(skb)->when); + remaining = icsk->icsk_rto - + min(icsk->icsk_rto, + tcp_time_stamp - tcp_skb_timestamp(skb)); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, @@ -680,8 +680,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) net = dev_net(skb_dst(skb)->dev); arg.tos = ip_hdr(skb)->tos; - ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); + ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, + ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, + &arg, arg.iov[0].iov_len); TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); @@ -763,8 +764,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, if (oif) arg.bound_dev_if = oif; arg.tos = tos; - ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); + ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, + ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, + &arg, arg.iov[0].iov_len); TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); } @@ -883,18 +885,16 @@ EXPORT_SYMBOL(tcp_syn_flood_action); */ static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb) { - const struct ip_options *opt = &(IPCB(skb)->opt); + const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; struct ip_options_rcu *dopt = NULL; if (opt && opt->optlen) { int opt_size = sizeof(*dopt) + opt->optlen; dopt = kmalloc(opt_size, GFP_ATOMIC); - if (dopt) { - if (ip_options_echo(&dopt->opt, skb)) { - kfree(dopt); - dopt = NULL; - } + if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) { + kfree(dopt); + dopt = NULL; } } return dopt; @@ -1268,7 +1268,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, - .syn_ack_timeout = tcp_syn_ack_timeout, + .syn_ack_timeout = tcp_syn_ack_timeout, }; static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { @@ -1428,7 +1428,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_SYN_COOKIES if (!th->syn) - sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); + sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt); #endif return sk; } @@ -1558,7 +1558,17 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) skb_queue_len(&tp->ucopy.prequeue) == 0) return false; - skb_dst_force(skb); + /* Before escaping RCU protected region, we need to take care of skb + * dst. Prequeue is only enabled for established sockets. + * For such sockets, we might need the skb dst only to set sk->sk_rx_dst + * Instead of doing full sk_rx_dst validity here, let's perform + * an optimistic check. + */ + if (likely(sk->sk_rx_dst)) + skb_dst_drop(skb); + else + skb_dst_force(skb); + __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (tp->ucopy.memory > sk->sk_rcvbuf) { @@ -1623,11 +1633,19 @@ int tcp_v4_rcv(struct sk_buff *skb) th = tcp_hdr(skb); iph = ip_hdr(skb); + /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() + * barrier() makes sure compiler wont play fool^Waliasing games. 
+ */ + memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), + sizeof(struct inet_skb_parm)); + barrier(); + TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->when = 0; + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; @@ -1754,9 +1772,11 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (dst) { + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + } } EXPORT_SYMBOL(inet_sk_rx_dst_set); @@ -2167,7 +2187,7 @@ int tcp_seq_open(struct inode *inode, struct file *file) s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; - s->last_pos = 0; + s->last_pos = 0; return 0; } EXPORT_SYMBOL(tcp_seq_open); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 1649988bd1b632f09506da9e685dc643c64161fd..63d2680b65db36c93737f8c72df66263dfde06bf 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -232,7 +232,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, u32 isn = tcptw->tw_snd_nxt + 65535 + 2; if (isn == 0) isn++; - TCP_SKB_CB(skb)->when = isn; + TCP_SKB_CB(skb)->tcp_tw_isn = isn; return TCP_TW_SYN; } @@ -393,8 +393,8 @@ void tcp_openreq_init_rwin(struct request_sock *req, } EXPORT_SYMBOL(tcp_openreq_init_rwin); -static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, - struct request_sock *req) +static void tcp_ecn_openreq_child(struct tcp_sock *tp, + const struct request_sock *req) { tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; } @@ -451,9 +451,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->snd_cwnd = TCP_INIT_CWND; newtp->snd_cwnd_cnt = 0; - if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops && - !try_module_get(newicsk->icsk_ca_ops->owner)) - newicsk->icsk_ca_ops = &tcp_init_congestion_ops; + if (!try_module_get(newicsk->icsk_ca_ops->owner)) + tcp_assign_congestion_control(newsk); tcp_set_ca_state(newsk, TCP_CA_Open); tcp_init_xmit_timers(newsk); @@ -508,7 +507,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; newtp->rx_opt.mss_clamp = req->mss; - TCP_ECN_openreq_child(newtp, req); + tcp_ecn_openreq_child(newtp, req); newtp->fastopen_rsk = NULL; newtp->syn_data_acked = 0; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index bc1b83cb8309f5d4737d7751e9542823e044b587..5b90f2f447a511703bbce1e22e8ef55888c43479 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -29,6 +29,28 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, } } +struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + if (!pskb_may_pull(skb, sizeof(struct tcphdr))) + return ERR_PTR(-EINVAL); + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + /* Set up checksum pseudo header, usually expect stack to + * have done this already. 
+ */ + + th->check = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + __tcp_v4_send_check(skb, iph->saddr, iph->daddr); + } + + return tcp_gso_segment(skb, features); +} + struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features) { @@ -44,9 +66,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, __sum16 newcheck; bool ooo_okay, copy_destructor; - if (!pskb_may_pull(skb, sizeof(*th))) - goto out; - th = tcp_hdr(skb); thlen = th->doff * 4; if (thlen < sizeof(*th)) @@ -269,54 +288,16 @@ int tcp_gro_complete(struct sk_buff *skb) } EXPORT_SYMBOL(tcp_gro_complete); -static int tcp_v4_gso_send_check(struct sk_buff *skb) -{ - const struct iphdr *iph; - struct tcphdr *th; - - if (!pskb_may_pull(skb, sizeof(*th))) - return -EINVAL; - - iph = ip_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - skb->ip_summed = CHECKSUM_PARTIAL; - __tcp_v4_send_check(skb, iph->saddr, iph->daddr); - return 0; -} - static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { - /* Use the IP hdr immediately proceeding for this transport */ - const struct iphdr *iph = skb_gro_network_header(skb); - __wsum wsum; - /* Don't bother verifying checksum if we're going to flush anyway. */ - if (NAPI_GRO_CB(skb)->flush) - goto skip_csum; - - wsum = NAPI_GRO_CB(skb)->csum; - - switch (skb->ip_summed) { - case CHECKSUM_NONE: - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), - 0); - - /* fall through */ - - case CHECKSUM_COMPLETE: - if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, - wsum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - + if (!NAPI_GRO_CB(skb)->flush && + skb_gro_checksum_validate(skb, IPPROTO_TCP, + inet_gro_compute_pseudo)) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } -skip_csum: return tcp_gro_receive(head, skb); } @@ -334,8 +315,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff) static const struct net_offload tcpv4_offload = { .callbacks = { - .gso_send_check = tcp_v4_gso_send_check, - .gso_segment = tcp_gso_segment, + .gso_segment = tcp4_gso_segment, .gro_receive = tcp4_gro_receive, .gro_complete = tcp4_gro_complete, }, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5a7c41fbc6d3399686ff47ab5102ef7cc1a9ea42..8d4eac79370057907e7f6031ba1fedbf120227dd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -318,36 +318,47 @@ static u16 tcp_select_window(struct sock *sk) } /* Packet ECN state for a SYN-ACK */ -static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) +static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) { + const struct tcp_sock *tp = tcp_sk(sk); + TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; if (!(tp->ecn_flags & TCP_ECN_OK)) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; + else if (tcp_ca_needs_ecn(sk)) + INET_ECN_xmit(sk); } /* Packet ECN state for a SYN. 
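/* Editorial sketch, not part of the patch: "setting up the checksum pseudo
 * header" for CHECKSUM_PARTIAL, as tcp4_gso_segment() earlier in this hunk
 * delegates to __tcp_v4_send_check(), is assumed to follow the same pattern
 * as the removed udp4_ufo_send_check() later in this patch: seed the
 * checksum field with the inverted pseudo-header sum and point
 * csum_start/csum_offset at it.
 */
static void example_tcp_v4_pseudo_csum(struct sk_buff *skb,
                                       __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~csum_tcpudp_magic(saddr, daddr, skb->len,
                                       IPPROTO_TCP, 0);
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
}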
*/ -static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) +static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); tp->ecn_flags = 0; - if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) { + if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || + tcp_ca_needs_ecn(sk)) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; + if (tcp_ca_needs_ecn(sk)) + INET_ECN_xmit(sk); } } -static __inline__ void -TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) +static void +tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th, + struct sock *sk) { - if (inet_rsk(req)->ecn_ok) + if (inet_rsk(req)->ecn_ok) { th->ece = 1; + if (tcp_ca_needs_ecn(sk)) + INET_ECN_xmit(sk); + } } /* Set up ECN state for a packet on a ESTABLISHED socket that is about to * be sent. */ -static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, +static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, int tcp_header_len) { struct tcp_sock *tp = tcp_sk(sk); @@ -362,7 +373,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, tcp_hdr(skb)->cwr = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; } - } else { + } else if (!tcp_ca_needs_ecn(sk)) { /* ACK or retransmitted segment: clear ECT|CE */ INET_ECN_dontxmit(sk); } @@ -384,7 +395,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) TCP_SKB_CB(skb)->tcp_flags = flags; TCP_SKB_CB(skb)->sacked = 0; - shinfo->gso_segs = 1; + tcp_skb_pcount_set(skb, 1); shinfo->gso_size = 0; shinfo->gso_type = 0; @@ -550,7 +561,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { opts->options |= OPTION_TS; - opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; + opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } @@ -618,7 +629,7 @@ static unsigned int tcp_synack_options(struct sock *sk, } if (likely(ireq->tstamp_ok)) { opts->options |= OPTION_TS; - opts->tsval = TCP_SKB_CB(skb)->when; + opts->tsval = tcp_skb_timestamp(skb); opts->tsecr = req->ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } @@ -647,7 +658,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { - struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); unsigned int size = 0; unsigned int eff_sacks; @@ -666,7 +676,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb if (likely(tp->rx_opt.tstamp_ok)) { opts->options |= OPTION_TS; - opts->tsval = tcb ? tcb->when + tp->tsoffset : 0; + opts->tsval = skb ? 
tcp_skb_timestamp(skb) + tp->tsoffset : 0; opts->tsecr = tp->rx_opt.ts_recent; size += TCPOLEN_TSTAMP_ALIGNED; } @@ -886,8 +896,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, skb = skb_clone(skb, gfp_mask); if (unlikely(!skb)) return -ENOBUFS; - /* Our usage of tstamp should remain private */ - skb->tstamp.tv64 = 0; } inet = inet_sk(sk); @@ -952,7 +960,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_options_write((__be32 *)(th + 1), tp, &opts); if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) - TCP_ECN_send(sk, skb, tcp_header_size); + tcp_ecn_send(sk, skb, tcp_header_size); #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ @@ -975,7 +983,18 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); + /* OK, its time to fill skb_shinfo(skb)->gso_segs */ + skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); + + /* Our usage of tstamp should remain private */ + skb->tstamp.tv64 = 0; + + /* Cleanup our debris for IP stacks */ + memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), + sizeof(struct inet6_skb_parm))); + err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); + if (likely(err <= 0)) return err; @@ -995,7 +1014,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; - skb_header_release(skb); + __skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); @@ -1014,11 +1033,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, /* Avoid the costly divide in the normal * non-TSO case. */ - shinfo->gso_segs = 1; + tcp_skb_pcount_set(skb, 1); shinfo->gso_size = 0; shinfo->gso_type = 0; } else { - shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now); + tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); shinfo->gso_size = mss_now; shinfo->gso_type = sk->sk_gso_type; } @@ -1146,10 +1165,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, buff->ip_summed = skb->ip_summed; - /* Looks stupid, but our code really uses when of - * skbs, which it never sent before. --ANK - */ - TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; buff->tstamp = skb->tstamp; tcp_fragment_tstamp(skb, buff); @@ -1171,7 +1186,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, } /* Link BUFF into the send queue. */ - skb_header_release(buff); + __skb_header_release(buff); tcp_insert_write_queue_after(skb, buff, sk); return 0; @@ -1675,7 +1690,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, tcp_set_skb_tso_segs(sk, buff, mss_now); /* Link BUFF into the send queue. */ - skb_header_release(buff); + __skb_header_release(buff); tcp_insert_write_queue_after(skb, buff, sk); return 0; @@ -1874,8 +1889,8 @@ static int tcp_mtu_probe(struct sock *sk) tcp_init_tso_segs(sk, nskb, nskb->len); /* We're ready to send. If this fails, the probe will - * be resegmented into mss-sized pieces by tcp_write_xmit(). */ - TCP_SKB_CB(nskb)->when = tcp_time_stamp; + * be resegmented into mss-sized pieces by tcp_write_xmit(). + */ if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { /* Decrement cwnd here because we are sending * effectively two packets. 
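/* Editorial sketch, not part of the patch: the repeated
 * TCP_SKB_CB(skb)->when -> tcp_skb_timestamp(skb) conversions above assume
 * a helper that reads the jiffies-resolution view of the new
 * skb->skb_mstamp field (cf. the stamp_jiffies use in tcp_make_synack()
 * later in this patch), roughly:
 */
static inline u32 example_tcp_skb_timestamp(const struct sk_buff *skb)
{
        return skb->skb_mstamp.stamp_jiffies;
}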
*/ @@ -1935,8 +1950,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, BUG_ON(!tso_segs); if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { - /* "when" is used as a start point for the retransmit timer */ - TCP_SKB_CB(skb)->when = tcp_time_stamp; + /* "skb_mstamp" is used as a start point for the retransmit timer */ + skb_mstamp_get(&skb->skb_mstamp); goto repair; /* Skip network transmission */ } @@ -2000,8 +2015,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) break; - TCP_SKB_CB(skb)->when = tcp_time_stamp; - if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; @@ -2097,10 +2110,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) static bool skb_still_in_host_queue(const struct sock *sk, const struct sk_buff *skb) { - const struct sk_buff *fclone = skb + 1; - - if (unlikely(skb->fclone == SKB_FCLONE_ORIG && - fclone->fclone == SKB_FCLONE_CLONE)) { + if (unlikely(skb_fclone_busy(skb))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); return true; @@ -2499,7 +2509,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Make a copy, if the first transmission SKB clone we made * is still in somebody's hands, else make a clone. */ - TCP_SKB_CB(skb)->when = tcp_time_stamp; /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom @@ -2544,7 +2553,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Save stamp of the first retransmit. */ if (!tp->retrans_stamp) - tp->retrans_stamp = TCP_SKB_CB(skb)->when; + tp->retrans_stamp = tcp_skb_timestamp(skb); /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). @@ -2752,7 +2761,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); /* Send it off. 
*/ - TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); @@ -2780,7 +2788,7 @@ int tcp_send_synack(struct sock *sk) if (nskb == NULL) return -ENOMEM; tcp_unlink_write_queue(skb, sk); - skb_header_release(nskb); + __skb_header_release(nskb); __tcp_add_write_queue_head(sk, nskb); sk_wmem_free_skb(sk, skb); sk->sk_wmem_queued += nskb->truesize; @@ -2789,9 +2797,8 @@ int tcp_send_synack(struct sock *sk) } TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; - TCP_ECN_send_synack(tcp_sk(sk), skb); + tcp_ecn_send_synack(sk, skb); } - TCP_SKB_CB(skb)->when = tcp_time_stamp; return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } @@ -2835,10 +2842,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) - TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); + skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); else #endif - TCP_SKB_CB(skb)->when = tcp_time_stamp; + skb_mstamp_get(&skb->skb_mstamp); tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, foc) + sizeof(*th); @@ -2849,7 +2856,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; - TCP_ECN_make_synack(req, th); + tcp_ecn_make_synack(req, th, sk); th->source = htons(ireq->ir_num); th->dest = ireq->ir_rmt_port; /* Setting of flags are superfluous here for callers (and ECE is @@ -2956,7 +2963,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); tcb->end_seq += skb->len; - skb_header_release(skb); + __skb_header_release(skb); __tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); @@ -3086,9 +3093,9 @@ int tcp_connect(struct sock *sk) skb_reserve(buff, MAX_TCP_HEADER); tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); - tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; + tp->retrans_stamp = tcp_time_stamp; tcp_connect_queue_skb(sk, buff); - TCP_ECN_send_syn(sk, buff); + tcp_ecn_send_syn(sk, buff); /* Send off SYN; include data in Fast Open. */ err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : @@ -3120,6 +3127,8 @@ void tcp_send_delayed_ack(struct sock *sk) int ato = icsk->icsk_ack.ato; unsigned long timeout; + tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); + if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; @@ -3176,6 +3185,8 @@ void tcp_send_ack(struct sock *sk) if (sk->sk_state == TCP_CLOSE) return; + tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); + /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. @@ -3194,9 +3205,10 @@ void tcp_send_ack(struct sock *sk) tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); /* Send it off, this clears delayed acks for us. */ - TCP_SKB_CB(buff)->when = tcp_time_stamp; + skb_mstamp_get(&buff->skb_mstamp); tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); } +EXPORT_SYMBOL_GPL(tcp_send_ack); /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. @@ -3226,7 +3238,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) * send it. 
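/* Editorial sketch, not part of the patch: with CA_EVENT_DELAYED_ACK and
 * CA_EVENT_NON_DELAYED_ACK reported from tcp_send_delayed_ack() and
 * tcp_send_ack() above, a congestion control module can observe ACK
 * scheduling from its cwnd_event hook.  Everything named "example_*" here
 * is hypothetical.
 */
struct example_ca {
        u32 delayed_acks;
        u32 immediate_acks;
};

static void example_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
        struct example_ca *ca = inet_csk_ca(sk);

        if (ev == CA_EVENT_DELAYED_ACK)
                ca->delayed_acks++;
        else if (ev == CA_EVENT_NON_DELAYED_ACK)
                ca->immediate_acks++;
}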
*/ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); - TCP_SKB_CB(skb)->when = tcp_time_stamp; + skb_mstamp_get(&skb->skb_mstamp); return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } @@ -3270,7 +3282,6 @@ int tcp_write_wakeup(struct sock *sk) tcp_set_skb_tso_segs(sk, skb, mss); TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; - TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) tcp_event_new_data_sent(sk, skb); @@ -3289,6 +3300,7 @@ void tcp_send_probe0(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + unsigned long probe_max; int err; err = tcp_write_wakeup(sk); @@ -3304,9 +3316,7 @@ void tcp_send_probe0(struct sock *sk) if (icsk->icsk_backoff < sysctl_tcp_retries2) icsk->icsk_backoff++; icsk->icsk_probes_out++; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), - TCP_RTO_MAX); + probe_max = TCP_RTO_MAX; } else { /* If packet was not sent due to local congestion, * do not backoff and do not remember icsk_probes_out. @@ -3316,11 +3326,11 @@ void tcp_send_probe0(struct sock *sk) */ if (!icsk->icsk_probes_out) icsk->icsk_probes_out = 1; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, - TCP_RESOURCE_PROBE_INTERVAL), - TCP_RTO_MAX); + probe_max = TCP_RESOURCE_PROBE_INTERVAL; } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, + inet_csk_rto_backoff(icsk, probe_max), + TCP_RTO_MAX); } int tcp_rtx_synack(struct sock *sk, struct request_sock *req) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 3b66610d41562035c541304924fc27a4eb416a6e..ebf5ff57526eab65018005c47a907545e61cacf6 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -83,7 +83,6 @@ static struct { struct tcp_log *log; } tcp_probe; - static inline int tcp_probe_used(void) { return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); @@ -101,7 +100,6 @@ static inline int tcp_probe_avail(void) si4.sin_addr.s_addr = inet->inet_##mem##addr; \ } while (0) \ - /* * Hook inserted to be called before each receive packet. * Note: arguments must match tcp_rcv_established()! @@ -194,8 +192,8 @@ static int tcpprobe_sprint(char *tbuf, int n) return scnprintf(tbuf, n, "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n", - (unsigned long) tv.tv_sec, - (unsigned long) tv.tv_nsec, + (unsigned long)tv.tv_sec, + (unsigned long)tv.tv_nsec, &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una, p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); } diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 8250949b88538db4e0f2d318050518855e74cf3d..6824afb65d9335532fe2bf61edce82cab8c3fd9c 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -31,10 +31,10 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } - static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, .cong_avoid = tcp_scalable_cong_avoid, diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index df90cd1ce37f71b145d3f5cde4bf1d9b4a4cfd22..9b21ae8b2e31d85acf2822baf9de0b96e7162d39 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -52,7 +52,7 @@ static void tcp_write_err(struct sock *sk) * limit. * 2. If we have strong memory pressure. 
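/* Editorial sketch, not part of the patch: tcp_send_probe0() above now
 * passes a per-case cap to inet_csk_rto_backoff() instead of open-coding
 * min(icsk_rto << icsk_backoff, ...).  The helper is assumed to look
 * roughly like this:
 */
static inline unsigned long
example_rto_backoff(const struct inet_connection_sock *icsk,
                    unsigned long max_when)
{
        u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

        return (unsigned long)min_t(u64, when, max_when);
}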
*/ -static int tcp_out_of_resources(struct sock *sk, int do_reset) +static int tcp_out_of_resources(struct sock *sk, bool do_reset) { struct tcp_sock *tp = tcp_sk(sk); int shift = 0; @@ -72,7 +72,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset) if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || /* 2. Window is closed. */ (!tp->snd_wnd && !tp->packets_out)) - do_reset = 1; + do_reset = true; if (do_reset) tcp_send_active_reset(sk, GFP_ATOMIC); tcp_done(sk); @@ -135,10 +135,9 @@ static bool retransmits_timed_out(struct sock *sk, if (!inet_csk(sk)->icsk_retransmits) return false; - if (unlikely(!tcp_sk(sk)->retrans_stamp)) - start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; - else - start_ts = tcp_sk(sk)->retrans_stamp; + start_ts = tcp_sk(sk)->retrans_stamp; + if (unlikely(!start_ts)) + start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk)); if (likely(timeout == 0)) { linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); @@ -181,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk) retry_until = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = (icsk->icsk_rto < TCP_RTO_MAX); + const int alive = icsk->icsk_rto < TCP_RTO_MAX; retry_until = tcp_orphan_retries(sk, alive); do_reset = alive || @@ -271,40 +270,41 @@ static void tcp_probe_timer(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int max_probes; + u32 start_ts; if (tp->packets_out || !tcp_send_head(sk)) { icsk->icsk_probes_out = 0; return; } - /* *WARNING* RFC 1122 forbids this - * - * It doesn't AFAIK, because we kill the retransmit timer -AK - * - * FIXME: We ought not to do it, Solaris 2.5 actually has fixing - * this behaviour in Solaris down as a bug fix. [AC] - * - * Let me to explain. icsk_probes_out is zeroed by incoming ACKs - * even if they advertise zero window. Hence, connection is killed only - * if we received no ACKs for normal connection timeout. It is not killed - * only because window stays zero for some time, window may be zero - * until armageddon and even later. We are in full accordance - * with RFCs, only probe timer combines both retransmission timeout - * and probe timeout in one bottle. --ANK + /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as + * long as the receiver continues to respond probes. We support this by + * default and reset icsk_probes_out with incoming ACKs. But if the + * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we + * kill the socket when the retry count and the time exceeds the + * corresponding system limit. We also implement similar policy when + * we use RTO to probe window in tcp_retransmit_timer(). 
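/* Editorial sketch (userspace side, not part of the patch): under the
 * probe-timer policy described above and implemented just below,
 * TCP_USER_TIMEOUT also bounds zero-window probing, so an application can
 * cap how long a stalled connection survives:
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int example_set_user_timeout(int fd, unsigned int timeout_ms)
{
        return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                          &timeout_ms, sizeof(timeout_ms));
}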
*/ - max_probes = sysctl_tcp_retries2; + start_ts = tcp_skb_timestamp(tcp_send_head(sk)); + if (!start_ts) + skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp); + else if (icsk->icsk_user_timeout && + (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout) + goto abort; + max_probes = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); + const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; max_probes = tcp_orphan_retries(sk, alive); - - if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) + if (!alive && icsk->icsk_backoff >= max_probes) + goto abort; + if (tcp_out_of_resources(sk, true)) return; } if (icsk->icsk_probes_out > max_probes) { - tcp_write_err(sk); +abort: tcp_write_err(sk); } else { /* Only send another probe if we didn't close things up. */ tcp_send_probe0(sk); diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index b40ad897f94509201988b6b9cc589e599d036851..a6afde666ab1284ea53c24ce80b2c287ff42073b 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -51,7 +51,6 @@ MODULE_PARM_DESC(beta, "upper bound of packets in network"); module_param(gamma, int, 0644); MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); - /* There are several situations when we must "re-start" Vegas: * * o when a connection is established @@ -133,7 +132,6 @@ EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked); void tcp_vegas_state(struct sock *sk, u8 ca_state) { - if (ca_state == TCP_CA_Open) vegas_enable(sk); else @@ -285,7 +283,6 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* Use normal slow start */ else if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp, acked); - } /* Extract info for Tcp socket info provided via netlink. */ diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 8276977d2c854729d9c7a0e3e501b2dee9db4c3d..a4d2d2d88dcae7c00cf4db83d8b13ce6b143b3b4 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -175,7 +175,6 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) } else tp->snd_cwnd_cnt++; } - } if (tp->snd_cwnd < 2) tp->snd_cwnd = 2; diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index b94a04ae2ed5672eca79a172c5c3467a677186a1..bb63fba47d47837b615c6bc6b730e36eead6328f 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -42,7 +42,6 @@ struct westwood { u8 reset_rtt_min; /* Reset RTT min to next RTT sample*/ }; - /* TCP Westwood functions and constants */ #define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ #define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ @@ -153,7 +152,6 @@ static inline void update_rtt_min(struct westwood *w) w->rtt_min = min(w->rtt, w->rtt_min); } - /* * @westwood_fast_bw * It is called when we are in fast path. 
In particular it is called when @@ -208,7 +206,6 @@ static inline u32 westwood_acked_count(struct sock *sk) return w->cumul_ack; } - /* * TCP Westwood * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it @@ -219,47 +216,51 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct westwood *w = inet_csk_ca(sk); + return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); } +static void tcp_westwood_ack(struct sock *sk, u32 ack_flags) +{ + if (ack_flags & CA_ACK_SLOWPATH) { + struct westwood *w = inet_csk_ca(sk); + + westwood_update_window(sk); + w->bk += westwood_acked_count(sk); + + update_rtt_min(w); + return; + } + + westwood_fast_bw(sk); +} + static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) { struct tcp_sock *tp = tcp_sk(sk); struct westwood *w = inet_csk_ca(sk); switch (event) { - case CA_EVENT_FAST_ACK: - westwood_fast_bw(sk); - break; - case CA_EVENT_COMPLETE_CWR: tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); break; - case CA_EVENT_LOSS: tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); /* Update RTT_min when next ack arrives */ w->reset_rtt_min = 1; break; - - case CA_EVENT_SLOW_ACK: - westwood_update_window(sk); - w->bk += westwood_acked_count(sk); - update_rtt_min(w); - break; - default: /* don't care */ break; } } - /* Extract info for Tcp socket info provided via netlink. */ static void tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb) { const struct westwood *ca = inet_csk_ca(sk); + if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { struct tcpvegas_info info = { .tcpv_enabled = 1, @@ -271,12 +272,12 @@ static void tcp_westwood_info(struct sock *sk, u32 ext, } } - static struct tcp_congestion_ops tcp_westwood __read_mostly = { .init = tcp_westwood_init, .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_reno_cong_avoid, .cwnd_event = tcp_westwood_event, + .in_ack_event = tcp_westwood_ack, .get_info = tcp_westwood_info, .pkts_acked = tcp_westwood_pkts_acked, diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 599b79b8eac07298a34cff43ae87e546bdb36847..cd72732185989b41d1a3f9167eacbf44c235cc01 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -54,10 +54,8 @@ static void tcp_yeah_init(struct sock *sk) /* Ensure the MD arithmetic works. This is somewhat pedantic, * since I don't think we will see a cwnd this large. :) */ tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); - } - static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -84,7 +82,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* Scalable */ tp->snd_cwnd_cnt += yeah->pkts_acked; - if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ + if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) { if (tp->snd_cwnd < tp->snd_cwnd_clamp) tp->snd_cwnd++; tp->snd_cwnd_cnt = 0; @@ -120,7 +118,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) */ if (after(ack, yeah->vegas.beg_snd_nxt)) { - /* We do the Vegas calculations only if we got enough RTT * samples that we can be reasonably sure that we got * at least one RTT sample that wasn't from a delayed ACK. 
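/* Editorial sketch, not part of the patch: the new .in_ack_event hook wired
 * up for Westwood above is assumed to be driven from the ACK processing
 * path roughly like this, with CA_ACK_SLOWPATH set when the slow path ran,
 * which replaces the old CA_EVENT_FAST_ACK/CA_EVENT_SLOW_ACK cwnd events:
 */
static inline void example_in_ack_event(struct sock *sk, u32 flags)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->in_ack_event)
                icsk->icsk_ca_ops->in_ack_event(sk, flags);
}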
@@ -189,7 +186,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } yeah->lastQ = queue; - } /* Save the extent of the current window so we can use this @@ -205,7 +201,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } } -static u32 tcp_yeah_ssthresh(struct sock *sk) { +static u32 tcp_yeah_ssthresh(struct sock *sk) +{ const struct tcp_sock *tp = tcp_sk(sk); struct yeah *yeah = inet_csk_ca(sk); u32 reduction; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f57c0e4c2326fd1a7d4b85cbdb4304657e5660f8..cd0db5471bb5eb6048208404aab71253867e1eec 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -99,6 +99,7 @@ #include #include #include +#include #include #include #include @@ -224,7 +225,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, remaining = (high - low) + 1; rand = prandom_u32(); - first = (((u64)rand * remaining) >> 32) + low; + first = reciprocal_scale(rand, remaining) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ @@ -448,7 +449,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -529,7 +530,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -1787,6 +1788,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; + if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_compute_pseudo); + ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); @@ -1967,7 +1972,7 @@ void udp_v4_early_demux(struct sk_buff *skb) return; skb->sk = sk; - skb->destructor = sock_edemux; + skb->destructor = sock_efree; dst = sk->sk_rx_dst; if (dst) diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 59035bc3008d2bc45ec87e9eea4145120bf664be..507310ef4b5681bd8e59ba779e0560ecc3e5d04e 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -25,30 +25,11 @@ struct udp_offload_priv { struct udp_offload_priv __rcu *next; }; -static int udp4_ufo_send_check(struct sk_buff *skb) -{ - if (!pskb_may_pull(skb, sizeof(struct udphdr))) - return -EINVAL; - - if (likely(!skb->encapsulation)) { - const struct iphdr *iph; - struct udphdr *uh; - - iph = ip_hdr(skb); - uh = udp_hdr(skb); - - uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; - } - - return 0; -} - -struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features, + struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, + netdev_features_t features), + __be16 new_protocol) { struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; @@ -70,7 +51,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); - skb->protocol = htons(ETH_P_TEB); + skb->protocol = new_protocol; need_csum = 
!!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); if (need_csum) @@ -78,7 +59,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, /* segment inner packet. */ enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); - segs = skb_mac_gso_segment(skb, enc_features); + segs = gso_inner_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len); @@ -123,21 +104,63 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, return segs; } +struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features, + bool is_ipv6) +{ + __be16 protocol = skb->protocol; + const struct net_offload **offloads; + const struct net_offload *ops; + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, + netdev_features_t features); + + rcu_read_lock(); + + switch (skb->inner_protocol_type) { + case ENCAP_TYPE_ETHER: + protocol = skb->inner_protocol; + gso_inner_segment = skb_mac_gso_segment; + break; + case ENCAP_TYPE_IPPROTO: + offloads = is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[skb->inner_ipproto]); + if (!ops || !ops->callbacks.gso_segment) + goto out_unlock; + gso_inner_segment = ops->callbacks.gso_segment; + break; + default: + goto out_unlock; + } + + segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, + protocol); + +out_unlock: + rcu_read_unlock(); + + return segs; +} + static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; - int offset; __wsum csum; + struct udphdr *uh; + struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { - segs = skb_udp_tunnel_segment(skb, features); + segs = skb_udp_tunnel_segment(skb, features, false); goto out; } + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; @@ -165,10 +188,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, * HW cannot do checksum of UDP packets sent as multiple * IP fragments. */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); + + uh = udp_hdr(skb); + iph = ip_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + skb->ip_summed = CHECKSUM_NONE; /* Fragment the skb. 
IP headers of the fragments are updated in @@ -228,30 +257,24 @@ void udp_del_offload(struct udp_offload *uo) } EXPORT_SYMBOL(udp_del_offload); -static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; - struct udphdr *uh, *uh2; - unsigned int hlen, off; + struct udphdr *uh2; + unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->udp_mark || - (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE)) + (skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the udp gro layer */ NAPI_GRO_CB(skb)->udp_mark = 1; - off = skb_gro_offset(skb); - hlen = off + sizeof(*uh); - uh = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - uh = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!uh)) - goto out; - } - rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { @@ -269,7 +292,12 @@ static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *s continue; uh2 = (struct udphdr *)(p->data + off); - if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) { + + /* Match ports and either checksums are either both zero + * or nonzero. + */ + if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || + (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } @@ -277,6 +305,7 @@ static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *s skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); + NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb); out_unlock: @@ -286,7 +315,34 @@ static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *s return pp; } -static int udp_gro_complete(struct sk_buff *skb, int nhoff) +static struct sk_buff **udp4_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct udphdr *uh = udp_gro_udphdr(skb); + + if (unlikely(!uh)) + goto flush; + + /* Don't bother verifying checksum if we're going to flush anyway. 
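/* Editorial note as code, not part of the patch: the UDP GRO flow-match
 * test added above rejects a candidate when exactly one of the two UDP
 * checksums is zero.  A more verbose equivalent of
 * (!uh->check ^ !uh2->check):
 */
static inline bool example_udp_csum_state_differs(const struct udphdr *a,
                                                  const struct udphdr *b)
{
        bool a_zero = !a->check;
        bool b_zero = !b->check;

        return a_zero != b_zero;
}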
*/ + if (NAPI_GRO_CB(skb)->flush) + goto skip; + + if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, + inet_gro_compute_pseudo)) + goto flush; + else if (uh->check) + skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_gro_compute_pseudo); +skip: + NAPI_GRO_CB(skb)->is_ipv6 = 0; + return udp_gro_receive(head, skb, uh); + +flush: + NAPI_GRO_CB(skb)->flush = 1; + return NULL; +} + +int udp_gro_complete(struct sk_buff *skb, int nhoff) { struct udp_offload_priv *uo_priv; __be16 newlen = htons(skb->len - nhoff); @@ -304,19 +360,32 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff) break; } - if (uo_priv != NULL) + if (uo_priv != NULL) { + NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr)); + } rcu_read_unlock(); return err; } +static int udp4_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct iphdr *iph = ip_hdr(skb); + struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); + + if (uh->check) + uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, + iph->daddr, 0); + + return udp_gro_complete(skb, nhoff); +} + static const struct net_offload udpv4_offload = { .callbacks = { - .gso_send_check = udp4_ufo_send_check, .gso_segment = udp4_ufo_fragment, - .gro_receive = udp_gro_receive, - .gro_complete = udp_gro_complete, + .gro_receive = udp4_gro_receive, + .gro_complete = udp4_gro_complete, }, }; diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 61ec1a65207efe07155f4e259a31659d3d183f9d..1671263e5fa0eae2e6a7ebad40f912cae002bc53 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -8,83 +8,40 @@ #include #include -int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp) +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) { - int err = -EINVAL; + int err; struct socket *sock = NULL; + struct sockaddr_in udp_addr; -#if IS_ENABLED(CONFIG_IPV6) - if (cfg->family == AF_INET6) { - struct sockaddr_in6 udp6_addr; + err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; - err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); - if (err < 0) - goto error; - - sk_change_net(sock->sk, net); - - udp6_addr.sin6_family = AF_INET6; - memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, - sizeof(udp6_addr.sin6_addr)); - udp6_addr.sin6_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, - sizeof(udp6_addr)); - if (err < 0) - goto error; - - if (cfg->peer_udp_port) { - udp6_addr.sin6_family = AF_INET6; - memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, - sizeof(udp6_addr.sin6_addr)); - udp6_addr.sin6_port = cfg->peer_udp_port; - err = kernel_connect(sock, - (struct sockaddr *)&udp6_addr, - sizeof(udp6_addr), 0); - } - if (err < 0) - goto error; + sk_change_net(sock->sk, net); - udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); - udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); - } else -#endif - if (cfg->family == AF_INET) { - struct sockaddr_in udp_addr; - - err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); - if (err < 0) - goto error; - - sk_change_net(sock->sk, net); + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + if (cfg->peer_udp_port) { udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->local_ip; - udp_addr.sin_port = 
cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp_addr, - sizeof(udp_addr)); + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); if (err < 0) goto error; - - if (cfg->peer_udp_port) { - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->peer_ip; - udp_addr.sin_port = cfg->peer_udp_port; - err = kernel_connect(sock, - (struct sockaddr *)&udp_addr, - sizeof(udp_addr), 0); - if (err < 0) - goto error; - } - - sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; - } else { - return -EPFNOSUPPORT; } + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; *sockp = sock; - return 0; error: @@ -95,6 +52,57 @@ int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, *sockp = NULL; return err; } -EXPORT_SYMBOL(udp_sock_create); +EXPORT_SYMBOL(udp_sock_create4); + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + struct sock *sk = sock->sk; + + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + + /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ + udp_set_convert_csum(sk, true); + + rcu_assign_sk_user_data(sk, cfg->sk_user_data); + + udp_sk(sk)->encap_type = cfg->encap_type; + udp_sk(sk)->encap_rcv = cfg->encap_rcv; + udp_sk(sk)->encap_destroy = cfg->encap_destroy; + + udp_tunnel_encap_enable(sock); +} +EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); + +int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet) +{ + struct udphdr *uh; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + + udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len); + + return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP, + tos, ttl, df, xnet); +} +EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); + +void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} +EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); MODULE_LICENSE("GPL"); diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 2fe68364bb20610c39f20a21b4d32498ff5741d1..2e8c06108ab9b8bcd572db2372c4f0ef02672eb2 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -45,3 +45,7 @@ obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o + +ifneq ($(CONFIG_IPV6),) +obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o +endif diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3e118dfddd02a0c16051b49ffba0953daed54225..725c763270a067e3945ec3056c4f5893cf9f7060 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -180,7 +180,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, - .use_tempaddr = 0, + .use_tempaddr = 0, .temp_valid_lft = TEMP_VALID_LIFETIME, .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, .regen_max_retry = REGEN_MAX_RETRY, @@ -1105,8 +1105,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i spin_unlock_bh(&ifp->lock); regen_advance = idev->cnf.regen_max_retry * - 
idev->cnf.dad_transmits * - NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; + idev->cnf.dad_transmits * + NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; write_unlock_bh(&idev->lock); /* A temporary address is created only if this calculated Preferred @@ -1725,7 +1725,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); if (ipv6_addr_any(&addr)) return; - ipv6_dev_ac_inc(ifp->idev->dev, &addr); + __ipv6_dev_ac_inc(ifp->idev, &addr); } /* caller must hold RTNL */ @@ -2844,6 +2844,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (dev->flags & IFF_SLAVE) break; + if (idev && idev->cnf.disable_ipv6) + break; + if (event == NETDEV_UP) { if (!addrconf_qdisc_ok(dev)) { /* device is not ready yet. */ @@ -3030,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) struct hlist_head *h = &inet6_addr_lst[i]; spin_lock_bh(&addrconf_hash_lock); - restart: +restart: hlist_for_each_entry_rcu(ifa, h, addr_lst) { if (ifa->idev == idev) { hlist_del_init_rcu(&ifa->addr_lst); @@ -3544,8 +3547,8 @@ static void __net_exit if6_proc_net_exit(struct net *net) } static struct pernet_operations if6_proc_net_ops = { - .init = if6_proc_net_init, - .exit = if6_proc_net_exit, + .init = if6_proc_net_init, + .exit = if6_proc_net_exit, }; int __init if6_proc_init(void) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2daa3a133e498cdccfe5695ee62db7c72da8ce25..e8c4400f23e9b4afe47fcc45bb761d82354fb6a6 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -7,15 +7,15 @@ * * Adapted from linux/net/ipv4/af_inet.c * - * Fixes: + * Fixes: * piggy, Karl Knutson : Socket protocol table - * Hideaki YOSHIFUJI : sin6_scope_id support - * Arnaldo Melo : check proc_net_create return, cleanups + * Hideaki YOSHIFUJI : sin6_scope_id support + * Arnaldo Melo : check proc_net_create return, cleanups * * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
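/* Editorial sketch, not part of the patch: how a tunnel driver is expected
 * to consume the udp_tunnel helpers added earlier in this patch
 * (net/ipv4/udp_tunnel.c).  The encap_type value, the port and the
 * example_encap_rcv() handler are hypothetical.
 */
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        /* Hypothetical handler: consume the packet. */
        kfree_skb(skb);
        return 0;
}

static int example_open_tunnel_sock(struct net *net, struct socket **sockp)
{
        struct udp_port_cfg udp_conf = { };
        struct udp_tunnel_sock_cfg tunnel_cfg = { };
        int err;

        udp_conf.family = AF_INET;
        udp_conf.local_udp_port = htons(4789);  /* hypothetical port */
        err = udp_sock_create4(net, &udp_conf, sockp);
        if (err < 0)
                return err;

        tunnel_cfg.sk_user_data = NULL;
        tunnel_cfg.encap_type = 1;              /* driver-specific UDP_ENCAP_* value */
        tunnel_cfg.encap_rcv = example_encap_rcv;
        tunnel_cfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);

        return 0;
}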
*/ #define pr_fmt(fmt) "IPv6: " fmt @@ -302,7 +302,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); - if (!sysctl_ip_nonlocal_bind && + if (!net->ipv4.sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && @@ -672,10 +672,10 @@ int inet6_sk_rebuild_header(struct sock *sk) } EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); -bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb) +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, + const struct inet6_skb_parm *opt) { const struct ipv6_pinfo *np = inet6_sk(sk); - const struct inet6_skb_parm *opt = IP6CB(skb); if (np->rxopt.all) { if ((opt->hop && (np->rxopt.bits.hopopts || @@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net) net->ipv6.sysctl.icmpv6_time = 1*HZ; net->ipv6.sysctl.flowlabel_consistency = 1; net->ipv6.sysctl.auto_flowlabels = 0; - atomic_set(&net->ipv6.rt_genid, 0); + atomic_set(&net->ipv6.fib6_sernum, 1); err = ipv6_init_mibs(net); if (err) diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 72a4930bdc0a0e0d43e1a6ad8670e6a1df1608f4..6d16eb0e0c7f938822de68bd57674665dcdcbddf 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -17,10 +17,10 @@ * Authors * * Mitsuru KANDA @USAGI : IPv6 Support - * Kazunori MIYAZAWA @USAGI : - * Kunihiro Ishiguro + * Kazunori MIYAZAWA @USAGI : + * Kunihiro Ishiguro * - * This file is derived from net/ipv4/ah.c. + * This file is derived from net/ipv4/ah.c. */ #define pr_fmt(fmt) "IPv6: " fmt @@ -284,7 +284,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) ipv6_rearrange_rthdr(iph, exthdr.rth); break; - default : + default: return 0; } @@ -478,7 +478,7 @@ static void ah6_input_done(struct crypto_async_request *base, int err) auth_data = ah_tmp_auth(work_iph, hdr_len); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); - err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; + err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0; if (err) goto out; @@ -622,7 +622,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) goto out_free; } - err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; + err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? 
-EBADMSG : 0; if (err) goto out_free; @@ -647,8 +647,8 @@ static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); - struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; - struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && @@ -713,8 +713,6 @@ static int ah6_init_state(struct xfrm_state *x) ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; ahp->icv_trunc_len = x->aalg->alg_trunc_len/8; - BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); - x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len); switch (x->props.mode) { @@ -755,11 +753,10 @@ static int ah6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type ah6_type = -{ +static const struct xfrm_type ah6_type = { .description = "AH6", .owner = THIS_MODULE, - .proto = IPPROTO_AH, + .proto = IPPROTO_AH, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = ah6_init_state, .destructor = ah6_destroy, diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 9a386842fd6278468ed6cc1b435d00f31b2ebe43..f5e319a8d4e2ed1a6d4fc13524e5a6d703706eef 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -46,10 +46,6 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr); -/* Big ac list lock for all the sockets */ -static DEFINE_SPINLOCK(ipv6_sk_ac_lock); - - /* * socket join an anycast group */ @@ -78,7 +74,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) pac->acl_addr = *addr; rtnl_lock(); - rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; @@ -91,11 +86,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) goto error; } else { /* router, no matching interface: just pick one */ - dev = dev_get_by_flags_rcu(net, IFF_UP, - IFF_UP | IFF_LOOPBACK); + dev = __dev_get_by_flags(net, IFF_UP, + IFF_UP | IFF_LOOPBACK); } } else - dev = dev_get_by_index_rcu(net, ifindex); + dev = __dev_get_by_index(net, ifindex); if (dev == NULL) { err = -ENODEV; @@ -127,17 +122,14 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) goto error; } - err = ipv6_dev_ac_inc(dev, addr); + err = __ipv6_dev_ac_inc(idev, addr); if (!err) { - spin_lock_bh(&ipv6_sk_ac_lock); pac->acl_next = np->ipv6_ac_list; np->ipv6_ac_list = pac; - spin_unlock_bh(&ipv6_sk_ac_lock); pac = NULL; } error: - rcu_read_unlock(); rtnl_unlock(); if (pac) sock_kfree_s(sk, pac, sizeof(*pac)); @@ -154,7 +146,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) struct ipv6_ac_socklist *pac, *prev_pac; struct net *net = sock_net(sk); - spin_lock_bh(&ipv6_sk_ac_lock); + rtnl_lock(); prev_pac = NULL; for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) { if ((ifindex == 0 || pac->acl_ifindex == ifindex) && @@ -163,7 +155,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) prev_pac = pac; } if (!pac) { - spin_unlock_bh(&ipv6_sk_ac_lock); + rtnl_unlock(); return -ENOENT; } if (prev_pac) @@ -171,14 +163,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) else np->ipv6_ac_list = pac->acl_next; - spin_unlock_bh(&ipv6_sk_ac_lock); - - rtnl_lock(); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + dev = __dev_get_by_index(net, 
pac->acl_ifindex); if (dev) ipv6_dev_ac_dec(dev, &pac->acl_addr); - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, pac, sizeof(*pac)); @@ -196,19 +183,16 @@ void ipv6_sock_ac_close(struct sock *sk) if (!np->ipv6_ac_list) return; - spin_lock_bh(&ipv6_sk_ac_lock); + rtnl_lock(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; - spin_unlock_bh(&ipv6_sk_ac_lock); prev_index = 0; - rtnl_lock(); - rcu_read_lock(); while (pac) { struct ipv6_ac_socklist *next = pac->acl_next; if (pac->acl_ifindex != prev_index) { - dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + dev = __dev_get_by_index(net, pac->acl_ifindex); prev_index = pac->acl_ifindex; } if (dev) @@ -216,10 +200,14 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } - rcu_read_unlock(); rtnl_unlock(); } +static void aca_get(struct ifacaddr6 *aca) +{ + atomic_inc(&aca->aca_refcnt); +} + static void aca_put(struct ifacaddr6 *ac) { if (atomic_dec_and_test(&ac->aca_refcnt)) { @@ -229,23 +217,40 @@ static void aca_put(struct ifacaddr6 *ac) } } +static struct ifacaddr6 *aca_alloc(struct rt6_info *rt, + const struct in6_addr *addr) +{ + struct inet6_dev *idev = rt->rt6i_idev; + struct ifacaddr6 *aca; + + aca = kzalloc(sizeof(*aca), GFP_ATOMIC); + if (aca == NULL) + return NULL; + + aca->aca_addr = *addr; + in6_dev_hold(idev); + aca->aca_idev = idev; + aca->aca_rt = rt; + aca->aca_users = 1; + /* aca_tstamp should be updated upon changes */ + aca->aca_cstamp = aca->aca_tstamp = jiffies; + atomic_set(&aca->aca_refcnt, 1); + spin_lock_init(&aca->aca_lock); + + return aca; +} + /* * device anycast group inc (add if not found) */ -int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) +int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr) { struct ifacaddr6 *aca; - struct inet6_dev *idev; struct rt6_info *rt; int err; ASSERT_RTNL(); - idev = in6_dev_get(dev); - - if (idev == NULL) - return -EINVAL; - write_lock_bh(&idev->lock); if (idev->dead) { err = -ENODEV; @@ -260,46 +265,35 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) } } - /* - * not found: create a new one. - */ - - aca = kzalloc(sizeof(struct ifacaddr6), GFP_ATOMIC); - - if (aca == NULL) { - err = -ENOMEM; - goto out; - } - rt = addrconf_dst_alloc(idev, addr, true); if (IS_ERR(rt)) { - kfree(aca); err = PTR_ERR(rt); goto out; } - - aca->aca_addr = *addr; - aca->aca_idev = idev; - aca->aca_rt = rt; - aca->aca_users = 1; - /* aca_tstamp should be updated upon changes */ - aca->aca_cstamp = aca->aca_tstamp = jiffies; - atomic_set(&aca->aca_refcnt, 2); - spin_lock_init(&aca->aca_lock); + aca = aca_alloc(rt, addr); + if (aca == NULL) { + ip6_rt_put(rt); + err = -ENOMEM; + goto out; + } aca->aca_next = idev->ac_list; idev->ac_list = aca; + + /* Hold this for addrconf_join_solict() below before we unlock, + * it is already exposed via idev->ac_list. 
+ */ + aca_get(aca); write_unlock_bh(&idev->lock); ip6_ins_rt(rt); - addrconf_join_solict(dev, &aca->aca_addr); + addrconf_join_solict(idev->dev, &aca->aca_addr); aca_put(aca); return 0; out: write_unlock_bh(&idev->lock); - in6_dev_put(idev); return err; } @@ -341,7 +335,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) return 0; } -/* called with rcu_read_lock() */ +/* called with rtnl_lock() */ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) { struct inet6_dev *idev = __in6_dev_get(dev); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2753319524f1acabb34a0520ea29ee361c1dfe9e..2cdc38338be3fda267d7137cb20fc8bbe72c4466 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -43,13 +43,13 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; - struct inet_sock *inet = inet_sk(sk); - struct ipv6_pinfo *np = inet6_sk(sk); - struct in6_addr *daddr, *final_p, final; + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + struct in6_addr *daddr, *final_p, final; struct dst_entry *dst; struct flowi6 fl6; struct ip6_flowlabel *flowlabel = NULL; - struct ipv6_txoptions *opt; + struct ipv6_txoptions *opt; int addr_type; int err; @@ -332,7 +332,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name); struct { struct sock_extended_err ee; @@ -342,7 +342,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int copied; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -415,17 +415,6 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else { - spin_unlock_bh(&sk->sk_error_queue.lock); - } - out_free_skb: kfree_skb(skb); out: diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index d15da1377149d3a0fdf846a7a07364e6d1251845..83fc3a385a26782c316914c4f8aafab78fe1a5e2 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -17,10 +17,10 @@ * Authors * * Mitsuru KANDA @USAGI : IPv6 Support - * Kazunori MIYAZAWA @USAGI : - * Kunihiro Ishiguro + * Kazunori MIYAZAWA @USAGI : + * Kunihiro Ishiguro * - * This file is derived from net/ipv4/esp.c + * This file is derived from net/ipv4/esp.c */ #define pr_fmt(fmt) "IPv6: " fmt @@ -598,7 +598,7 @@ static int esp6_init_state(struct xfrm_state *x) case XFRM_MODE_BEET: if (x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN + - (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); break; case XFRM_MODE_TRANSPORT: break; @@ -621,11 +621,10 @@ static int esp6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type esp6_type = -{ +static const struct xfrm_type esp6_type = { .description = "ESP6", - .owner = THIS_MODULE, - .proto = IPPROTO_ESP, + .owner = THIS_MODULE, + .proto = 
IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8d67900aa0036139e934354f98aefab13ba51c7a..bfde361b613400df742c21225a1b4b44744ece65 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -142,7 +142,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) default: /* Other TLV code so scan list */ if (optlen > len) goto bad; - for (curr=procs; curr->type >= 0; curr++) { + for (curr = procs; curr->type >= 0; curr++) { if (curr->type == nh[off]) { /* type specific length/alignment checks will be performed in the diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 06ba3e58320ba45fc3856659e43afd952495db35..97ae70077a4f226bac3da0f2f94f6efb21d2b246 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -170,11 +170,11 @@ static bool is_ineligible(const struct sk_buff *skb) /* * Check the ICMP output rate limit */ -static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, - struct flowi6 *fl6) +static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, + struct flowi6 *fl6) { - struct dst_entry *dst; struct net *net = sock_net(sk); + struct dst_entry *dst; bool res = false; /* Informational messages are not limited. */ @@ -199,16 +199,20 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, } else { struct rt6_info *rt = (struct rt6_info *)dst; int tmo = net->ipv6.sysctl.icmpv6_time; - struct inet_peer *peer; /* Give more bandwidth to wider prefixes. */ if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); - peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); - res = inet_peer_xrlim_allow(peer, tmo); - if (peer) - inet_putpeer(peer); + if (icmp_global_allow()) { + struct inet_peer *peer; + + peer = inet_getpeer_v6(net->ipv6.peers, + &rt->rt6i_dst.addr, 1); + res = inet_peer_xrlim_allow(peer, tmo); + if (peer) + inet_putpeer(peer); + } } dst_release(dst); return res; @@ -503,7 +507,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) msg.type = type; len = skb->len - msg.offset; - len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); + len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr)); if (len < 0) { LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); goto out_dst_release; @@ -636,7 +640,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) /* now skip over extension headers */ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); - if (inner_offset<0) + if (inner_offset < 0) goto out; } else { inner_offset = sizeof(struct ipv6hdr); @@ -773,12 +777,12 @@ static int icmpv6_rcv(struct sk_buff *skb) break; default: - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); - /* informational */ if (type & ICMPV6_INFOMSG_MASK) break; + LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); + /* * error of unknown type. 
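/* Editorial sketch (not part of the patch): the icmpv6_xrlim_allow() change
 * above puts a cheap global limiter (icmp_global_allow()) in front of the
 * per-peer token bucket, so a flood of errors spread over many peers is
 * capped before any inet_peer lookup is done.  A stand-alone token-bucket
 * analogue of that two-level check; struct bucket, bucket_allow() and
 * errmsg_allowed() are invented names, not kernel APIs.
 */
#include <stdbool.h>
#include <time.h>

struct bucket {
	double tokens;		/* current budget */
	double burst;		/* maximum budget */
	double rate;		/* refill per second */
	struct timespec last;	/* last refill time */
};

static bool bucket_allow(struct bucket *b)
{
	struct timespec now;
	double dt;

	clock_gettime(CLOCK_MONOTONIC, &now);
	dt = (now.tv_sec - b->last.tv_sec) +
	     (now.tv_nsec - b->last.tv_nsec) / 1e9;
	b->last = now;
	b->tokens += dt * b->rate;
	if (b->tokens > b->burst)
		b->tokens = b->burst;
	if (b->tokens < 1.0)
		return false;
	b->tokens -= 1.0;
	return true;
}

/* Global gate first, per-peer bucket second, mirroring the ordering above.
 * Usage: struct bucket global = { .tokens = 100, .burst = 100, .rate = 1000 };
 */
static bool errmsg_allowed(struct bucket *global, struct bucket *per_peer)
{
	return bucket_allow(global) && bucket_allow(per_peer);
}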
* must pass to upper level @@ -808,7 +812,7 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, memset(fl6, 0, sizeof(*fl6)); fl6->saddr = *saddr; fl6->daddr = *daddr; - fl6->flowi6_proto = IPPROTO_ICMPV6; + fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; @@ -875,8 +879,8 @@ static void __net_exit icmpv6_sk_exit(struct net *net) } static struct pernet_operations icmpv6_sk_ops = { - .init = icmpv6_sk_init, - .exit = icmpv6_sk_exit, + .init = icmpv6_sk_init, + .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index a245e5ddffbd0450968c44de7d3fcd8a1dd055cf..29b32206e49488e1155900adfcd1707ea909855e 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -63,7 +63,6 @@ int inet6_csk_bind_conflict(const struct sock *sk, return sk2 != NULL; } - EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); struct dst_entry *inet6_csk_route_req(struct sock *sk, @@ -144,7 +143,6 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk, return NULL; } - EXPORT_SYMBOL_GPL(inet6_csk_search_req); void inet6_csk_reqsk_queue_hash_add(struct sock *sk, @@ -160,10 +158,9 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk, reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); inet_csk_reqsk_queue_added(sk, timeout); } - EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add); -void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) +void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr; @@ -175,7 +172,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, sk->sk_bound_dev_if); } - EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); static inline diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 262e13c02ec27dea15154d9b4af5fd413dd1c504..051dffb49c90e979ca6cc58dd1c2cd2fe9b1cc4c 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -6,7 +6,7 @@ * Generic INET6 transport hashtables * * Authors: Lotsa people, from code originally in tcp, generalised here - * by Arnaldo Carvalho de Melo + * by Arnaldo Carvalho de Melo * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net, } } else if (score == hiscore && reuseport) { matches++; - if (((u64)phash * matches) >> 32 == 0) + if (reciprocal_scale(phash, matches) == 0) result = sk; phash = next_pseudo_random32(phash); } @@ -222,7 +222,6 @@ struct sock *inet6_lookup_listener(struct net *net, rcu_read_unlock(); return result; } - EXPORT_SYMBOL_GPL(inet6_lookup_listener); struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, @@ -238,7 +237,6 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, return sk; } - EXPORT_SYMBOL_GPL(inet6_lookup); static int __inet6_check_established(struct inet_timewait_death_row *death_row, @@ -324,5 +322,4 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), __inet6_check_established, __inet6_hash); } - EXPORT_SYMBOL_GPL(inet6_hash_connect); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 
97b9fa8de37783a0da90671208e42a887a20332c..b2d1838897c933f6f1ceb2a6f64a2f67495ce3c9 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -46,20 +46,11 @@ static struct kmem_cache *fib6_node_kmem __read_mostly; -enum fib_walk_state_t { -#ifdef CONFIG_IPV6_SUBTREES - FWS_S, -#endif - FWS_L, - FWS_R, - FWS_C, - FWS_U -}; - -struct fib6_cleaner_t { - struct fib6_walker_t w; +struct fib6_cleaner { + struct fib6_walker w; struct net *net; int (*func)(struct rt6_info *, void *arg); + int sernum; void *arg; }; @@ -74,8 +65,8 @@ static DEFINE_RWLOCK(fib6_walker_lock); static void fib6_prune_clones(struct net *net, struct fib6_node *fn); static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn); static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn); -static int fib6_walk(struct fib6_walker_t *w); -static int fib6_walk_continue(struct fib6_walker_t *w); +static int fib6_walk(struct fib6_walker *w); +static int fib6_walk_continue(struct fib6_walker *w); /* * A routing update causes an increase of the serial number on the @@ -84,34 +75,41 @@ static int fib6_walk_continue(struct fib6_walker_t *w); * result of redirects, path MTU changes, etc. */ -static __u32 rt_sernum; - static void fib6_gc_timer_cb(unsigned long arg); static LIST_HEAD(fib6_walkers); #define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh) -static inline void fib6_walker_link(struct fib6_walker_t *w) +static void fib6_walker_link(struct fib6_walker *w) { write_lock_bh(&fib6_walker_lock); list_add(&w->lh, &fib6_walkers); write_unlock_bh(&fib6_walker_lock); } -static inline void fib6_walker_unlink(struct fib6_walker_t *w) +static void fib6_walker_unlink(struct fib6_walker *w) { write_lock_bh(&fib6_walker_lock); list_del(&w->lh); write_unlock_bh(&fib6_walker_lock); } -static __inline__ u32 fib6_new_sernum(void) + +static int fib6_new_sernum(struct net *net) { - u32 n = ++rt_sernum; - if ((__s32)n <= 0) - rt_sernum = n = 1; - return n; + int new, old; + + do { + old = atomic_read(&net->ipv6.fib6_sernum); + new = old < INT_MAX ? old + 1 : 1; + } while (atomic_cmpxchg(&net->ipv6.fib6_sernum, + old, new) != old); + return new; } +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + /* * Auxiliary address test functions for the radix tree. 
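/* Editorial sketch (not part of the patch): fib6_new_sernum() above bumps a
 * per-namespace serial number with a lock-free cmpxchg loop.  Two properties
 * matter: it never returns 0, because 0 is reserved as
 * FIB6_NO_SERNUM_CHANGE, and it wraps from INT_MAX back to 1 instead of
 * going negative.  A C11 analogue that can be compiled and tested on its
 * own; new_sernum() and the file-scope counter are invented names.
 */
#include <limits.h>
#include <stdatomic.h>

static atomic_int sernum = 1;

static int new_sernum(void)
{
	int old = atomic_load(&sernum);
	int new;

	do {
		new = (old < INT_MAX) ? old + 1 : 1;	/* skip 0, wrap to 1 */
	} while (!atomic_compare_exchange_weak(&sernum, &old, new));

	return new;
}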
* @@ -128,7 +126,7 @@ static __inline__ u32 fib6_new_sernum(void) # define BITOP_BE32_SWIZZLE 0 #endif -static __inline__ __be32 addr_bit_set(const void *token, int fn_bit) +static __be32 addr_bit_set(const void *token, int fn_bit) { const __be32 *addr = token; /* @@ -142,7 +140,7 @@ static __inline__ __be32 addr_bit_set(const void *token, int fn_bit) addr[fn_bit >> 5]; } -static __inline__ struct fib6_node *node_alloc(void) +static struct fib6_node *node_alloc(void) { struct fib6_node *fn; @@ -151,12 +149,12 @@ static __inline__ struct fib6_node *node_alloc(void) return fn; } -static __inline__ void node_free(struct fib6_node *fn) +static void node_free(struct fib6_node *fn) { kmem_cache_free(fib6_node_kmem, fn); } -static __inline__ void rt6_release(struct rt6_info *rt) +static void rt6_release(struct rt6_info *rt) { if (atomic_dec_and_test(&rt->rt6i_ref)) dst_free(&rt->dst); @@ -267,7 +265,7 @@ static void __net_init fib6_tables_init(struct net *net) #endif -static int fib6_dump_node(struct fib6_walker_t *w) +static int fib6_dump_node(struct fib6_walker *w) { int res; struct rt6_info *rt; @@ -287,7 +285,7 @@ static int fib6_dump_node(struct fib6_walker_t *w) static void fib6_dump_end(struct netlink_callback *cb) { - struct fib6_walker_t *w = (void *)cb->args[2]; + struct fib6_walker *w = (void *)cb->args[2]; if (w) { if (cb->args[4]) { @@ -310,7 +308,7 @@ static int fib6_dump_done(struct netlink_callback *cb) static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, struct netlink_callback *cb) { - struct fib6_walker_t *w; + struct fib6_walker *w; int res; w = (void *)cb->args[2]; @@ -355,7 +353,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) unsigned int h, s_h; unsigned int e = 0, s_e; struct rt6_rtnl_dump_arg arg; - struct fib6_walker_t *w; + struct fib6_walker *w; struct fib6_table *tb; struct hlist_head *head; int res = 0; @@ -423,14 +421,13 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) static struct fib6_node *fib6_add_1(struct fib6_node *root, struct in6_addr *addr, int plen, int offset, int allow_create, - int replace_required) + int replace_required, int sernum) { struct fib6_node *fn, *in, *ln; struct fib6_node *pn = NULL; struct rt6key *key; int bit; __be32 dir = 0; - __u32 sernum = fib6_new_sernum(); RT6_TRACE("fib6_add_1\n"); @@ -627,7 +624,7 @@ static struct fib6_node *fib6_add_1(struct fib6_node *root, return ln; } -static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt) +static bool rt6_qualify_for_ecmp(struct rt6_info *rt) { return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == RTF_GATEWAY; @@ -820,7 +817,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, return 0; } -static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) +static void fib6_start_gc(struct net *net, struct rt6_info *rt) { if (!timer_pending(&net->ipv6.ip6_fib_timer) && (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE))) @@ -848,6 +845,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, int err = -ENOMEM; int allow_create = 1; int replace_required = 0; + int sernum = fib6_new_sernum(info->nl_net); if (info->nlh) { if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) @@ -860,7 +858,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), allow_create, - replace_required); + replace_required, sernum); if (IS_ERR(fn)) { 
err = PTR_ERR(fn); fn = NULL; @@ -894,14 +892,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, sfn->leaf = info->nl_net->ipv6.ip6_null_entry; atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); sfn->fn_flags = RTN_ROOT; - sfn->fn_sernum = fib6_new_sernum(); + sfn->fn_sernum = sernum; /* Now add the first leaf node to new subtree */ sn = fib6_add_1(sfn, &rt->rt6i_src.addr, rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required); + allow_create, replace_required, sernum); if (IS_ERR(sn)) { /* If it is failed, discard just allocated @@ -920,7 +918,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, rt->rt6i_src.plen, offsetof(struct rt6_info, rt6i_src), - allow_create, replace_required); + allow_create, replace_required, sernum); if (IS_ERR(sn)) { err = PTR_ERR(sn); @@ -1174,7 +1172,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net, int children; int nstate; struct fib6_node *child, *pn; - struct fib6_walker_t *w; + struct fib6_walker *w; int iter = 0; for (;;) { @@ -1276,7 +1274,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net, static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, struct nl_info *info) { - struct fib6_walker_t *w; + struct fib6_walker *w; struct rt6_info *rt = *rtp; struct net *net = info->nl_net; @@ -1414,7 +1412,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) * <0 -> walk is terminated by an error. */ -static int fib6_walk_continue(struct fib6_walker_t *w) +static int fib6_walk_continue(struct fib6_walker *w) { struct fib6_node *fn, *pn; @@ -1498,7 +1496,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w) } } -static int fib6_walk(struct fib6_walker_t *w) +static int fib6_walk(struct fib6_walker *w) { int res; @@ -1512,15 +1510,25 @@ static int fib6_walk(struct fib6_walker_t *w) return res; } -static int fib6_clean_node(struct fib6_walker_t *w) +static int fib6_clean_node(struct fib6_walker *w) { int res; struct rt6_info *rt; - struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); + struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w); struct nl_info info = { .nl_net = c->net, }; + if (c->sernum != FIB6_NO_SERNUM_CHANGE && + w->node->fn_sernum != c->sernum) + w->node->fn_sernum = c->sernum; + + if (!c->func) { + WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE); + w->leaf = NULL; + return 0; + } + for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { res = c->func(rt, c->arg); if (res < 0) { @@ -1554,9 +1562,9 @@ static int fib6_clean_node(struct fib6_walker_t *w) static void fib6_clean_tree(struct net *net, struct fib6_node *root, int (*func)(struct rt6_info *, void *arg), - int prune, void *arg) + bool prune, int sernum, void *arg) { - struct fib6_cleaner_t c; + struct fib6_cleaner c; c.w.root = root; c.w.func = fib6_clean_node; @@ -1564,14 +1572,16 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root, c.w.count = 0; c.w.skip = 0; c.func = func; + c.sernum = sernum; c.arg = arg; c.net = net; fib6_walk(&c.w); } -void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), - void *arg) +static void __fib6_clean_all(struct net *net, + int (*func)(struct rt6_info *, void *), + int sernum, void *arg) { struct fib6_table *table; struct hlist_head *head; @@ -1583,13 +1593,19 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), hlist_for_each_entry_rcu(table, head, 
tb6_hlist) { write_lock_bh(&table->tb6_lock); fib6_clean_tree(net, &table->tb6_root, - func, 0, arg); + func, false, sernum, arg); write_unlock_bh(&table->tb6_lock); } } rcu_read_unlock(); } +void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *), + void *arg) +{ + __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg); +} + static int fib6_prune_clone(struct rt6_info *rt, void *arg) { if (rt->rt6i_flags & RTF_CACHE) { @@ -1602,25 +1618,15 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg) static void fib6_prune_clones(struct net *net, struct fib6_node *fn) { - fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL); -} - -static int fib6_update_sernum(struct rt6_info *rt, void *arg) -{ - __u32 sernum = *(__u32 *)arg; - - if (rt->rt6i_node && - rt->rt6i_node->fn_sernum != sernum) - rt->rt6i_node->fn_sernum = sernum; - - return 0; + fib6_clean_tree(net, fn, fib6_prune_clone, true, + FIB6_NO_SERNUM_CHANGE, NULL); } static void fib6_flush_trees(struct net *net) { - __u32 new_sernum = fib6_new_sernum(); + int new_sernum = fib6_new_sernum(net); - fib6_clean_all(net, fib6_update_sernum, &new_sernum); + __fib6_clean_all(net, NULL, new_sernum, NULL); } /* @@ -1828,10 +1834,10 @@ void fib6_gc_cleanup(void) struct ipv6_route_iter { struct seq_net_private p; - struct fib6_walker_t w; + struct fib6_walker w; loff_t skip; struct fib6_table *tbl; - __u32 sernum; + int sernum; }; static int ipv6_route_seq_show(struct seq_file *seq, void *v) @@ -1859,7 +1865,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v) return 0; } -static int ipv6_route_yield(struct fib6_walker_t *w) +static int ipv6_route_yield(struct fib6_walker *w) { struct ipv6_route_iter *iter = w->args; @@ -1980,7 +1986,7 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) { - struct fib6_walker_t *w = &iter->w; + struct fib6_walker *w = &iter->w; return w->node && !(w->state == FWS_U && w->node == w->root); } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 4052694c6f2cb196b54ef7d5235d61d74e4ea69c..3dd7d4ebd7cd9dff75c7a89a3a872cf8a073543b 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -136,7 +136,7 @@ static void ip6_fl_gc(unsigned long dummy) spin_lock(&ip6_fl_lock); - for (i=0; i<=FL_HASH_MASK; i++) { + for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; @@ -239,7 +239,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net, /* Socket flowlabel lists */ -struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label) +struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { struct ipv6_fl_socklist *sfl; struct ipv6_pinfo *np = inet6_sk(sk); @@ -259,7 +259,6 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label) rcu_read_unlock_bh(); return NULL; } - EXPORT_SYMBOL_GPL(fl6_sock_lookup); void fl6_free_socklist(struct sock *sk) @@ -293,11 +292,11 @@ void fl6_free_socklist(struct sock *sk) following rthdr. 
*/ -struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space, - struct ip6_flowlabel * fl, - struct ipv6_txoptions * fopt) +struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, + struct ip6_flowlabel *fl, + struct ipv6_txoptions *fopt) { - struct ipv6_txoptions * fl_opt = fl->opt; + struct ipv6_txoptions *fl_opt = fl->opt; if (fopt == NULL || fopt->opt_flen == 0) return fl_opt; @@ -388,7 +387,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, goto done; msg.msg_controllen = olen; - msg.msg_control = (void*)(fl->opt+1); + msg.msg_control = (void *)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, @@ -517,7 +516,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_flowlabel_req freq; - struct ipv6_fl_socklist *sfl1=NULL; + struct ipv6_fl_socklist *sfl1 = NULL; struct ipv6_fl_socklist *sfl; struct ipv6_fl_socklist __rcu **sflp; struct ip6_flowlabel *fl, *fl1 = NULL; @@ -542,7 +541,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) } spin_lock_bh(&ip6_sk_fl_lock); for (sflp = &np->ipv6_fl_list; - (sfl = rcu_dereference(*sflp))!=NULL; + (sfl = rcu_dereference(*sflp)) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq.flr_label) { if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index f304471477dce45bcb8fc71f842ae8c27032f609..12c3c8ef3849467296ced452089008872e64083f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -618,6 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, int err = -1; u8 proto; struct sk_buff *new_skb; + __be16 protocol; if (dev->type == ARPHRD_ETHER) IPCB(skb)->flags = 0; @@ -734,8 +735,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, ipv6h->daddr = fl6->daddr; ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags; - ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ? - htons(ETH_P_TEB) : skb->protocol; + protocol = (dev->type == ARPHRD_ETHER) ? 
+ htons(ETH_P_TEB) : skb->protocol; + ((__be16 *)(ipv6h + 1))[1] = protocol; if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4); @@ -756,6 +758,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, } } + skb_set_inner_protocol(skb, protocol); + ip6tunnel_xmit(skb, dev); if (ndst) ip6_tnl_dst_store(tunnel, ndst); @@ -782,7 +786,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPIP; + fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv4_get_dsfield(iph); @@ -832,7 +836,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_IPV6; + fl6.flowi6_proto = IPPROTO_GRE; dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) @@ -1238,7 +1242,7 @@ static void ip6gre_tunnel_setup(struct net_device *dev) dev->flags |= IFF_NOARP; dev->iflink = 0; dev->addr_len = sizeof(struct in6_addr); - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } static int ip6gre_tunnel_init(struct net_device *dev) diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index 4578e23834f726ca254d577029c7ccf54ccbed7b..14dacc544c3efc06de8ac80f6bbf6a5ee6cf6b35 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -13,7 +13,7 @@ static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) { return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ? - 0 : -EBUSY; + 0 : -EBUSY; } EXPORT_SYMBOL(inet6_register_icmp_sender); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 51d54dc376f3b1862040f38e6f58e2c75001049c..a3084ab5df6cd1b343af6d725975082f5c4b6eb6 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -15,8 +15,8 @@ */ /* Changes * - * Mitsuru KANDA @USAGI and - * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). + * Mitsuru KANDA @USAGI and + * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). 
*/ #include @@ -65,7 +65,7 @@ int ip6_rcv_finish(struct sk_buff *skb) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct ipv6hdr *hdr; - u32 pkt_len; + u32 pkt_len; struct inet6_dev *idev; struct net *net = dev_net(skb->dev); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 65eda2a8af48280e0c068f24d8ef7adc8c7d8c9e..9034f76ae0138c4a201708ea15e28ba0b846247b 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -53,31 +53,6 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) return proto; } -static int ipv6_gso_send_check(struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - const struct net_offload *ops; - int err = -EINVAL; - - if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) - goto out; - - ipv6h = ipv6_hdr(skb); - __skb_pull(skb, sizeof(*ipv6h)); - err = -EPROTONOSUPPORT; - - ops = rcu_dereference(inet6_offloads[ - ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); - - if (likely(ops && ops->callbacks.gso_send_check)) { - skb_reset_transport_header(skb); - err = ops->callbacks.gso_send_check(skb); - } - -out: - return err; -} - static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, netdev_features_t features) { @@ -244,7 +219,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, continue; iph2 = (struct ipv6hdr *)(p->data + off); - first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; + first_word = *(__be32 *)iph ^ *(__be32 *)iph2; /* All fields must match except length and Traffic Class. * XXX skbs on the gro_list have all been parsed and pulled @@ -261,6 +236,9 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; + + /* Clear flush_id, there's really no concept of ID in IPv6. */ + NAPI_GRO_CB(p)->flush_id = 0; } NAPI_GRO_CB(skb)->flush |= flush; @@ -303,7 +281,6 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) static struct packet_offload ipv6_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .callbacks = { - .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, @@ -312,8 +289,9 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { static const struct net_offload sit_offload = { .callbacks = { - .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, + .gro_receive = ipv6_gro_receive, + .gro_complete = ipv6_gro_complete, }, }; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0a3448b2888fbdc62181a531ad665ce8fce3a65f..8e950c250ada0785323c3e270a61c2e1b7a7b6f3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -20,7 +20,7 @@ * etc. * * H. 
von Brand : Added missing #include - * Imran Patel : frag id should be in NBO + * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit @@ -233,7 +233,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -EMSGSIZE; } - EXPORT_SYMBOL(ip6_xmit); static int ip6_call_ra_chain(struct sk_buff *skb, int sel) @@ -555,14 +554,14 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; - struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; int hroom, troom; __be32 frag_id = 0; - int ptr, offset = 0, err=0; + int ptr, offset = 0, err = 0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); @@ -637,7 +636,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } __skb_pull(skb, hlen); - fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); + fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); @@ -662,7 +661,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); - fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); + fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, @@ -681,7 +680,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } err = output(skb); - if(!err) + if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); @@ -702,11 +701,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) return 0; } - while (frag) { - skb = frag->next; - kfree_skb(frag); - frag = skb; - } + kfree_skb_list(frag); IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); @@ -742,7 +737,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) /* * Keep copying data until we run out. */ - while(left > 0) { + while (left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) @@ -865,7 +860,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, - * and MSG_DONTROUTE --ANK (980726) + * and MSG_DONTROUTE --ANK (980726) * * 1. ip6_rt_check(): If route was host route, * check that cached destination is current. 
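/* Editorial sketch (not part of the patch): the ip6_fragment() error path
 * above replaces an open-coded walk-and-free of the frag list with
 * kfree_skb_list(), which performs the same traversal internally.  Generic
 * shape of such a helper for any singly linked list; struct node,
 * free_node() and free_list() are invented names.
 */
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload omitted */
};

static void free_node(struct node *n)
{
	free(n);
}

/* Free an entire list; a NULL head is a no-op, like kfree_skb_list(NULL). */
static void free_list(struct node *head)
{
	while (head) {
		struct node *next = head->next;

		free_node(head);
		head = next;
	}
}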
@@ -1049,7 +1044,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, - int transhdrlen, int mtu,unsigned int flags, + int transhdrlen, int mtu, unsigned int flags, struct rt6_info *rt) { @@ -1072,7 +1067,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_reserve(skb, hh_len); /* create space for UDP/IP header */ - skb_put(skb,fragheaderlen + transhdrlen); + skb_put(skb, fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 69a84b464009cc3192d42acc8a2c4d1e8419ca61..9409887fb664dd78d13937b89ea1b5044afdbc94 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -412,12 +412,12 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) { const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; __u8 nexthdr = ipv6h->nexthdr; - __u16 off = sizeof (*ipv6h); + __u16 off = sizeof(*ipv6h); while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { __u16 optlen = 0; struct ipv6_opt_hdr *hdr; - if (raw + off + sizeof (*hdr) > skb->data && + if (raw + off + sizeof(*hdr) > skb->data && !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) break; @@ -534,7 +534,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { + if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; @@ -995,7 +995,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, t->parms.name); goto tx_err_dst_release; } - mtu = dst_mtu(dst) - sizeof (*ipv6h); + mtu = dst_mtu(dst) - sizeof(*ipv6h); if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; @@ -1087,7 +1087,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; - memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); @@ -1139,7 +1139,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; - memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); @@ -1233,11 +1233,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) if (rt->dst.dev) { dev->hard_header_len = rt->dst.dev->hard_header_len + - sizeof (struct ipv6hdr); + sizeof(struct ipv6hdr); - dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr); + dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu-=8; + dev->mtu -= 8; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; @@ -1354,7 +1354,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { err = -EFAULT; break; } @@ -1366,7 +1366,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) memset(&p, 0, sizeof(p)); } ip6_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { + if 
(copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) { err = -EFAULT; } break; @@ -1376,7 +1376,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) break; err = -EINVAL; if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && @@ -1411,7 +1411,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (dev == ip6n->fb_tnl_dev) { err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) break; err = -ENOENT; ip6_tnl_parm_from_user(&p1, &p); @@ -1486,14 +1486,14 @@ static void ip6_tnl_dev_setup(struct net_device *dev) dev->destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; - dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); - dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); + dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); + dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr); t = netdev_priv(dev); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu-=8; + dev->mtu -= 8; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); /* This perm addr will be used as interface identifier by IPv6 */ dev->addr_assign_type = NET_ADDR_RANDOM; eth_random_addr(dev->perm_addr); diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c new file mode 100644 index 0000000000000000000000000000000000000000..b04ed72c454247886d7d99ae5c9e36e949b48cd1 --- /dev/null +++ b/net/ipv6/ip6_udp_tunnel.c @@ -0,0 +1,107 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + struct sockaddr_in6 udp6_addr; + int err; + struct socket *sock = NULL; + + err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + + sk_change_net(sock->sk, net); + + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->peer_udp_port; + err = kernel_connect(sock, + (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr), 0); + } + if (err < 0) + goto error; + + udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); + udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} +EXPORT_SYMBOL_GPL(udp_sock_create6); + +int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + struct sock *sk = sock->sk; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = 
htons(skb->len); + uh->check = 0; + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED + | IPSKB_REROUTED); + skb_dst_set(skb, dst); + + udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, + &sk->sk_v6_daddr, skb->len); + + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, htonl(0)); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + ip6tunnel_xmit(skb, dev); + return 0; +} +EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 5833a2244467325caff4f75f5740adc809db235b..d440bb585524d72202f809ebd1f0cb4ee8b7cc79 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -807,7 +807,7 @@ static void vti6_dev_setup(struct net_device *dev) dev->mtu = ETH_DATA_LEN; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } /** diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index f9a3fd320d1df23ca1140e3d39e46b4d14a234be..0171f08325c3ff3991485297de4c532fe6992bf8 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -845,7 +845,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) atomic_dec(&mrt->cache_resolve_queue_len); - while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { + while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { if (ipv6_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); nlh->nlmsg_type = NLMSG_ERROR; @@ -1103,7 +1103,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, * Play the pending entries through our router */ - while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { + while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ipv6_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index d1c793cffcb5f44aba0f922f2ebdd3105d51b49f..1b9316e1386a96c899c67888fba4618d3004e69a 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -181,8 +181,7 @@ static int ipcomp6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type ipcomp6_type = -{ +static const struct xfrm_type ipcomp6_type = { .description = "IPCOMP6", .owner = THIS_MODULE, .proto = IPPROTO_COMP, @@ -193,8 +192,7 @@ static const struct xfrm_type ipcomp6_type = .hdr_offset = xfrm6_find_1stfragopt, }; -static struct xfrm6_protocol ipcomp6_protocol = -{ +static struct xfrm6_protocol ipcomp6_protocol = { .handler = xfrm6_rcv, .cb_handler = ipcomp6_rcv_cb, .err_handler = ipcomp6_err, diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0c289982796dfb6e01a761ff4d49616700da3ba8..e1a9583bb4191f44b021e3d39483ed29d1f22fc4 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -66,12 +66,12 @@ int ip6_ra_control(struct sock *sk, int sel) if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) return -ENOPROTOOPT; - new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; + new_ra = (sel >= 0) ? 
kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; write_lock_bh(&ip6_ra_lock); - for (rap = &ip6_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { + for (rap = &ip6_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { if (ra->sk == sk) { - if (sel>=0) { + if (sel >= 0) { write_unlock_bh(&ip6_ra_lock); kfree(new_ra); return -EADDRINUSE; @@ -130,7 +130,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, int retv = -ENOPROTOOPT; if (optval == NULL) - val=0; + val = 0; else { if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) @@ -139,7 +139,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, val = 0; } - valbool = (val!=0); + valbool = (val != 0); if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); @@ -474,7 +474,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, goto done; msg.msg_controllen = optlen; - msg.msg_control = (void*)(opt+1); + msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); @@ -687,7 +687,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -ENOBUFS; break; } - gsf = kmalloc(optlen,GFP_KERNEL); + gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { retv = -ENOBUFS; break; @@ -873,7 +873,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(ipv6_setsockopt); #ifdef CONFIG_COMPAT @@ -909,7 +908,6 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(compat_ipv6_setsockopt); #endif @@ -921,7 +919,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, if (!opt) return 0; - switch(optname) { + switch (optname) { case IPV6_HOPOPTS: hdr = opt->hopopt; break; @@ -1284,9 +1282,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); - if(put_user(len, optlen)) + if (put_user(len, optlen)) return -EFAULT; - if(copy_to_user(optval,&val,len)) + if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } @@ -1299,7 +1297,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.getsockopt(sk, level, optname, optval, optlen); - if(level != SOL_IPV6) + if (level != SOL_IPV6) return -ENOPROTOOPT; err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); @@ -1321,7 +1319,6 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(ipv6_getsockopt); #ifdef CONFIG_COMPAT @@ -1364,7 +1361,6 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(compat_ipv6_getsockopt); #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index a23b655a7627a69046d956139946c00dda8825f4..9648de2b67458201ad9435b6972b05191c88c075 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -64,15 +64,6 @@ #include -/* Set to 3 to get tracing... */ -#define MCAST_DEBUG 2 - -#if MCAST_DEBUG >= 3 -#define MDBG(x) printk x -#else -#define MDBG(x) -#endif - /* Ensure that we have struct in6_addr aligned on 32bit word. 
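/* Editorial sketch (not part of the patch): how a tunnel driver might call
 * the udp_sock_create6() helper added earlier in this patch
 * (net/ipv6/ip6_udp_tunnel.c).  example_open_tunnel_sock() and the port
 * number are invented, and the declarations are assumed to come from the
 * UDP-tunnel header that the new file includes.
 */
static int example_open_tunnel_sock(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg cfg = {
		/* local_ip6 left as all-zeros: bind to the wildcard address */
		.local_udp_port		= htons(6081),	/* arbitrary example */
		.use_udp6_tx_checksums	= 1,
		.use_udp6_rx_checksums	= 1,
	};

	return udp_sock_create6(net, &cfg, sockp);
}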
*/ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), @@ -82,9 +73,6 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; -/* Big mc list lock for all the sockets */ -static DEFINE_SPINLOCK(ipv6_sk_mc_lock); - static void igmp6_join_group(struct ifmcaddr6 *ma); static void igmp6_leave_group(struct ifmcaddr6 *ma); static void igmp6_timer_handler(unsigned long data); @@ -121,6 +109,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, #define IPV6_MLD_MAX_MSF 64 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; +int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT; /* * socket join on multicast group @@ -173,7 +162,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) mc_lst->addr = *addr; rtnl_lock(); - rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); @@ -182,10 +170,9 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) ip6_rt_put(rt); } } else - dev = dev_get_by_index_rcu(net, ifindex); + dev = __dev_get_by_index(net, ifindex); if (dev == NULL) { - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; @@ -203,18 +190,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) err = ipv6_dev_mc_inc(dev, addr); if (err) { - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return err; } - spin_lock(&ipv6_sk_mc_lock); mc_lst->next = np->ipv6_mc_list; rcu_assign_pointer(np->ipv6_mc_list, mc_lst); - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_unlock(); rtnl_unlock(); return 0; @@ -234,20 +217,16 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return -EINVAL; rtnl_lock(); - spin_lock(&ipv6_sk_mc_lock); for (lnk = &np->ipv6_mc_list; - (mc_lst = rcu_dereference_protected(*lnk, - lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ; + (mc_lst = rtnl_dereference(*lnk)) != NULL; lnk = &mc_lst->next) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { struct net_device *dev; *lnk = mc_lst->next; - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, mc_lst->ifindex); + dev = __dev_get_by_index(net, mc_lst->ifindex); if (dev != NULL) { struct inet6_dev *idev = __in6_dev_get(dev); @@ -256,7 +235,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - rcu_read_unlock(); rtnl_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); @@ -264,7 +242,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } } - spin_unlock(&ipv6_sk_mc_lock); rtnl_unlock(); return -EADDRNOTAVAIL; @@ -311,16 +288,12 @@ void ipv6_sock_mc_close(struct sock *sk) return; rtnl_lock(); - spin_lock(&ipv6_sk_mc_lock); - while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, - lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { + while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { struct net_device *dev; np->ipv6_mc_list = mc_lst->next; - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, mc_lst->ifindex); + dev = __dev_get_by_index(net, mc_lst->ifindex); if (dev) { struct inet6_dev *idev = __in6_dev_get(dev); @@ -329,14 +302,11 @@ void 
ipv6_sock_mc_close(struct sock *sk) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); - spin_lock(&ipv6_sk_mc_lock); } - spin_unlock(&ipv6_sk_mc_lock); rtnl_unlock(); } @@ -400,7 +370,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, if (!psl) goto done; /* err = -EADDRNOTAVAIL */ rv = !0; - for (i=0; isl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { rv = !ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) break; @@ -417,7 +387,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, /* update the interface filter */ ip6_mc_del_src(idev, group, omode, 1, source, 1); - for (j=i+1; jsl_count; j++) + for (j = i+1; j < psl->sl_count; j++) psl->sl_addr[j-1] = psl->sl_addr[j]; psl->sl_count--; err = 0; @@ -443,19 +413,19 @@ int ip6_mc_source(int add, int omode, struct sock *sk, newpsl->sl_max = count; newpsl->sl_count = count - IP6_SFBLOCK; if (psl) { - for (i=0; isl_count; i++) + for (i = 0; i < psl->sl_count; i++) newpsl->sl_addr[i] = psl->sl_addr[i]; sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } pmc->sflist = psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ - for (i=0; isl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { rv = !ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) /* There is an error in the address. */ goto done; } - for (j=psl->sl_count-1; j>=i; j--) + for (j = psl->sl_count-1; j >= i; j--) psl->sl_addr[j+1] = psl->sl_addr[j]; psl->sl_addr[i] = *source; psl->sl_count++; @@ -524,7 +494,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) goto done; } newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc; - for (i=0; isl_count; ++i) { + for (i = 0; i < newpsl->sl_count; ++i) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i]; @@ -586,9 +556,8 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, } err = -EADDRNOTAVAIL; - /* - * changes to the ipv6_mc_list require the socket lock and - * a read lock on ip6_sk_mc_lock. We have the socket lock, + /* changes to the ipv6_mc_list require the socket lock and + * rtnl lock. We have the socket lock and rcu read lock, * so reading the list is safe. */ @@ -612,11 +581,10 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) { return -EFAULT; } - /* changes to psl require the socket lock, a read lock on - * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We - * have the socket lock, so reading here is safe. + /* changes to psl require the socket lock, and a write lock + * on pmc->sflock. We have the socket lock so reading here is safe. 
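/* Editorial sketch (not part of the patch): with ipv6_sk_mc_lock removed,
 * the per-socket multicast list in the hunks above is written only under
 * RTNL, so the update-side walks use rtnl_dereference() -- shorthand for
 * rcu_dereference_protected(p, lockdep_rtnl_is_held()) -- instead of naming
 * a private spinlock.  Minimal shape of such a walk; struct item, the list
 * head and do_something() are invented names.
 */
struct item {
	struct item __rcu *next;
};

static void walk_under_rtnl(struct item __rcu **head)
{
	struct item *it;

	ASSERT_RTNL();	/* caller must hold rtnl_lock() */
	for (it = rtnl_dereference(*head); it;
	     it = rtnl_dereference(it->next))
		do_something(it);
}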
*/ - for (i=0; isl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { if (ipv6_addr_equal(&psl->sl_addr[i], src_addr)) break; } @@ -673,14 +641,6 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, return rv; } -static void ma_put(struct ifmcaddr6 *mc) -{ - if (atomic_dec_and_test(&mc->mca_refcnt)) { - in6_dev_put(mc->idev); - kfree(mc); - } -} - static void igmp6_group_added(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; @@ -772,7 +732,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) pmc->mca_tomb = im->mca_tomb; pmc->mca_sources = im->mca_sources; im->mca_tomb = im->mca_sources = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = pmc->mca_crcount; } spin_unlock_bh(&im->mca_lock); @@ -790,7 +750,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) spin_lock_bh(&idev->mc_lock); pmc_prev = NULL; - for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { + for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) { if (ipv6_addr_equal(&pmc->mca_addr, pmca)) break; pmc_prev = pmc; @@ -804,7 +764,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) spin_unlock_bh(&idev->mc_lock); if (pmc) { - for (psf=pmc->mca_tomb; psf; psf=psf_next) { + for (psf = pmc->mca_tomb; psf; psf = psf_next) { psf_next = psf->sf_next; kfree(psf); } @@ -831,14 +791,14 @@ static void mld_clear_delrec(struct inet6_dev *idev) /* clear dead sources, too */ read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { struct ip6_sf_list *psf, *psf_next; spin_lock_bh(&pmc->mca_lock); psf = pmc->mca_tomb; pmc->mca_tomb = NULL; spin_unlock_bh(&pmc->mca_lock); - for (; psf; psf=psf_next) { + for (; psf; psf = psf_next) { psf_next = psf->sf_next; kfree(psf); } @@ -846,6 +806,48 @@ static void mld_clear_delrec(struct inet6_dev *idev) read_unlock_bh(&idev->lock); } +static void mca_get(struct ifmcaddr6 *mc) +{ + atomic_inc(&mc->mca_refcnt); +} + +static void ma_put(struct ifmcaddr6 *mc) +{ + if (atomic_dec_and_test(&mc->mca_refcnt)) { + in6_dev_put(mc->idev); + kfree(mc); + } +} + +static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, + const struct in6_addr *addr) +{ + struct ifmcaddr6 *mc; + + mc = kzalloc(sizeof(*mc), GFP_ATOMIC); + if (mc == NULL) + return NULL; + + setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); + + mc->mca_addr = *addr; + mc->idev = idev; /* reference taken by caller */ + mc->mca_users = 1; + /* mca_stamp should be updated upon changes */ + mc->mca_cstamp = mc->mca_tstamp = jiffies; + atomic_set(&mc->mca_refcnt, 1); + spin_lock_init(&mc->mca_lock); + + /* initial mode is (EX, empty) */ + mc->mca_sfmode = MCAST_EXCLUDE; + mc->mca_sfcount[MCAST_EXCLUDE] = 1; + + if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || + IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) + mc->mca_flags |= MAF_NOREPORT; + + return mc; +} /* * device multicast group inc (add if not found) @@ -881,38 +883,20 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) } } - /* - * not found: create a new one. 
- */ - - mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC); - - if (mc == NULL) { + mc = mca_alloc(idev, addr); + if (!mc) { write_unlock_bh(&idev->lock); in6_dev_put(idev); return -ENOMEM; } - setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); - - mc->mca_addr = *addr; - mc->idev = idev; /* (reference taken) */ - mc->mca_users = 1; - /* mca_stamp should be updated upon changes */ - mc->mca_cstamp = mc->mca_tstamp = jiffies; - atomic_set(&mc->mca_refcnt, 2); - spin_lock_init(&mc->mca_lock); - - /* initial mode is (EX, empty) */ - mc->mca_sfmode = MCAST_EXCLUDE; - mc->mca_sfcount[MCAST_EXCLUDE] = 1; - - if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || - IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) - mc->mca_flags |= MAF_NOREPORT; - mc->next = idev->mc_list; idev->mc_list = mc; + + /* Hold this for the code below before we unlock, + * it is already exposed via idev->mc_list. + */ + mca_get(mc); write_unlock_bh(&idev->lock); mld_del_delrec(idev, &mc->mca_addr); @@ -931,7 +915,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) ASSERT_RTNL(); write_lock_bh(&idev->lock); - for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { + for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) { if (ipv6_addr_equal(&ma->mca_addr, addr)) { if (--ma->mca_users == 0) { *map = ma->next; @@ -956,7 +940,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) struct inet6_dev *idev; int err; - rcu_read_lock(); + ASSERT_RTNL(); idev = __in6_dev_get(dev); if (!idev) @@ -964,7 +948,6 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) else err = __ipv6_dev_mc_dec(idev, addr); - rcu_read_unlock(); return err; } @@ -982,7 +965,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); - for (mc = idev->mc_list; mc; mc=mc->next) { + for (mc = idev->mc_list; mc; mc = mc->next) { if (ipv6_addr_equal(&mc->mca_addr, group)) break; } @@ -991,7 +974,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, struct ip6_sf_list *psf; spin_lock_bh(&mc->mca_lock); - for (psf=mc->mca_sources;psf;psf=psf->sf_next) { + for (psf = mc->mca_sources; psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, src_addr)) break; } @@ -1000,7 +983,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, psf->sf_count[MCAST_EXCLUDE] != mc->mca_sfcount[MCAST_EXCLUDE]; else - rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; + rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0; spin_unlock_bh(&mc->mca_lock); } else rv = true; /* don't filter unspecified source */ @@ -1091,10 +1074,10 @@ static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, int i, scount; scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (scount == nsrcs) break; - for (i=0; isf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != @@ -1124,10 +1107,10 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, /* mark INCLUDE-mode sources */ scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (scount == nsrcs) break; - for (i=0; isf_addr)) { psf->sf_gsresp = 1; scount++; @@ -1205,15 +1188,16 @@ static void mld_update_qrv(struct inet6_dev *idev, * and SHOULD NOT be one. Catch this here if we ever run * into such a case in future. 
*/ + const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv); WARN_ON(idev->mc_qrv == 0); if (mlh2->mld2q_qrv > 0) idev->mc_qrv = mlh2->mld2q_qrv; - if (unlikely(idev->mc_qrv < 2)) { + if (unlikely(idev->mc_qrv < min_qrv)) { net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n", - idev->mc_qrv, MLD_QRV_DEFAULT); - idev->mc_qrv = MLD_QRV_DEFAULT; + idev->mc_qrv, min_qrv); + idev->mc_qrv = min_qrv; } } @@ -1253,7 +1237,7 @@ static void mld_update_qri(struct inet6_dev *idev, } static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, - unsigned long *max_delay) + unsigned long *max_delay, bool v1_query) { unsigned long mldv1_md; @@ -1261,11 +1245,32 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, if (mld_in_v2_mode_only(idev)) return -EINVAL; - /* MLDv1 router present */ mldv1_md = ntohs(mld->mld_maxdelay); + + /* When in MLDv1 fallback and a MLDv2 router start-up being + * unaware of current MLDv1 operation, the MRC == MRD mapping + * only works when the exponential algorithm is not being + * used (as MLDv1 is unaware of such things). + * + * According to the RFC author, the MLDv2 implementations + * he's aware of all use a MRC < 32768 on start up queries. + * + * Thus, should we *ever* encounter something else larger + * than that, just assume the maximum possible within our + * reach. + */ + if (!v1_query) + mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT); + *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL); - mld_set_v1_mode(idev); + /* MLDv1 router present: we need to go into v1 mode *only* + * when an MLDv1 query is received as per section 9.12. of + * RFC3810! And we know from RFC2710 section 3.7 that MLDv1 + * queries MUST be of exactly 24 octets. + */ + if (v1_query) + mld_set_v1_mode(idev); /* cancel MLDv2 report timer */ mld_gq_stop_timer(idev); @@ -1280,10 +1285,6 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld, unsigned long *max_delay) { - /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ - if (mld_in_v1_mode(idev)) - return -EINVAL; - *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL); mld_update_qrv(idev, mld); @@ -1340,8 +1341,11 @@ int igmp6_event_query(struct sk_buff *skb) !(group_type&IPV6_ADDR_MULTICAST)) return -EINVAL; - if (len == MLD_V1_QUERY_LEN) { - err = mld_process_v1(idev, mld, &max_delay); + if (len < MLD_V1_QUERY_LEN) { + return -EINVAL; + } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) { + err = mld_process_v1(idev, mld, &max_delay, + len == MLD_V1_QUERY_LEN); if (err < 0) return err; } else if (len >= MLD_V2_QUERY_LEN_MIN) { @@ -1373,18 +1377,19 @@ int igmp6_event_query(struct sk_buff *skb) mlh2 = (struct mld2_query *)skb_transport_header(skb); mark = 1; } - } else + } else { return -EINVAL; + } read_lock_bh(&idev->lock); if (group_type == IPV6_ADDR_ANY) { - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { spin_lock_bh(&ma->mca_lock); igmp6_group_queried(ma, max_delay); spin_unlock_bh(&ma->mca_lock); } } else { - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { if (!ipv6_addr_equal(group, &ma->mca_addr)) continue; spin_lock_bh(&ma->mca_lock); @@ -1448,7 +1453,7 @@ int igmp6_event_report(struct sk_buff *skb) */ read_lock_bh(&idev->lock); - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) { 
spin_lock(&ma->mca_lock); if (del_timer(&ma->mca_timer)) @@ -1512,7 +1517,7 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) struct ip6_sf_list *psf; int scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (!is_in(pmc, psf, type, gdeleted, sdeleted)) continue; scount++; @@ -1726,7 +1731,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, } first = 1; psf_prev = NULL; - for (psf=*psf_list; psf; psf=psf_next) { + for (psf = *psf_list; psf; psf = psf_next) { struct in6_addr *psrc; psf_next = psf->sf_next; @@ -1805,7 +1810,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) read_lock_bh(&idev->lock); if (!pmc) { - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; spin_lock_bh(&pmc->mca_lock); @@ -1838,7 +1843,7 @@ static void mld_clear_zeros(struct ip6_sf_list **ppsf) struct ip6_sf_list *psf_prev, *psf_next, *psf; psf_prev = NULL; - for (psf=*ppsf; psf; psf = psf_next) { + for (psf = *ppsf; psf; psf = psf_next) { psf_next = psf->sf_next; if (psf->sf_crcount == 0) { if (psf_prev) @@ -1862,7 +1867,7 @@ static void mld_send_cr(struct inet6_dev *idev) /* deleted MCA's */ pmc_prev = NULL; - for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) { + for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) { pmc_next = pmc->next; if (pmc->mca_sfmode == MCAST_INCLUDE) { type = MLD2_BLOCK_OLD_SOURCES; @@ -1895,7 +1900,7 @@ static void mld_send_cr(struct inet6_dev *idev) spin_unlock(&idev->mc_lock); /* change recs */ - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) { type = MLD2_BLOCK_OLD_SOURCES; @@ -2032,7 +2037,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev) skb = NULL; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) type = MLD2_CHANGE_TO_EXCLUDE; @@ -2077,7 +2082,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, int rv = 0; psf_prev = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; @@ -2118,7 +2123,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!idev) return -ENODEV; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } @@ -2138,7 +2143,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfcount[sfmode]--; } err = 0; - for (i=0; i 0; @@ -2154,7 +2159,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfmode = MCAST_INCLUDE; pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; - for (psf=pmc->mca_sources; psf; psf = psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(pmc->idev); } else if (sf_setstate(pmc) || changerec) @@ -2173,7 +2178,7 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, struct ip6_sf_list *psf, *psf_prev; psf_prev = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; 
psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; @@ -2198,7 +2203,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc) struct ip6_sf_list *psf; int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) if (pmc->mca_sfcount[MCAST_EXCLUDE]) { psf->sf_oldin = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && @@ -2215,7 +2220,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) int new_in, rv; rv = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (pmc->mca_sfcount[MCAST_EXCLUDE]) { new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; @@ -2225,8 +2230,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc) if (!psf->sf_oldin) { struct ip6_sf_list *prev = NULL; - for (dpsf=pmc->mca_tomb; dpsf; - dpsf=dpsf->sf_next) { + for (dpsf = pmc->mca_tomb; dpsf; + dpsf = dpsf->sf_next) { if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; @@ -2248,7 +2253,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) * add or update "delete" records if an active filter * is now inactive */ - for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) + for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next) if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; @@ -2282,7 +2287,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!idev) return -ENODEV; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } @@ -2298,7 +2303,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!delta) pmc->mca_sfcount[sfmode]++; err = 0; - for (i=0; imca_sfcount[sfmode]--; - for (j=0; jmca_sfcount[MCAST_EXCLUDE] != 0)) { struct ip6_sf_list *psf; @@ -2322,7 +2327,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; - for (psf=pmc->mca_sources; psf; psf = psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(idev); } else if (sf_setstate(pmc)) @@ -2336,12 +2341,12 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc) { struct ip6_sf_list *psf, *nextpsf; - for (psf=pmc->mca_tomb; psf; psf=nextpsf) { + for (psf = pmc->mca_tomb; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->mca_tomb = NULL; - for (psf=pmc->mca_sources; psf; psf=nextpsf) { + for (psf = pmc->mca_sources; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } @@ -2380,7 +2385,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, { int err; - /* callers have the socket lock and a write lock on ipv6_sk_mc_lock, + /* callers have the socket lock and rtnl lock * so no other readers or writers of iml or its sflist */ if (!iml->sflist) { @@ -2485,13 +2490,21 @@ void ipv6_mc_down(struct inet6_dev *idev) mld_gq_stop_timer(idev); mld_dad_stop_timer(idev); - for (i = idev->mc_list; i; i=i->next) + for (i = idev->mc_list; i; i = i->next) igmp6_group_dropped(i); read_unlock_bh(&idev->lock); mld_clear_delrec(idev); } +static void ipv6_mc_reset(struct inet6_dev *idev) +{ + idev->mc_qrv = sysctl_mld_qrv; + idev->mc_qi = MLD_QI_DEFAULT; + idev->mc_qri = MLD_QRI_DEFAULT; + idev->mc_v1_seen = 0; + idev->mc_maxdelay = unsolicited_report_interval(idev); +} /* Device going 
up */ @@ -2502,7 +2515,8 @@ void ipv6_mc_up(struct inet6_dev *idev) /* Install multicast list, except for all-nodes (already installed) */ read_lock_bh(&idev->lock); - for (i = idev->mc_list; i; i=i->next) + ipv6_mc_reset(idev); + for (i = idev->mc_list; i; i = i->next) igmp6_group_added(i); read_unlock_bh(&idev->lock); } @@ -2522,13 +2536,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev) (unsigned long)idev); setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire, (unsigned long)idev); - - idev->mc_qrv = MLD_QRV_DEFAULT; - idev->mc_qi = MLD_QI_DEFAULT; - idev->mc_qri = MLD_QRI_DEFAULT; - - idev->mc_maxdelay = unsolicited_report_interval(idev); - idev->mc_v1_seen = 0; + ipv6_mc_reset(idev); write_unlock_bh(&idev->lock); } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index db9b6cbc9db3905695888343912be7dfd06af6f2..f61429d391d32693f3fb92fd75c630c01520f776 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -336,11 +336,10 @@ static void mip6_destopt_destroy(struct xfrm_state *x) { } -static const struct xfrm_type mip6_destopt_type = -{ +static const struct xfrm_type mip6_destopt_type = { .description = "MIP6DESTOPT", .owner = THIS_MODULE, - .proto = IPPROTO_DSTOPTS, + .proto = IPPROTO_DSTOPTS, .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR, .init_state = mip6_destopt_init_state, .destructor = mip6_destopt_destroy, @@ -469,11 +468,10 @@ static void mip6_rthdr_destroy(struct xfrm_state *x) { } -static const struct xfrm_type mip6_rthdr_type = -{ +static const struct xfrm_type mip6_rthdr_type = { .description = "MIP6RT", .owner = THIS_MODULE, - .proto = IPPROTO_ROUTING, + .proto = IPPROTO_ROUTING, .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR, .init_state = mip6_rthdr_init_state, .destructor = mip6_rthdr_destroy, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 339078f95d1b6b955d29f094cf281f7835096f9c..4cb45c1079a29f4b7b59201e829905a5c65ac9fe 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -175,7 +175,7 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, type = cur->nd_opt_type; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); - } while(cur < end && cur->nd_opt_type != type); + } while (cur < end && cur->nd_opt_type != type); return cur <= end && cur->nd_opt_type == type ? cur : NULL; } @@ -192,7 +192,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, return NULL; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); - } while(cur < end && !ndisc_is_useropt(cur)); + } while (cur < end && !ndisc_is_useropt(cur)); return cur <= end && ndisc_is_useropt(cur) ? 
cur : NULL; } @@ -284,7 +284,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, } return -EINVAL; } - EXPORT_SYMBOL(ndisc_mc_map); static u32 ndisc_hash(const void *pkey, @@ -296,7 +295,7 @@ static u32 ndisc_hash(const void *pkey, static int ndisc_constructor(struct neighbour *neigh) { - struct in6_addr *addr = (struct in6_addr*)&neigh->primary_key; + struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key; struct net_device *dev = neigh->dev; struct inet6_dev *in6_dev; struct neigh_parms *parms; @@ -344,7 +343,7 @@ static int ndisc_constructor(struct neighbour *neigh) static int pndisc_constructor(struct pneigh_entry *n) { - struct in6_addr *addr = (struct in6_addr*)&n->key; + struct in6_addr *addr = (struct in6_addr *)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; @@ -357,7 +356,7 @@ static int pndisc_constructor(struct pneigh_entry *n) static void pndisc_destructor(struct pneigh_entry *n) { - struct in6_addr *addr = (struct in6_addr*)&n->key; + struct in6_addr *addr = (struct in6_addr *)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; @@ -1065,7 +1064,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) int optlen; unsigned int pref = 0; - __u8 * opt = (__u8 *)(ra_msg + 1); + __u8 *opt = (__u8 *)(ra_msg + 1); optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) - sizeof(struct ra_msg); @@ -1319,7 +1318,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) continue; if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; - rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, + rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, &ipv6_hdr(skb)->saddr); } } @@ -1352,7 +1351,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) __be32 n; u32 mtu; - memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); + memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); mtu = ntohl(n); if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 2812816aabdc0a2918b183094ac4a119b4c0f1fa..6af874fc187f64c57b7e16dfbe97ce94768f7df4 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -40,18 +40,13 @@ config NFT_CHAIN_ROUTE_IPV6 fields such as the source, destination, flowlabel, hop-limit and the packet mark. -config NFT_CHAIN_NAT_IPV6 - depends on NF_TABLES_IPV6 - depends on NF_NAT_IPV6 && NFT_NAT - tristate "IPv6 nf_tables nat chain support" - help - This option enables the "nat" chain for IPv6 in nf_tables. This - chain type is used to perform Network Address Translation (NAT) - packet transformations such as the source, destination address and - source and destination ports. +config NF_REJECT_IPV6 + tristate "IPv6 packet rejection" + default m if NETFILTER_ADVANCED=n config NFT_REJECT_IPV6 depends on NF_TABLES_IPV6 + select NF_REJECT_IPV6 default NFT_REJECT tristate @@ -70,6 +65,34 @@ config NF_NAT_IPV6 forms of full Network Address Port Translation. This can be controlled by iptables or nft. +if NF_NAT_IPV6 + +config NFT_CHAIN_NAT_IPV6 + depends on NF_TABLES_IPV6 + tristate "IPv6 nf_tables nat chain support" + help + This option enables the "nat" chain for IPv6 in nf_tables. This + chain type is used to perform Network Address Translation (NAT) + packet transformations such as the source, destination address and + source and destination ports. 
+ +config NF_NAT_MASQUERADE_IPV6 + tristate "IPv6 masquerade support" + help + This is the kernel functionality to provide NAT in the masquerade + flavour (automatic source address selection) for IPv6. + +config NFT_MASQ_IPV6 + tristate "IPv6 masquerade support for nf_tables" + depends on NF_TABLES_IPV6 + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV6 + help + This is the expression that provides IPv4 masquerading support for + nf_tables. + +endif # NF_NAT_IPV6 + config IP6_NF_IPTABLES tristate "IP6 tables support (required for filtering)" depends on INET && IPV6 @@ -190,6 +213,7 @@ config IP6_NF_FILTER config IP6_NF_TARGET_REJECT tristate "REJECT target support" depends on IP6_NF_FILTER + select NF_REJECT_IPV6 default m if NETFILTER_ADVANCED=n help The REJECT target allows a filtering rule to specify that an ICMPv6 @@ -260,6 +284,7 @@ if IP6_NF_NAT config IP6_NF_TARGET_MASQUERADE tristate "MASQUERADE target support" + select NF_NAT_MASQUERADE_IPV6 help Masquerading is a special case of NAT: all outgoing connections are changed to seem to come from a particular interface's address, and diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index c3d3286db4bb804546ee8b0488ecdad1441bb06b..fbb25f01143c8992023ca3fb167e86933b209f84 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o +obj-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o # defrag nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o @@ -26,11 +27,15 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o # logging obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o +# reject +obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o + # nf_tables obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o +obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o # matches obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 3e4e92d5e157358520fc5dc1cb04c3c18175df9f..7f9f45d829d2e7e2a742187f22ebde6c12df1a40 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -19,33 +19,12 @@ #include #include #include +#include static unsigned int masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par) { - const struct nf_nat_range *range = par->targinfo; - enum ip_conntrack_info ctinfo; - struct in6_addr src; - struct nf_conn *ct; - struct nf_nat_range newrange; - - ct = nf_ct_get(skb, &ctinfo); - NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED_REPLY)); - - if (ipv6_dev_get_saddr(dev_net(par->out), par->out, - &ipv6_hdr(skb)->daddr, 0, &src) < 0) - return NF_DROP; - - nfct_nat(ct)->masq_index = par->out->ifindex; - - newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; - newrange.min_addr.in6 = src; - newrange.max_addr.in6 = src; - newrange.min_proto = range->min_proto; - newrange.max_proto = range->max_proto; - - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); + return nf_nat_masquerade_ipv6(skb, par->targinfo, par->out); } static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) @@ -57,48 +36,6 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) 
return 0; } -static int device_cmp(struct nf_conn *ct, void *ifindex) -{ - const struct nf_conn_nat *nat = nfct_nat(ct); - - if (!nat) - return 0; - if (nf_ct_l3num(ct) != NFPROTO_IPV6) - return 0; - return nat->masq_index == (int)(long)ifindex; -} - -static int masq_device_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - const struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct net *net = dev_net(dev); - - if (event == NETDEV_DOWN) - nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex, 0, 0); - - return NOTIFY_DONE; -} - -static struct notifier_block masq_dev_notifier = { - .notifier_call = masq_device_event, -}; - -static int masq_inet_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct inet6_ifaddr *ifa = ptr; - struct netdev_notifier_info info; - - netdev_notifier_info_init(&info, ifa->idev->dev); - return masq_device_event(this, event, &info); -} - -static struct notifier_block masq_inet_notifier = { - .notifier_call = masq_inet_event, -}; - static struct xt_target masquerade_tg6_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV6, @@ -115,17 +52,14 @@ static int __init masquerade_tg6_init(void) int err; err = xt_register_target(&masquerade_tg6_reg); - if (err == 0) { - register_netdevice_notifier(&masq_dev_notifier); - register_inet6addr_notifier(&masq_inet_notifier); - } + if (err == 0) + nf_nat_masquerade_ipv6_register_notifier(); return err; } static void __exit masquerade_tg6_exit(void) { - unregister_inet6addr_notifier(&masq_inet_notifier); - unregister_netdevice_notifier(&masq_dev_notifier); + nf_nat_masquerade_ipv6_unregister_notifier(); xt_unregister_target(&masquerade_tg6_reg); } diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 387d8b8fc18db9744426e6f8c258a71d3f3d06fb..b0634ac996b706a9108b2f1369e4c2be9c1188c4 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -30,222 +30,57 @@ static const struct xt_table nf_nat_ipv6_table = { .af = NFPROTO_IPV6, }; -static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) -{ - /* Force range to this IP; let proto decide mapping for - * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). - */ - struct nf_nat_range range; - - range.flags = 0; - pr_debug("Allocating NULL binding for %p (%pI6)\n", ct, - HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? 
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 : - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6); - - return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); -} - -static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct nf_conn *ct) +static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { struct net *net = nf_ct_net(ct); - unsigned int ret; - ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat); - if (ret == NF_ACCEPT) { - if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) - ret = alloc_null_binding(ct, hooknum); - } - return ret; + return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat); } -static unsigned int -nf_nat_ipv6_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - __be16 frag_off; - int hdrlen; - u8 nexthdr; - - ct = nf_ct_get(skb, &ctinfo); - /* Can't track? It's not due to stress, or conntrack would - * have dropped it. Hence it's the user's responsibilty to - * packet filter it out, or implement conntrack/NAT for that - * protocol. 8) --RR - */ - if (!ct) - return NF_ACCEPT; - - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED_REPLY: - nexthdr = ipv6_hdr(skb)->nexthdr; - hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); - - if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { - if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, - ops->hooknum, - hdrlen)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ - case IP_CT_NEW: - /* Seen it before? This can happen for loopback, retrans, - * or local packets. - */ - if (!nf_nat_initialized(ct, maniptype)) { - unsigned int ret; - - ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct); - if (ret != NF_ACCEPT) - return ret; - } else { - pr_debug("Already setup manip %s for ct %p\n", - maniptype == NF_NAT_MANIP_SRC ? 
"SRC" : "DST", - ct); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - break; - - default: - /* ESTABLISHED */ - NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || - ctinfo == IP_CT_ESTABLISHED_REPLY); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); - -oif_changed: - nf_ct_kill_acct(ct, ctinfo, skb); - return NF_DROP; + return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_in(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - unsigned int ret; - struct in6_addr daddr = ipv6_hdr(skb)->daddr; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_out(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { -#ifdef CONFIG_XFRM - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - int err; -#endif - unsigned int ret; - - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, - &ct->tuplehash[!dir].tuple.dst.u3) || - (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) { - err = nf_xfrm_me_harder(skb, AF_INET6); - if (err < 0) - ret = NF_DROP_ERR(err); - } - } -#endif - return ret; + return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - unsigned int ret; - int err; - - /* root is playing with raw sockets. 
*/ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, - &ct->tuplehash[!dir].tuple.src.u3)) { - err = ip6_route_me_harder(skb); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#ifdef CONFIG_XFRM - else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) { - err = nf_xfrm_me_harder(skb, AF_INET6); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#endif - } - return ret; + return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain); } static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv6_in, + .hook = ip6table_nat_in, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_PRE_ROUTING, @@ -253,7 +88,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv6_out, + .hook = ip6table_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_POST_ROUTING, @@ -261,7 +96,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv6_local_fn, + .hook = ip6table_nat_local_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, @@ -269,7 +104,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv6_fn, + .hook = ip6table_nat_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index 7b9a748c6bacbe52cceb0277e4f834b81b012915..e70382e4dfb5d05d91d9dad132d04baabd2b48d5 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -40,7 +40,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, zone = nf_ct_zone((struct nf_conn *)skb->nfct); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (skb->nf_bridge && skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone; diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index fc8e49b2ff3ed6b444cd93f7db75f202ef504b64..c5812e1c1ffbfbd6029ba1dce305ca2d6f691f98 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -261,6 +261,205 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation); +unsigned int +nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + struct nf_conn_nat *nat; + enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); + __be16 frag_off; + int hdrlen; + u8 nexthdr; + + ct = nf_ct_get(skb, &ctinfo); + /* Can't track? It's not due to stress, or conntrack would + * have dropped it. 
Hence it's the user's responsibilty to + * packet filter it out, or implement conntrack/NAT for that + * protocol. 8) --RR + */ + if (!ct) + return NF_ACCEPT; + + /* Don't try to NAT if this packet is not conntracked */ + if (nf_ct_is_untracked(ct)) + return NF_ACCEPT; + + nat = nf_ct_nat_ext_add(ct); + if (nat == NULL) + return NF_ACCEPT; + + switch (ctinfo) { + case IP_CT_RELATED: + case IP_CT_RELATED_REPLY: + nexthdr = ipv6_hdr(skb)->nexthdr; + hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), + &nexthdr, &frag_off); + + if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { + if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, + ops->hooknum, + hdrlen)) + return NF_DROP; + else + return NF_ACCEPT; + } + /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ + case IP_CT_NEW: + /* Seen it before? This can happen for loopback, retrans, + * or local packets. + */ + if (!nf_nat_initialized(ct, maniptype)) { + unsigned int ret; + + ret = do_chain(ops, skb, in, out, ct); + if (ret != NF_ACCEPT) + return ret; + + if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum))) + break; + + ret = nf_nat_alloc_null_binding(ct, ops->hooknum); + if (ret != NF_ACCEPT) + return ret; + } else { + pr_debug("Already setup manip %s for ct %p\n", + maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", + ct); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + break; + + default: + /* ESTABLISHED */ + NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || + ctinfo == IP_CT_ESTABLISHED_REPLY); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + + return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + +oif_changed: + nf_ct_kill_acct(ct, ctinfo, skb); + return NF_DROP; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn); + +unsigned int +nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + unsigned int ret; + struct in6_addr daddr = ipv6_hdr(skb)->daddr; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) + skb_dst_drop(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_in); + +unsigned int +nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ +#ifdef CONFIG_XFRM + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + int err; +#endif + unsigned int ret; + + /* root is playing with raw sockets. 
*/ + if (skb->len < sizeof(struct ipv6hdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); +#ifdef CONFIG_XFRM + if (ret != NF_DROP && ret != NF_STOLEN && + !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, + &ct->tuplehash[!dir].tuple.dst.u3) || + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && + ct->tuplehash[dir].tuple.src.u.all != + ct->tuplehash[!dir].tuple.dst.u.all)) { + err = nf_xfrm_me_harder(skb, AF_INET6); + if (err < 0) + ret = NF_DROP_ERR(err); + } + } +#endif + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_out); + +unsigned int +nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + unsigned int ret; + int err; + + /* root is playing with raw sockets. */ + if (skb->len < sizeof(struct ipv6hdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, + &ct->tuplehash[!dir].tuple.src.u3)) { + err = ip6_route_me_harder(skb); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#ifdef CONFIG_XFRM + else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && + ct->tuplehash[dir].tuple.dst.u.all != + ct->tuplehash[!dir].tuple.src.u.all) { + err = nf_xfrm_me_harder(skb, AF_INET6); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#endif + } + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_local_fn); + static int __init nf_nat_l3proto_ipv6_init(void) { int err; diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c new file mode 100644 index 0000000000000000000000000000000000000000..7745609665cd3418ba0b61e68a9a83caf9c3c6c8 --- /dev/null +++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2011 Patrick McHardy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6 + * NAT funded by Astaro. 
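To make the intent of the newly exported helpers explicit: nf_nat_ipv6_fn()/_in()/_out()/_local_fn() now own all of the conntrack lookup, NULL-binding and ICMPv6 reply-translation boilerplate, and a NAT front-end only supplies a do_chain() callback that evaluates its own ruleset. The fragment below is a hypothetical third front-end modeled on the ip6table_nat conversion above; every name prefixed foo_ is made up, and the header includes and hook-registration plumbing are omitted.

static unsigned int foo_nat_do_chain(const struct nf_hook_ops *ops,
				     struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     struct nf_conn *ct)
{
	/* Evaluate foo's own rule representation here and return an NF_*
	 * verdict. Returning NF_ACCEPT without setting up a mapping makes
	 * the core helper fall back to nf_nat_alloc_null_binding().
	 */
	return NF_ACCEPT;
}

static unsigned int foo_nat_in(const struct nf_hook_ops *ops,
			       struct sk_buff *skb,
			       const struct net_device *in,
			       const struct net_device *out,
			       int (*okfn)(struct sk_buff *))
{
	/* PRE_ROUTING hook: the shared helper also drops the cached dst
	 * when DNAT changed the destination address.
	 */
	return nf_nat_ipv6_in(ops, skb, in, out, foo_nat_do_chain);
}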
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int +nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + const struct net_device *out) +{ + enum ip_conntrack_info ctinfo; + struct in6_addr src; + struct nf_conn *ct; + struct nf_nat_range newrange; + + ct = nf_ct_get(skb, &ctinfo); + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || + ctinfo == IP_CT_RELATED_REPLY)); + + if (ipv6_dev_get_saddr(dev_net(out), out, + &ipv6_hdr(skb)->daddr, 0, &src) < 0) + return NF_DROP; + + nfct_nat(ct)->masq_index = out->ifindex; + + newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.in6 = src; + newrange.max_addr.in6 = src; + newrange.min_proto = range->min_proto; + newrange.max_proto = range->max_proto; + + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); + +static int device_cmp(struct nf_conn *ct, void *ifindex) +{ + const struct nf_conn_nat *nat = nfct_nat(ct); + + if (!nat) + return 0; + if (nf_ct_l3num(ct) != NFPROTO_IPV6) + return 0; + return nat->masq_index == (int)(long)ifindex; +} + +static int masq_device_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + + if (event == NETDEV_DOWN) + nf_ct_iterate_cleanup(net, device_cmp, + (void *)(long)dev->ifindex, 0, 0); + + return NOTIFY_DONE; +} + +static struct notifier_block masq_dev_notifier = { + .notifier_call = masq_device_event, +}; + +static int masq_inet_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = ptr; + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, ifa->idev->dev); + return masq_device_event(this, event, &info); +} + +static struct notifier_block masq_inet_notifier = { + .notifier_call = masq_inet_event, +}; + +static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); + +void nf_nat_masquerade_ipv6_register_notifier(void) +{ + /* check if the notifier is already set */ + if (atomic_inc_return(&masquerade_notifier_refcount) > 1) + return; + + register_netdevice_notifier(&masq_dev_notifier); + register_inet6addr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier); + +void nf_nat_masquerade_ipv6_unregister_notifier(void) +{ + /* check if the notifier still has clients */ + if (atomic_dec_return(&masquerade_notifier_refcount) > 0) + return; + + unregister_inet6addr_notifier(&masq_inet_notifier); + unregister_netdevice_notifier(&masq_dev_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy "); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c new file mode 100644 index 0000000000000000000000000000000000000000..5f5f0438d74d9b76596b1e5633189402c5925d80 --- /dev/null +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -0,0 +1,163 @@ +/* (C) 1999-2001 Paul `Rusty' Russell + * (C) 2002-2004 Netfilter Core Team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
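The atomic guard in nf_nat_masquerade_ipv6_register_notifier()/_unregister_notifier() exists because two independent modules, the converted xt MASQUERADE target above and the nft masq expression added further below, now share a single pair of notifiers: only the first register call may actually hook them and only the last unregister call may remove them. A compilable userspace model of just that counting rule, with illustrative names:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int refcount;
static bool hooked;			/* stands in for the real notifier chains */

static void masq_notifier_register(void)
{
	if (atomic_fetch_add(&refcount, 1) + 1 > 1)
		return;			/* someone already hooked it */
	hooked = true;
}

static void masq_notifier_unregister(void)
{
	if (atomic_fetch_sub(&refcount, 1) - 1 > 0)
		return;			/* still has other users */
	hooked = false;
}

int main(void)
{
	masq_notifier_register();	/* e.g. the xt MASQUERADE target loads */
	masq_notifier_register();	/* e.g. the nft masq expression loads */
	masq_notifier_unregister();	/* one of them unloads */
	assert(hooked);			/* still needed by the other */
	masq_notifier_unregister();
	assert(!hooked);
	return 0;
}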
+ */ +#include +#include +#include +#include +#include + +void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct tcphdr otcph, *tcph; + unsigned int otcplen, hh_len; + int tcphoff, needs_ack; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); + struct ipv6hdr *ip6h; +#define DEFAULT_TOS_VALUE 0x0U + const __u8 tclass = DEFAULT_TOS_VALUE; + struct dst_entry *dst = NULL; + u8 proto; + __be16 frag_off; + struct flowi6 fl6; + + if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || + (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { + pr_debug("addr is not unicast.\n"); + return; + } + + proto = oip6h->nexthdr; + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); + + if ((tcphoff < 0) || (tcphoff > oldskb->len)) { + pr_debug("Cannot get TCP header.\n"); + return; + } + + otcplen = oldskb->len - tcphoff; + + /* IP header checks: fragment, too short. */ + if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { + pr_debug("proto(%d) != IPPROTO_TCP, " + "or too short. otcplen = %d\n", + proto, otcplen); + return; + } + + if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) + BUG(); + + /* No RST for RST. */ + if (otcph.rst) { + pr_debug("RST is set\n"); + return; + } + + /* Check checksum. */ + if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { + pr_debug("TCP checksum is invalid\n"); + return; + } + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_TCP; + fl6.saddr = oip6h->daddr; + fl6.daddr = oip6h->saddr; + fl6.fl6_sport = otcph.dest; + fl6.fl6_dport = otcph.source; + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); + dst = ip6_route_output(net, NULL, &fl6); + if (dst == NULL || dst->error) { + dst_release(dst); + return; + } + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); + if (IS_ERR(dst)) + return; + + hh_len = (dst->dev->hard_header_len + 15)&~15; + nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) + + sizeof(struct tcphdr) + dst->trailer_len, + GFP_ATOMIC); + + if (!nskb) { + net_dbg_ratelimited("cannot alloc skb\n"); + dst_release(dst); + return; + } + + skb_dst_set(nskb, dst); + + skb_reserve(nskb, hh_len + dst->header_len); + + skb_put(nskb, sizeof(struct ipv6hdr)); + skb_reset_network_header(nskb); + ip6h = ipv6_hdr(nskb); + ip6_flow_hdr(ip6h, tclass, 0); + ip6h->hop_limit = ip6_dst_hoplimit(dst); + ip6h->nexthdr = IPPROTO_TCP; + ip6h->saddr = oip6h->daddr; + ip6h->daddr = oip6h->saddr; + + skb_reset_transport_header(nskb); + tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); + /* Truncate to length (no data) */ + tcph->doff = sizeof(struct tcphdr)/4; + tcph->source = otcph.dest; + tcph->dest = otcph.source; + + if (otcph.ack) { + needs_ack = 0; + tcph->seq = otcph.ack_seq; + tcph->ack_seq = 0; + } else { + needs_ack = 1; + tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin + + otcplen - (otcph.doff<<2)); + tcph->seq = 0; + } + + /* Reset flags */ + ((u_int8_t *)tcph)[13] = 0; + tcph->rst = 1; + tcph->ack = needs_ack; + tcph->window = 0; + tcph->urg_ptr = 0; + tcph->check = 0; + + /* Adjust TCP checksum */ + tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, + &ipv6_hdr(nskb)->daddr, + sizeof(struct tcphdr), IPPROTO_TCP, + csum_partial(tcph, + sizeof(struct tcphdr), 0)); + + nf_ct_attach(nskb, oldskb); + +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + /* If we use ip6_local_out for bridged traffic, the MAC source on + * the RST will be ours, instead of the destination's. 
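The ack_seq computation in nf_send_reset6() above follows the classic RFC 793 rule: a RST answering a segment that carried no ACK must acknowledge SEG.SEQ plus however much sequence space the offending segment consumed, with SYN and FIN each counting as one. A small standalone check of that arithmetic; the numbers are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t rst_ack_seq(uint32_t seq, int syn, int fin,
			    unsigned int tcplen, unsigned int doff_words)
{
	/* tcplen covers TCP header + payload; doff_words * 4 is the header,
	 * so the difference is the payload length.
	 */
	return seq + syn + fin + tcplen - doff_words * 4;
}

int main(void)
{
	/* bare SYN: 20-byte header, no data -> ACK = ISN + 1 */
	printf("%u\n", (unsigned)rst_ack_seq(1000, 1, 0, 20, 5));	/* 1001 */
	/* 100 bytes of data, no SYN/FIN -> ACK = SEQ + 100 */
	printf("%u\n", (unsigned)rst_ack_seq(5000, 0, 0, 120, 5));	/* 5100 */
	return 0;
}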
This confuses + * some routers/firewalls, and they drop the packet. So we need to + * build the eth header using the original destination's MAC as the + * source, and send the RST packet directly. + */ + if (oldskb->nf_bridge) { + struct ethhdr *oeth = eth_hdr(oldskb); + nskb->dev = oldskb->nf_bridge->physindev; + nskb->protocol = htons(ETH_P_IPV6); + ip6h->payload_len = htons(sizeof(struct tcphdr)); + if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), + oeth->h_source, oeth->h_dest, nskb->len) < 0) + return; + dev_queue_xmit(nskb); + } else +#endif + ip6_local_out(nskb); +} +EXPORT_SYMBOL_GPL(nf_send_reset6); diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c index d189fcb437feb9de2f43d5217389c90e029dbd21..1c4b75dd425b8e7fe421df37e215534a6eb19584 100644 --- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c @@ -24,144 +24,53 @@ #include #include -/* - * IPv6 NAT chains - */ - -static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { - enum ip_conntrack_info ctinfo; - struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - __be16 frag_off; - int hdrlen; - u8 nexthdr; struct nft_pktinfo pkt; - unsigned int ret; - - if (ct == NULL || nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED + IP_CT_IS_REPLY: - nexthdr = ipv6_hdr(skb)->nexthdr; - hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); - - if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { - if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, - ops->hooknum, - hdrlen)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall through */ - case IP_CT_NEW: - if (nf_nat_initialized(ct, maniptype)) - break; - - nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); - ret = nft_do_chain(&pkt, ops); - if (ret != NF_ACCEPT) - return ret; - if (!nf_nat_initialized(ct, maniptype)) { - ret = nf_nat_alloc_null_binding(ct, ops->hooknum); - if (ret != NF_ACCEPT) - return ret; - } - default: - break; - } + nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + return nft_do_chain(&pkt, ops); } -static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct in6_addr daddr = ipv6_hdr(skb)->daddr; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int 
nft_nat_ipv6_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo __maybe_unused; - const struct nf_conn *ct __maybe_unused; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, - &ct->tuplehash[!dir].tuple.dst.u3) || - (ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) - if (nf_xfrm_me_harder(skb, AF_INET6) < 0) - ret = NF_DROP; - } -#endif - return ret; + return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo; - const struct nf_conn *ct; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain); +} - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, - &ct->tuplehash[!dir].tuple.src.u3)) { - if (ip6_route_me_harder(skb)) - ret = NF_DROP; - } -#ifdef CONFIG_XFRM - else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) - if (nf_xfrm_me_harder(skb, AF_INET6)) - ret = NF_DROP; -#endif - } - return ret; +static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain); } static const struct nf_chain_type nft_chain_nat_ipv6 = { @@ -174,10 +83,10 @@ static const struct nf_chain_type nft_chain_nat_ipv6 = { (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { - [NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting, - [NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting, - [NF_INET_LOCAL_OUT] = nf_nat_ipv6_output, - [NF_INET_LOCAL_IN] = nf_nat_ipv6_fn, + [NF_INET_PRE_ROUTING] = nft_nat_ipv6_in, + [NF_INET_POST_ROUTING] = nft_nat_ipv6_out, + [NF_INET_LOCAL_OUT] = nft_nat_ipv6_local_fn, + [NF_INET_LOCAL_IN] = nft_nat_ipv6_fn, }, }; diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c new file mode 100644 index 0000000000000000000000000000000000000000..556262f407616ee3a20bb10888e02ca1570d73c0 --- /dev/null +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void nft_masq_ipv6_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_masq *priv = nft_expr_priv(expr); + struct nf_nat_range range; + unsigned int verdict; + + range.flags = priv->flags; + + verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); + + data[NFT_REG_VERDICT].verdict = verdict; +} + +static struct nft_expr_type nft_masq_ipv6_type; +static const struct nft_expr_ops nft_masq_ipv6_ops = { + .type = &nft_masq_ipv6_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), + .eval = nft_masq_ipv6_eval, + .init = nft_masq_init, + .dump = nft_masq_dump, +}; + +static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { + .family = NFPROTO_IPV6, + .name = "masq", + .ops = &nft_masq_ipv6_ops, + .policy = nft_masq_policy, + .maxattr = NFTA_MASQ_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_masq_ipv6_module_init(void) +{ + int ret; + + ret = nft_register_expr(&nft_masq_ipv6_type); + if (ret < 0) + return ret; + + nf_nat_masquerade_ipv6_register_notifier(); + + return ret; +} + +static void __exit nft_masq_ipv6_module_exit(void) +{ + nft_unregister_expr(&nft_masq_ipv6_type); + nf_nat_masquerade_ipv6_unregister_notifier(); +} + +module_init(nft_masq_ipv6_module_init); +module_exit(nft_masq_ipv6_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez "); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "masq"); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 5ec867e4a8b74fb23d8fdd2bcf9e0dce52f94455..fc24c390af0541050782c76434ccbbf9e00c24f0 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -35,7 +35,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) if (found_rhdr) return offset; break; - default : + default: return offset; } diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 2d6f860e5c1e77cb087b2220ba2db9592a770159..1752cd0b48820367ad64cd926d8ea16a6c69341a 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -8,7 +8,7 @@ * except it reports the sockets in the INET6 address family. * * Authors: David S. Miller (davem@caip.rutgers.edu) - * YOSHIFUJI Hideaki + * YOSHIFUJI Hideaki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index e048cf1bb6a234bb1c0eac0987d8e093a733f2f1..e3770abe688a3a9059456fe9195adbfcdfb73157 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c @@ -51,6 +51,7 @@ EXPORT_SYMBOL(inet6_del_protocol); #endif const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly; +EXPORT_SYMBOL(inet6_offloads); int inet6_add_offload(const struct net_offload *prot, unsigned char protocol) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 39d44226e402473ed03058f6945111f02abeb146..896af8807979fad382ba65724ccad0bf040c992f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -889,7 +889,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, - len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst, + len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, msg->msg_flags, dontfrag); if (err) @@ -902,7 +902,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, dst_release(dst); out: fl6_sock_release(flowlabel); - return err<0?err:len; + return err < 0 ? 
err : len; do_confirm: dst_confirm(dst); if (!(msg->msg_flags & MSG_PROBE) || len) @@ -1045,7 +1045,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, struct raw6_sock *rp = raw6_sk(sk); int val, len; - if (get_user(len,optlen)) + if (get_user(len, optlen)) return -EFAULT; switch (optname) { @@ -1069,7 +1069,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval,&val,len)) + if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index c6557d9f7808c42e686582f31e8cc535a7067f3f..1a157ca2ebc18c5a12519adb9eacd30d508722eb 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -62,13 +62,12 @@ static const char ip6_frag_cache_name[] = "ip6-frags"; -struct ip6frag_skb_cb -{ +struct ip6frag_skb_cb { struct inet6_skb_parm h; int offset; }; -#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb)) +#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) { @@ -289,7 +288,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, goto found; } prev = NULL; - for(next = fq->q.fragments; next != NULL; next = next->next) { + for (next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; @@ -529,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); /* Jumbo payload inhibits frag. header */ - if (hdr->payload_len==0) + if (hdr->payload_len == 0) goto fail_hdr; if (!pskb_may_pull(skb, (skb_transport_offset(skb) + @@ -575,8 +574,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return -1; } -static const struct inet6_protocol frag_protocol = -{ +static const struct inet6_protocol frag_protocol = { .handler = ipv6_frag_rcv, .flags = INET6_PROTO_NOPOLICY, }; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index bafde82324c57369abfd6f5b37decafadcb08d9c..a318dd89b6d91a1c373ef0ef0ea8604976c9e36f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -812,7 +812,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, } -struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6, +struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, int flags) { return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); @@ -842,7 +842,6 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, return NULL; } - EXPORT_SYMBOL(rt6_lookup); /* ip6_ins_rt is called with FREE table->tb6_lock. 
@@ -1023,7 +1022,7 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); } -struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, +struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) { int flags = 0; @@ -1040,7 +1039,6 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); } - EXPORT_SYMBOL(ip6_route_output); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) @@ -1145,7 +1143,7 @@ static void ip6_link_failure(struct sk_buff *skb) static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { - struct rt6_info *rt6 = (struct rt6_info*)dst; + struct rt6_info *rt6 = (struct rt6_info *)dst; dst_confirm(dst); if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { @@ -1920,7 +1918,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, return NULL; read_lock_bh(&table->tb6_lock); - fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0); + fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0); if (!fn) goto out; @@ -1979,7 +1977,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev return NULL; read_lock_bh(&table->tb6_lock); - for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { + for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { if (dev == rt->dst.dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && ipv6_addr_equal(&rt->rt6i_gateway, addr)) @@ -2064,7 +2062,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) struct in6_rtmsg rtmsg; int err; - switch(cmd) { + switch (cmd) { case SIOCADDRT: /* Add a route */ case SIOCDELRT: /* Delete a route */ if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -2187,7 +2185,7 @@ int ip6_route_get_saddr(struct net *net, unsigned int prefs, struct in6_addr *saddr) { - struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt); + struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); int err = 0; if (rt->rt6i_prefsrc.plen) *saddr = rt->rt6i_prefsrc.addr; @@ -2482,7 +2480,7 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add) return last_err; } -static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh) +static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) { struct fib6_config cfg; int err; @@ -2497,7 +2495,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh) return ip6_route_del(&cfg); } -static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh) +static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) { struct fib6_config cfg; int err; @@ -2689,7 +2687,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) prefix, 0, NLM_F_MULTI); } -static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh) +static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[RTA_MAX+1]; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 6163f851dc014ebd205211829214ce4d0bbc332b..6eab37cf53451b0f8112e51bff2437cb2b4da213 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -812,9 +812,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, const struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; __be16 df = 
tiph->frag_off; - struct rtable *rt; /* Route to the other host */ - struct net_device *tdev; /* Device to other host */ - unsigned int max_headroom; /* The extra header space needed */ + struct rtable *rt; /* Route to the other host */ + struct net_device *tdev; /* Device to other host */ + unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; struct flowi4 fl4; int mtu; @@ -822,6 +822,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, int addr_type; u8 ttl; int err; + u8 protocol = IPPROTO_IPV6; + int t_hlen = tunnel->hlen + sizeof(struct iphdr); if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; @@ -911,8 +913,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } + skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); + if (IS_ERR(skb)) { + ip_rt_put(rt); + goto out; + } + if (df) { - mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); + mtu = dst_mtu(&rt->dst) - t_hlen; if (mtu < 68) { dev->stats.collisions++; @@ -947,7 +955,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, /* * Okay, now see if we can stuff it in the buffer as-is. */ - max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); + max_headroom = LL_RESERVED_SPACE(tdev) + t_hlen; if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { @@ -969,14 +977,15 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ttl = iph6->hop_limit; tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); - if (IS_ERR(skb)) { + if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) { ip_rt_put(rt); - goto out; + goto tx_error; } + skb_set_inner_ipproto(skb, IPPROTO_IPV6); + err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, - IPPROTO_IPV6, tos, ttl, df, + protocol, tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; @@ -999,6 +1008,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (IS_ERR(skb)) goto out; + skb_set_inner_ipproto(skb, IPPROTO_IPIP); + ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP); return NETDEV_TX_OK; out: @@ -1059,8 +1070,10 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link); if (tdev) { + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); - dev->mtu = tdev->mtu - sizeof(struct iphdr); + dev->mtu = tdev->mtu - t_hlen; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } @@ -1123,7 +1136,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, #endif static int -ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) +ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; @@ -1307,7 +1320,10 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + + if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - t_hlen) return -EINVAL; dev->mtu = new_mtu; return 0; @@ -1338,14 +1354,17 @@ static void ipip6_dev_free(struct net_device *dev) static void ipip6_tunnel_setup(struct net_device *dev) { + struct ip_tunnel *tunnel = netdev_priv(dev); 
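The t_hlen changes above make the MTU, headroom and user-MTU checks account for the configured encapsulation header on top of the outer IPv4 header instead of hard-coding sizeof(struct iphdr). A standalone sketch of that arithmetic, with illustrative constants and an assumed 8-byte UDP-style encapsulation header (not the kernel's helpers):

#include <stdio.h>

#define IPV4_HDR_LEN	20	/* illustrative, stands in for sizeof(struct iphdr) */
#define IPV6_MIN_MTU	1280

/* Mirror of the t_hlen accounting: tunnel overhead is the outer IPv4
 * header plus whatever extra encapsulation the tunnel was configured
 * with (zero for plain sit). */
static int sit_link_mtu(int lower_dev_mtu, int encap_hlen)
{
	int t_hlen = encap_hlen + IPV4_HDR_LEN;
	int mtu = lower_dev_mtu - t_hlen;

	return mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : mtu;
}

int main(void)
{
	printf("plain sit over 1500: %d\n", sit_link_mtu(1500, 0));	/* 1480 */
	printf("udp encap over 1500: %d\n", sit_link_mtu(1500, 8));	/* 1472 */
	return 0;
}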
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr); + dev->netdev_ops = &ipip6_netdev_ops; - dev->destructor = ipip6_dev_free; + dev->destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); - dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr); + dev->hard_header_len = LL_MAX_HEADER + t_hlen; + dev->mtu = ETH_DATA_LEN - t_hlen; dev->flags = IFF_NOARP; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_LLTX; @@ -1466,6 +1485,40 @@ static void ipip6_netlink_parms(struct nlattr *data[], } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipip6_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_IPTUN_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]); + } + + if (data[IFLA_IPTUN_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]); + } + + if (data[IFLA_IPTUN_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + } + + if (data[IFLA_IPTUN_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + } + + return ret; +} + #ifdef CONFIG_IPV6_SIT_6RD /* This function returns true when 6RD attributes are present in the nl msg */ static bool ipip6_netlink_6rd_parms(struct nlattr *data[], @@ -1509,12 +1562,20 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, { struct net *net = dev_net(dev); struct ip_tunnel *nt; + struct ip_tunnel_encap ipencap; #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif int err; nt = netdev_priv(dev); + + if (ipip6_netlink_encap_parms(data, &ipencap)) { + err = ip_tunnel_encap_setup(nt, &ipencap); + if (err < 0) + return err; + } + ipip6_netlink_parms(data, &nt->parms); if (ipip6_tunnel_locate(net, &nt->parms, 0)) @@ -1537,15 +1598,23 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], { struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; struct net *net = t->net; struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif + int err; if (dev == sitn->fb_tunnel_dev) return -EINVAL; + if (ipip6_netlink_encap_parms(data, &ipencap)) { + err = ip_tunnel_encap_setup(t, &ipencap); + if (err < 0) + return err; + } + ipip6_netlink_parms(data, &p); if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || @@ -1599,6 +1668,14 @@ static size_t ipip6_get_size(const struct net_device *dev) /* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */ nla_total_size(2) + #endif + /* IFLA_IPTUN_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_FLAGS */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -1630,6 +1707,16 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; #endif + if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, + tunnel->encap.type) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + tunnel->encap.sport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + tunnel->encap.dport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, + tunnel->encap.dport)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1651,6 +1738,10 @@ static const struct nla_policy 
ipip6_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 }, [IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 }, #endif + [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, }; static void ipip6_dellink(struct net_device *dev, struct list_head *head) diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 83cea1d39466affce703d7c1b737a386bf3faecf..9a2838e93cc5dc2f901c64373378a6dee3533235 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -24,7 +24,7 @@ #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS]; +static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..] @@ -203,7 +203,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->ir_num = ntohs(th->dest); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; - if (ipv6_opt_accepted(sk, skb) || + if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { atomic_inc(&skb->users); diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 0c56c93619e063710a8d32f3f9e15fd9cdb6453c..c5c10fafcfe2e068fe33adc8367206a16e40c7bf 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -16,6 +16,8 @@ #include #include +static int one = 1; + static struct ctl_table ipv6_table_template[] = { { .procname = "bindv6only", @@ -63,6 +65,14 @@ static struct ctl_table ipv6_rotable[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "mld_qrv", + .data = &sysctl_mld_qrv, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one + }, { } }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 03a5d1ed33405efab3804973c6a807bb6e24344e..cf2e45ab2fa45994111cc2cab6e3b332fc5979c1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -92,13 +92,16 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - const struct rt6_info *rt = (const struct rt6_info *)dst; - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; - if (rt->rt6i_node) - inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; + if (dst) { + const struct rt6_info *rt = (const struct rt6_info *)dst; + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (rt->rt6i_node) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; + } } static void tcp_v6_hash(struct sock *sk) @@ -737,8 +740,9 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) ireq->ir_iif = inet6_iif(skb); - if (!TCP_SKB_CB(skb)->when && - (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || + if (!TCP_SKB_CB(skb)->tcp_tw_isn && + (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || + np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim || np->repflow)) { atomic_inc(&skb->users); @@ -1363,7 +1367,7 @@ static int 
tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); - if (ipv6_opt_accepted(sk, opt_skb)) { + if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); opt_skb = xchg(&np->pktoptions, opt_skb); } else { @@ -1407,11 +1411,19 @@ static int tcp_v6_rcv(struct sk_buff *skb) th = tcp_hdr(skb); hdr = ipv6_hdr(skb); + /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() + * barrier() makes sure compiler wont play fool^Waliasing games. + */ + memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), + sizeof(struct inet6_skb_parm)); + barrier(); + TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->when = 0; + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 01b0ff9a0c2c00d6734537254e2150edcbcb64d7..c1ab77105b4c3f8025beea58d06c3607eda44656 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -15,54 +15,17 @@ #include #include "ip6_offload.h" -static int tcp_v6_gso_send_check(struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - struct tcphdr *th; - - if (!pskb_may_pull(skb, sizeof(*th))) - return -EINVAL; - - ipv6h = ipv6_hdr(skb); - th = tcp_hdr(skb); - - th->check = 0; - skb->ip_summed = CHECKSUM_PARTIAL; - __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); - return 0; -} - static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { - const struct ipv6hdr *iph = skb_gro_network_header(skb); - __wsum wsum; - /* Don't bother verifying checksum if we're going to flush anyway. */ - if (NAPI_GRO_CB(skb)->flush) - goto skip_csum; - - wsum = NAPI_GRO_CB(skb)->csum; - - switch (skb->ip_summed) { - case CHECKSUM_NONE: - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), - wsum); - - /* fall through */ - - case CHECKSUM_COMPLETE: - if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, - wsum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - + if (!NAPI_GRO_CB(skb)->flush && + skb_gro_checksum_validate(skb, IPPROTO_TCP, + ip6_gro_compute_pseudo)) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } -skip_csum: return tcp_gro_receive(head, skb); } @@ -78,10 +41,32 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff) return tcp_gro_complete(skb); } +struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return ERR_PTR(-EINVAL); + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + /* Set up pseudo header, usually expect stack to have done + * this. 
+ */ + + th->check = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); + } + + return tcp_gso_segment(skb, features); +} static const struct net_offload tcpv6_offload = { .callbacks = { - .gso_send_check = tcp_v6_gso_send_check, - .gso_segment = tcp_gso_segment, + .gso_segment = tcp6_gso_segment, .gro_receive = tcp6_gro_receive, .gro_complete = tcp6_gro_complete, }, diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 2c4e4c5c7614bf9ee864a99e095c7c17e46d9b57..3c758007b327decd0f1d950ae6cca928af052b05 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -15,7 +15,7 @@ * along with this program; if not, see . * * Authors Mitsuru KANDA - * YOSHIFUJI Hideaki + * YOSHIFUJI Hideaki */ #define pr_fmt(fmt) "IPv6: " fmt @@ -64,7 +64,6 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family) return ret; } - EXPORT_SYMBOL(xfrm6_tunnel_register); int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) @@ -92,7 +91,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) return ret; } - EXPORT_SYMBOL(xfrm6_tunnel_deregister); #define for_each_tunnel_rcu(head, handler) \ diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4836af8f582d32d881f8697c1c92b6909e37ab27..f6ba535b6febe40aad990e7f9541446fc573d511 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -243,7 +243,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, goto exact_match; } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -323,7 +323,7 @@ struct sock *__udp6_lib_lookup(struct net *net, } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -373,8 +373,8 @@ EXPORT_SYMBOL_GPL(udp6_lib_lookup); /* - * This should be easy, if there is something there we - * return it, otherwise we block. + * This should be easy, if there is something there we + * return it, otherwise we block. 
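The two lookup hunks above replace the open-coded ((u64)hash * matches) >> 32 with reciprocal_scale(); the arithmetic is unchanged and maps a full-range 32-bit value into [0, ep_ro) without a division. A standalone sketch of the same computation (the helper name here is local to the example):

#include <stdint.h>
#include <stdio.h>

/* Scale a 32-bit value into the interval [0, ep_ro) by multiplying and
 * keeping the high word, exactly the expression being replaced above. */
static uint32_t scale32(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	/* With two matching reuseport sockets, roughly half of all hash
	 * values land in slot 0 (the "== 0" test in the lookup) and half
	 * in slot 1. */
	printf("%u %u %u\n",
	       scale32(0x00000000u, 2),		/* 0 */
	       scale32(0x7fffffffu, 2),		/* 0 */
	       scale32(0x80000000u, 2));	/* 1 */
	return 0;
}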
*/ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, @@ -530,7 +530,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct in6_addr *saddr = &hdr->saddr; const struct in6_addr *daddr = &hdr->daddr; - struct udphdr *uh = (struct udphdr*)(skb->data+offset); + struct udphdr *uh = (struct udphdr *)(skb->data+offset); struct sock *sk; int err; struct net *net = dev_net(skb->dev); @@ -596,7 +596,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, - u8 code, int offset, __be32 info ) + u8 code, int offset, __be32 info) { __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } @@ -891,6 +891,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; } + if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_compute_pseudo); + ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); @@ -960,10 +964,10 @@ static void udp_v6_flush_pending_frames(struct sock *sk) } /** - * udp6_hwcsum_outgoing - handle outgoing HW checksumming - * @sk: socket we are sending on - * @skb: sk_buff containing the filled-in UDP header - * (checksum field must be zeroed out) + * udp6_hwcsum_outgoing - handle outgoing HW checksumming + * @sk: socket we are sending on + * @skb: sk_buff containing the filled-in UDP header + * (checksum field must be zeroed out) */ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, const struct in6_addr *saddr, @@ -1294,7 +1298,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, - (struct rt6_info*)dst, + (struct rt6_info *)dst, corkreq ? 
msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); if (err) udp_v6_flush_pending_frames(sk); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 0ae3d98f83e00533c14e6f8c23ab78a5a6a72625..6b8f543f6ac6ac76c2efaec21b4489fe347f9544 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -10,34 +10,13 @@ * UDPv6 GSO support */ #include +#include #include #include #include #include #include "ip6_offload.h" -static int udp6_ufo_send_check(struct sk_buff *skb) -{ - const struct ipv6hdr *ipv6h; - struct udphdr *uh; - - if (!pskb_may_pull(skb, sizeof(*uh))) - return -EINVAL; - - if (likely(!skb->encapsulation)) { - ipv6h = ipv6_hdr(skb); - uh = udp_hdr(skb); - - uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; - } - - return 0; -} - static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { @@ -48,7 +27,6 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u8 *packet_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); - int offset; __wsum csum; int tnl_hlen; @@ -80,15 +58,29 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) - segs = skb_udp_tunnel_segment(skb, features); + segs = skb_udp_tunnel_segment(skb, features, true); else { + const struct ipv6hdr *ipv6h; + struct udphdr *uh; + + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); + + uh = udp_hdr(skb); + ipv6h = ipv6_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v6_check(skb->len, &ipv6h->saddr, + &ipv6h->daddr, csum); + + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ @@ -127,10 +119,52 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, out: return segs; } + +static struct sk_buff **udp6_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct udphdr *uh = udp_gro_udphdr(skb); + + if (unlikely(!uh)) + goto flush; + + /* Don't bother verifying checksum if we're going to flush anyway. 
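The software-UFO path above recomputes the UDP checksum over the IPv6 pseudo header plus the whole datagram and substitutes CSUM_MANGLED_0 (0xffff) when the result folds to zero, because a zero checksum field means "no checksum" for UDP. A standalone, unoptimized sketch of that computation; the addresses, ports and payload are made up for the example and the helpers are not the kernel's csum routines:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Add a byte buffer to a ones'-complement accumulator, 16 bits at a time. */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((data[i] << 8) | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)(data[len - 1] << 8);
	return sum;
}

/* Fold to 16 bits and complement. */
static uint16_t csum_finish(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t saddr[16] = { 0x20, 0x01, 0x0d, 0xb8 };			/* 2001:db8:: */
	uint8_t daddr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 1 };	/* 2001:db8::1 */
	/* UDP header (sport 4242, dport 4242, length 12, check 0) + 4 bytes. */
	uint8_t udp[12] = { 0x10, 0x92, 0x10, 0x92, 0x00, 0x0c, 0x00, 0x00,
			    'p', 'i', 'n', 'g' };
	uint32_t sum = 0;
	uint16_t check;

	/* Pseudo header: saddr, daddr, upper-layer length, next header. */
	sum = csum_add(sum, saddr, 16);
	sum = csum_add(sum, daddr, 16);
	sum += sizeof(udp);
	sum += 17;				/* IPPROTO_UDP */
	sum = csum_add(sum, udp, sizeof(udp));	/* header + payload, check = 0 */

	check = csum_finish(sum);
	if (check == 0)
		check = 0xffff;	/* CSUM_MANGLED_0: 0 would mean "no checksum" */
	printf("udp check = 0x%04x\n", check);
	return 0;
}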
*/ + if (NAPI_GRO_CB(skb)->flush) + goto skip; + + if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, + ip6_gro_compute_pseudo)) + goto flush; + else if (uh->check) + skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_gro_compute_pseudo); + +skip: + NAPI_GRO_CB(skb)->is_ipv6 = 1; + return udp_gro_receive(head, skb, uh); + +flush: + NAPI_GRO_CB(skb)->flush = 1; + return NULL; +} + +static int udp6_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); + + if (uh->check) + uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, + &ipv6h->daddr, 0); + + return udp_gro_complete(skb, nhoff); +} + static const struct net_offload udpv6_offload = { .callbacks = { - .gso_send_check = udp6_ufo_send_check, .gso_segment = udp6_ufo_fragment, + .gro_receive = udp6_gro_receive, + .gro_complete = udp6_gro_complete, }, }; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index f8c3cf842f534e3e87c35a1213afcb320880998b..f48fbe4d16f5f433c40cba8077663db77d2984d4 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -3,8 +3,8 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro * YOSHIFUJI Hideaki @USAGI * IPv6 support */ @@ -52,7 +52,6 @@ int xfrm6_rcv(struct sk_buff *skb) return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], 0); } - EXPORT_SYMBOL(xfrm6_rcv); int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, @@ -142,5 +141,4 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, drop: return -1; } - EXPORT_SYMBOL(xfrm6_input_addr); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 433672d07d0b55e1e436be704780e8c6f5777447..ca3f29b98ae5d76b7e69c38f617362a90fd9fd04 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -25,7 +25,6 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, { return ip6_find_1stfragopt(skb, prevhdr); } - EXPORT_SYMBOL(xfrm6_find_1stfragopt); static int xfrm6_local_dontfrag(struct sk_buff *skb) diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 2a0bbda2c76a99dfa687313d230595c251103b45..ac49f84fe2c34026b7af5392df06fc232ea3b9e1 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -3,11 +3,11 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro - * IPv6 support - * YOSHIFUJI Hideaki - * Split up af-specific portion + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro + * IPv6 support + * YOSHIFUJI Hideaki + * Split up af-specific portion * */ @@ -84,7 +84,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { if (dst->ops->family == AF_INET6) { - struct rt6_info *rt = (struct rt6_info*)dst; + struct rt6_info *rt = (struct rt6_info *)dst; if (rt->rt6i_node) path->path_cookie = rt->rt6i_node->fn_sernum; } @@ -97,7 +97,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { - struct rt6_info *rt = (struct rt6_info*)xdst->route; + struct rt6_info *rt = (struct rt6_info *)xdst->route; xdst->u.dst.dev = dev; dev_hold(dev); @@ -296,7 +296,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .family = AF_INET6, .dst_ops = &xfrm6_dst_ops, .dst_lookup = xfrm6_dst_lookup, - .get_saddr = xfrm6_get_saddr, + .get_saddr = 
xfrm6_get_saddr, .decode_session = _decode_session6, .get_tos = xfrm6_get_tos, .init_dst = xfrm6_init_dst, @@ -319,9 +319,9 @@ static void xfrm6_policy_fini(void) static struct ctl_table xfrm6_policy_table[] = { { .procname = "xfrm6_gc_thresh", - .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, - .maxlen = sizeof(int), - .mode = 0644, + .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = proc_dointvec, }, { } diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index 3fc970135fc66583e5842943246993da7e2b69b4..8a1f9c0d2a13b27ae0b67c9eacf802f30e656115 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c @@ -3,11 +3,11 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro - * IPv6 support - * YOSHIFUJI Hideaki @USAGI - * Split up af-specific portion + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro + * IPv6 support + * YOSHIFUJI Hideaki @USAGI + * Split up af-specific portion * */ @@ -45,10 +45,10 @@ xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr) { x->id = tmpl->id; - if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) + if (ipv6_addr_any((struct in6_addr *)&x->id.daddr)) memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr)); - if (ipv6_addr_any((struct in6_addr*)&x->props.saddr)) + if (ipv6_addr_any((struct in6_addr *)&x->props.saddr)) memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr)); x->props.mode = tmpl->mode; x->props.reqid = tmpl->reqid; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 1c66465a42ddc09627dae47d4c6cbd40aa4fdc35..5743044cd660b8a8557d1a87c4ee35508ebff7e0 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -15,7 +15,7 @@ * along with this program; if not, see . 
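The xfrm6_init_temprop() hunk above (besides the cast style) shows the template fallback: when the template leaves an address as the unspecified :: address, the temporary state takes the address of the packet that matched the policy. A standalone sketch of that fallback with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Equivalent of the ipv6_addr_any() test: an IPv6 address is "any" (::)
 * when all 128 bits are zero. */
static bool addr6_is_any(const uint8_t a[16])
{
	static const uint8_t zero[16];

	return memcmp(a, zero, 16) == 0;
}

int main(void)
{
	uint8_t tmpl_daddr[16] = { 0 };				/* template left as :: */
	uint8_t pkt_daddr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 1 };
	uint8_t chosen[16];

	/* Prefer the template address, fall back to the packet's. */
	memcpy(chosen, addr6_is_any(tmpl_daddr) ? pkt_daddr : tmpl_daddr, 16);
	printf("last byte of chosen daddr: %u\n", chosen[15]);
	return 0;
}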
* * Authors Mitsuru KANDA - * YOSHIFUJI Hideaki + * YOSHIFUJI Hideaki * * Based on net/ipv4/xfrm4_tunnel.c * @@ -110,7 +110,6 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr) rcu_read_unlock_bh(); return htonl(spi); } - EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) @@ -187,7 +186,6 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) return htonl(spi); } - EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); static void x6spi_destroy_rcu(struct rcu_head *head) diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 1bc49edf22966fdce8c8fbf136a3c4e7800fe5b8..5a2d0a69552993ce5e88bf7efa9ea8e3745424c1 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -98,7 +98,7 @@ static const struct file_operations irlan_fops = { extern struct proc_dir_entry *proc_irda; #endif /* CONFIG_PROC_FS */ -static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr); +static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr); static void __irlan_close(struct irlan_cb *self); static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, __u8 value_byte, __u16 value_short, @@ -196,7 +196,7 @@ static void __exit irlan_cleanup(void) * Open new instance of a client/provider, we should only register the * network device if this instance is ment for a particular client/provider */ -static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr) +static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) { struct net_device *dev; struct irlan_cb *self; diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index da787930df0ab643a81bfa1f3f554efb2cd41bee..2a6a1fdd62c059eb857312d10fee38fa1c6bc86f 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -493,8 +493,8 @@ static void iucv_declare_cpu(void *data) err = "Paging or storage error"; break; } - pr_warning("Defining an interrupt buffer on CPU %i" - " failed with 0x%02x (%s)\n", cpu, rc, err); + pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n", + cpu, rc, err); return; } @@ -1831,7 +1831,7 @@ static void iucv_external_interrupt(struct ext_code ext_code, BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); if (!work) { - pr_warning("iucv_external_interrupt: out of memory\n"); + pr_warn("iucv_external_interrupt: out of memory\n"); return; } memcpy(&work->data, p, sizeof(work->data)); @@ -1974,8 +1974,7 @@ static int iucv_pm_restore(struct device *dev) printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); #endif if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) - pr_warning("Suspending Linux did not completely close all IUCV " - "connections\n"); + pr_warn("Suspending Linux did not completely close all IUCV connections\n"); iucv_pm_state = IUCV_PM_RESTORING; if (cpumask_empty(&iucv_irq_cpumask)) { rc = iucv_query_maxconn(); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1109d3bb8dac8d4142eb6bdd159915b2f978ea45..895348e44c7d22c9e6d4828195e7099a74154531 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -148,7 +148,7 @@ do { \ atomic_read(&_t->ref_count)); \ l2tp_tunnel_inc_refcount_1(_t); \ } while (0) -#define l2tp_tunnel_dec_refcount(_t) +#define l2tp_tunnel_dec_refcount(_t) \ do { \ pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ __func__, __LINE__, (_t)->name, \ @@ -1582,19 +1582,17 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark 
socket as an encapsulation socket. See net/ipv4/udp.c */ tunnel->encap = encap; if (encap == L2TP_ENCAPTYPE_UDP) { - /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ - udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; - udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; - udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) - udpv6_encap_enable(); - else -#endif - udp_encap_enable(); - } + struct udp_tunnel_sock_cfg udp_cfg; + + udp_cfg.sk_user_data = tunnel; + udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; + udp_cfg.encap_rcv = l2tp_udp_encap_recv; + udp_cfg.encap_destroy = l2tp_udp_encap_destroy; - sk->sk_user_data = tunnel; + setup_udp_tunnel_sock(net, sock, &udp_cfg); + } else { + sk->sk_user_data = tunnel; + } /* Hook on the tunnel socket destructor so that we can cleanup * if the tunnel socket goes away. diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index f0e84bc48038932b022a0623ab1e2bf11a9fdfaf..a48bad468880aa60c80b45a1cc97bef616845ffa 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -227,7 +227,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx) + u16 buf_size, bool tx, bool auto_seq) { struct ieee80211_local *local = sta->sdata->local; struct tid_ampdu_rx *tid_agg_rx; @@ -326,6 +326,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, tid_agg_rx->buf_size = buf_size; tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; + tid_agg_rx->auto_seq = auto_seq; status = WLAN_STATUS_SUCCESS; /* activate it for RX */ @@ -367,7 +368,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, - buf_size, true); + buf_size, true, false); } void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 927b4ea0128bbc365a9692302ba299d5da16b2c4..fb6a1502b6dfa64980305c3bba380408ef3dde14 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2,6 +2,7 @@ * mac80211 configuration hooks for cfg80211 * * Copyright 2006-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This file is GPLv2 as found in COPYING. 
*/ @@ -682,8 +683,19 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, if (old) return -EALREADY; - /* TODO: make hostapd tell us what it wants */ - sdata->smps_mode = IEEE80211_SMPS_OFF; + switch (params->smps_mode) { + case NL80211_SMPS_OFF: + sdata->smps_mode = IEEE80211_SMPS_OFF; + break; + case NL80211_SMPS_STATIC: + sdata->smps_mode = IEEE80211_SMPS_STATIC; + break; + case NL80211_SMPS_DYNAMIC: + sdata->smps_mode = IEEE80211_SMPS_DYNAMIC; + break; + default: + return -EINVAL; + } sdata->needed_rx_chains = sdata->local->rx_chains; mutex_lock(&local->mtx); @@ -1011,15 +1023,8 @@ static int sta_apply_parameters(struct ieee80211_local *local, clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); } - if (mask & BIT(NL80211_STA_FLAG_WME)) { - if (set & BIT(NL80211_STA_FLAG_WME)) { - set_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = true; - } else { - clear_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = false; - } - } + if (mask & BIT(NL80211_STA_FLAG_WME)) + sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); if (mask & BIT(NL80211_STA_FLAG_MFP)) { if (set & BIT(NL80211_STA_FLAG_MFP)) @@ -1984,8 +1989,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) return err; } - if (changed & WIPHY_PARAM_COVERAGE_CLASS) { - err = drv_set_coverage_class(local, wiphy->coverage_class); + if ((changed & WIPHY_PARAM_COVERAGE_CLASS) || + (changed & WIPHY_PARAM_DYN_ACK)) { + s16 coverage_class; + + coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ? + wiphy->coverage_class : -1; + err = drv_set_coverage_class(local, coverage_class); if (err) return err; @@ -2358,6 +2368,58 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, return 0; } +static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local, + struct ieee80211_roc_work *new_roc, + struct ieee80211_roc_work *cur_roc) +{ + unsigned long j = jiffies; + unsigned long cur_roc_end = cur_roc->hw_start_time + + msecs_to_jiffies(cur_roc->duration); + struct ieee80211_roc_work *next_roc; + int new_dur; + + if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun)) + return false; + + if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end)) + return false; + + ieee80211_handle_roc_started(new_roc); + + new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j); + + /* cur_roc is long enough - add new_roc to the dependents list. */ + if (new_dur <= 0) { + list_add_tail(&new_roc->list, &cur_roc->dependents); + return true; + } + + new_roc->duration = new_dur; + + /* + * if cur_roc was already coalesced before, we might + * want to extend the next roc instead of adding + * a new one. + */ + next_roc = list_entry(cur_roc->list.next, + struct ieee80211_roc_work, list); + if (&next_roc->list != &local->roc_list && + next_roc->chan == new_roc->chan && + next_roc->sdata == new_roc->sdata && + !WARN_ON(next_roc->started)) { + list_add_tail(&new_roc->list, &next_roc->dependents); + next_roc->duration = max(next_roc->duration, + new_roc->duration); + next_roc->type = max(next_roc->type, new_roc->type); + return true; + } + + /* add right after cur_roc */ + list_add(&new_roc->list, &cur_roc->list); + + return true; +} + static int ieee80211_start_roc_work(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, @@ -2463,8 +2525,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, /* If it has already started, it's more difficult ... 
*/ if (local->ops->remain_on_channel) { - unsigned long j = jiffies; - /* * In the offloaded ROC case, if it hasn't begun, add * this new one to the dependent list to be handled @@ -2487,28 +2547,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, break; } - if (time_before(j + IEEE80211_ROC_MIN_LEFT, - tmp->hw_start_time + - msecs_to_jiffies(tmp->duration))) { - int new_dur; - - ieee80211_handle_roc_started(roc); - - new_dur = roc->duration - - jiffies_to_msecs(tmp->hw_start_time + - msecs_to_jiffies( - tmp->duration) - - j); - - if (new_dur > 0) { - /* add right after tmp */ - list_add(&roc->list, &tmp->list); - } else { - list_add_tail(&roc->list, - &tmp->dependents); - } + if (ieee80211_coalesce_started_roc(local, roc, tmp)) queued = true; - } } else if (del_timer_sync(&tmp->work.timer)) { unsigned long new_end; @@ -3352,7 +3392,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, band = chanctx_conf->def.chan->band; sta = sta_info_get_bss(sdata, peer); if (sta) { - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; } else { rcu_read_unlock(); return -ENOLINK; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 399ad82c997f99c33833c03c67e6acddf7019b6f..4c74e8da64b909cc36befe4c5ecb29af5ff7629e 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -549,12 +549,12 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, compat = cfg80211_chandef_compatible( &sdata->vif.bss_conf.chandef, compat); - if (!compat) + if (WARN_ON_ONCE(!compat)) break; } rcu_read_unlock(); - if (WARN_ON_ONCE(!compat)) + if (!compat) return; ieee80211_change_chanctx(local, ctx, compat); @@ -639,41 +639,6 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, return ret; } -static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx_conf *conf; - struct ieee80211_chanctx *ctx; - bool use_reserved_switch = false; - - lockdep_assert_held(&local->chanctx_mtx); - - conf = rcu_dereference_protected(sdata->vif.chanctx_conf, - lockdep_is_held(&local->chanctx_mtx)); - if (!conf) - return; - - ctx = container_of(conf, struct ieee80211_chanctx, conf); - - if (sdata->reserved_chanctx) { - if (sdata->reserved_chanctx->replace_state == - IEEE80211_CHANCTX_REPLACES_OTHER && - ieee80211_chanctx_num_reserved(local, - sdata->reserved_chanctx) > 1) - use_reserved_switch = true; - - ieee80211_vif_unreserve_chanctx(sdata); - } - - ieee80211_assign_vif_chanctx(sdata, NULL); - if (ieee80211_chanctx_refcount(local, ctx) == 0) - ieee80211_free_chanctx(local, ctx); - - /* Unreserving may ready an in-place reservation. 
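ieee80211_coalesce_started_roc() above folds a new remain-on-channel request into one that is already running: if the time left on the current run covers the new request, it is queued as a dependent of that run; otherwise only the uncovered remainder is scheduled right after it. A standalone sketch of that decision, using plain milliseconds instead of jiffies and illustrative names:

#include <stdio.h>

struct roc_req {
	int duration_ms;
};

/* Returns 0 if the new request is fully covered by what is left of the
 * current run, else the shortened duration to queue afterwards. */
static int coalesce_roc(const struct roc_req *new_req, int cur_remaining_ms)
{
	int new_dur = new_req->duration_ms - cur_remaining_ms;

	if (new_dur <= 0) {
		printf("fits: run as a dependent of the current ROC\n");
		return 0;
	}
	printf("overlaps: queue %d ms right after the current ROC\n", new_dur);
	return new_dur;
}

int main(void)
{
	struct roc_req shortreq = { .duration_ms = 30 };
	struct roc_req longreq  = { .duration_ms = 200 };

	coalesce_roc(&shortreq, 80);	/* fully covered by the 80 ms left */
	coalesce_roc(&longreq, 80);	/* needs another 120 ms afterwards */
	return 0;
}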
*/ - if (use_reserved_switch) - ieee80211_vif_use_reserved_switch(local); -} - void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx) { @@ -764,63 +729,6 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS); } -int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, - const struct cfg80211_chan_def *chandef, - enum ieee80211_chanctx_mode mode) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx *ctx; - u8 radar_detect_width = 0; - int ret; - - lockdep_assert_held(&local->mtx); - - WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); - - mutex_lock(&local->chanctx_mtx); - - ret = cfg80211_chandef_dfs_required(local->hw.wiphy, - chandef, - sdata->wdev.iftype); - if (ret < 0) - goto out; - if (ret > 0) - radar_detect_width = BIT(chandef->width); - - sdata->radar_required = ret; - - ret = ieee80211_check_combinations(sdata, chandef, mode, - radar_detect_width); - if (ret < 0) - goto out; - - __ieee80211_vif_release_channel(sdata); - - ctx = ieee80211_find_chanctx(local, chandef, mode); - if (!ctx) - ctx = ieee80211_new_chanctx(local, chandef, mode); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out; - } - - sdata->vif.bss_conf.chandef = *chandef; - - ret = ieee80211_assign_vif_chanctx(sdata, ctx); - if (ret) { - /* if assign fails refcount stays the same */ - if (ieee80211_chanctx_refcount(local, ctx) == 0) - ieee80211_free_chanctx(local, ctx); - goto out; - } - - ieee80211_recalc_smps_chanctx(local, ctx); - ieee80211_recalc_radar_chanctx(local, ctx); - out: - mutex_unlock(&local->chanctx_mtx); - return ret; -} - static void __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear) @@ -1269,8 +1177,7 @@ static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local) return err; } -int -ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) +static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *sdata_tmp; struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; @@ -1522,6 +1429,98 @@ ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) return err; } +static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + bool use_reserved_switch = false; + + lockdep_assert_held(&local->chanctx_mtx); + + conf = rcu_dereference_protected(sdata->vif.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) + return; + + ctx = container_of(conf, struct ieee80211_chanctx, conf); + + if (sdata->reserved_chanctx) { + if (sdata->reserved_chanctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER && + ieee80211_chanctx_num_reserved(local, + sdata->reserved_chanctx) > 1) + use_reserved_switch = true; + + ieee80211_vif_unreserve_chanctx(sdata); + } + + ieee80211_assign_vif_chanctx(sdata, NULL); + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + + /* Unreserving may ready an in-place reservation. 
*/ + if (use_reserved_switch) + ieee80211_vif_use_reserved_switch(local); +} + +int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *ctx; + u8 radar_detect_width = 0; + int ret; + + lockdep_assert_held(&local->mtx); + + WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); + + mutex_lock(&local->chanctx_mtx); + + ret = cfg80211_chandef_dfs_required(local->hw.wiphy, + chandef, + sdata->wdev.iftype); + if (ret < 0) + goto out; + if (ret > 0) + radar_detect_width = BIT(chandef->width); + + sdata->radar_required = ret; + + ret = ieee80211_check_combinations(sdata, chandef, mode, + radar_detect_width); + if (ret < 0) + goto out; + + __ieee80211_vif_release_channel(sdata); + + ctx = ieee80211_find_chanctx(local, chandef, mode); + if (!ctx) + ctx = ieee80211_new_chanctx(local, chandef, mode); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out; + } + + sdata->vif.bss_conf.chandef = *chandef; + + ret = ieee80211_assign_vif_chanctx(sdata, ctx); + if (ret) { + /* if assign fails refcount stays the same */ + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + goto out; + } + + ieee80211_recalc_smps_chanctx(local, ctx); + ieee80211_recalc_radar_chanctx(local, ctx); + out: + mutex_unlock(&local->chanctx_mtx); + return ret; +} + int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 0e963bc1ceac3109f378431a617b853a7166df37..54a189f0393efe546d1949801468e065a7481c0e 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -3,6 +3,7 @@ * mac80211 debugfs for wireless PHYs * * Copyright 2007 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * GPLv2 * @@ -302,11 +303,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n"); if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE) sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n"); - if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) - sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n"); - if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) - sf += scnprintf(buf + sf, mxln - sf, - "SUPPORTS_DYNAMIC_SMPS\n"); if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n"); if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index e205ebabfa5015f5375a60cb79af9b2bfdbd9f80..c68896adfa960113c584656c478fd8ff043a11ca 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -226,12 +226,12 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; int err; - if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) && + if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) && smps_mode == IEEE80211_SMPS_STATIC) return -EINVAL; /* auto should be dynamic if in PS mode */ - if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) && + if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) && (smps_mode == IEEE80211_SMPS_DYNAMIC || smps_mode == IEEE80211_SMPS_AUTOMATIC)) return -EINVAL; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 
86173c0de40e91d658e883514232c720a4c51e0e..bafe489162298ec2d2df67b359985da691785258 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -2,6 +2,7 @@ * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc * Copyright 2007 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -77,7 +78,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), TEST(SHORT_PREAMBLE), - TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), + sta->sta.wme ? "WME\n" : "", + TEST(WDS), TEST(CLEAR_PS_FILT), TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 11423958116a3fc9e37c91684de3aade8fdef763..196d48c68134a08b644fc97f8f1a1fc740ec7bdf 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -450,7 +450,7 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local, } static inline int drv_set_coverage_class(struct ieee80211_local *local, - u8 value) + s16 value) { int ret = 0; might_sleep(); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 9713dc54ea4bb385abf1d48ae891ff5fceb7440f..56b53571c8077ae5a3660449cb0afeb81375028c 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -6,6 +6,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2009, Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1038,7 +1039,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, } if (sta && elems->wmm_info) - set_sta_flag(sta, WLAN_STA_WME); + sta->sta.wme = true; if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ef7a089ac54647eb763b8bbb0793bef98454b48d..c2aaec4dfcf0ea38da3e45cfdc7b8ad0ecef5fc8 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -3,6 +3,7 @@ * Copyright 2005, Devicescape Software, Inc. 
* Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -354,6 +355,7 @@ enum ieee80211_sta_flags { IEEE80211_STA_DISABLE_80P80MHZ = BIT(12), IEEE80211_STA_DISABLE_160MHZ = BIT(13), IEEE80211_STA_DISABLE_WMM = BIT(14), + IEEE80211_STA_ENABLE_RRM = BIT(15), }; struct ieee80211_mgd_auth_data { @@ -1367,6 +1369,7 @@ struct ieee802_11_elems { const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; const u8 *country_elem; const u8 *pwr_constr_elem; + const u8 *cisco_dtpc_elem; const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; @@ -1587,7 +1590,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx); + u16 buf_size, bool tx, bool auto_seq); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason); void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, @@ -1869,7 +1872,6 @@ ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata, int __must_check ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata); int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata); -int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local); int __must_check ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, @@ -1918,7 +1920,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, size_t extra_ies_len); int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); - +void ieee80211_tdls_peer_del_work(struct work_struct *wk); extern const struct ethtool_ops ieee80211_ethtool_ops; @@ -1929,4 +1931,3 @@ extern const struct ethtool_ops ieee80211_ethtool_ops; #endif #endif /* IEEE80211_I_H */ -void ieee80211_tdls_peer_del_work(struct work_struct *wk); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f75e5f132c5ad4a816fc933defedda38f0695e66..af237223a8cd9bd3aa6121576650238d42e3bbb0 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -5,6 +5,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. 
* Copyright (c) 2006 Jiri Benc * Copyright 2008, Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1172,19 +1173,11 @@ static void ieee80211_iface_work(struct work_struct *work) rx_agg = (void *)&skb->cb; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, rx_agg->addr); - if (sta) { - u16 last_seq; - - last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu( - sta->last_seq_ctrl[rx_agg->tid])); - + if (sta) __ieee80211_start_rx_ba_session(sta, - 0, 0, - ieee80211_sn_inc(last_seq), - 1, rx_agg->tid, + 0, 0, 0, 1, rx_agg->tid, IEEE80211_MAX_AMPDU_BUF, - false); - } + false, true); mutex_unlock(&local->sta_mtx); } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) { rx_agg = (void *)&skb->cb; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index d808cff8015374ddfb927d5695a3d3973bcc0428..4712150dc2109df0b6f17162da8ec5119a94ec0a 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2008 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -130,9 +131,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) sdata->crypto_tx_tailroom_needed_cnt--; WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && @@ -180,9 +179,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta = key->sta; sdata = key->sdata; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) increment_tailroom_need_count(sdata); ret = drv_set_key(key->local, DISABLE_KEY, sdata, @@ -425,7 +422,7 @@ static void ieee80211_key_free_common(struct ieee80211_key *key) ieee80211_aes_key_free(key->u.ccmp.tfm); if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); - kfree(key); + kzfree(key); } static void __ieee80211_key_destroy(struct ieee80211_key *key, @@ -878,9 +875,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) increment_tailroom_need_count(key->sdata); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index e0ab4320a078528b27e101c17bfd70ad422ac4fb..0de7c93bf62b5a3352de711ab9deeaa21649d8a1 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -2,6 +2,7 @@ * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. 
* Copyright 2006-2007 Jiri Benc + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index cf032a8db9d78e9c13fe9df1ee2f2c35dbe8ec62..a6699dceae7c8aeeff4419d8cc2815f9d86adf5e 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -729,7 +729,7 @@ void mesh_plink_broken(struct sta_info *sta) tbl = rcu_dereference(mesh_paths); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; - if (rcu_dereference(mpath->next_hop) == sta && + if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { spin_lock_bh(&mpath->state_lock); @@ -794,7 +794,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) tbl = resize_dereference_mesh_paths(); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; - if (rcu_dereference(mpath->next_hop) == sta) { + if (rcu_access_pointer(mpath->next_hop) == sta) { spin_lock(&tbl->hashwlock[i]); __mesh_path_del(tbl, node); spin_unlock(&tbl->hashwlock[i]); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index c47194d2714933bd8e1dc3658ffd165cd50a5a4a..b488e1859b18e8ed7797cffbb5ab2319138fdb28 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -431,14 +431,12 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr) return NULL; sta->plink_state = NL80211_PLINK_LISTEN; + sta->sta.wme = true; sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); - set_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = true; - return sta; } @@ -1004,7 +1002,6 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, enum ieee80211_self_protected_actioncode ftype; u32 changed = 0; u8 ie_len = elems->peering_len; - __le16 _plid, _llid; u16 plid, llid = 0; if (!elems->peering) { @@ -1039,13 +1036,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, /* Note the lines below are correct, the llid in the frame is the plid * from the point of view of this host. */ - memcpy(&_plid, PLINK_GET_LLID(elems->peering), sizeof(__le16)); - plid = le16_to_cpu(_plid); + plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering)); if (ftype == WLAN_SP_MESH_PEERING_CONFIRM || - (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) { - memcpy(&_llid, PLINK_GET_PLID(elems->peering), sizeof(__le16)); - llid = le16_to_cpu(_llid); - } + (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) + llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering)); /* WARNING: Only for sta pointer, is dropped & re-acquired */ rcu_read_lock(); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b82a12a9f0f110779756027944dce6293d1387fb..2de88704278b85b8d9ce47f2ef77905108d456e7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5,6 +5,7 @@ * Copyright 2005, Devicescape Software, Inc. 
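The mesh_plink hunk above replaces the memcpy-into-__le16-then-convert sequence with get_unaligned_le16(), which reads a little-endian 16-bit field directly from a possibly unaligned position in the IE buffer. A portable standalone equivalent (the buffer contents and offsets are made up):

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian 16-bit value from an arbitrary byte offset without
 * relying on aligned loads. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* A peering IE fragment where the link IDs are not naturally aligned. */
	uint8_t ie[6] = { 0x37, 0x34, 0x12, 0x78, 0x56, 0x00 };

	printf("llid=0x%04x plid=0x%04x\n",
	       read_le16(&ie[1]), read_le16(&ie[3]));
	return 0;
}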
* Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -149,6 +150,7 @@ static u32 ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, + const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, struct cfg80211_chan_def *chandef, bool tracking) @@ -162,13 +164,19 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, chandef->center_freq1 = channel->center_freq; chandef->center_freq2 = 0; - if (!ht_oper || !sband->ht_cap.ht_supported) { + if (!ht_cap || !ht_oper || !sband->ht_cap.ht_supported) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; goto out; } chandef->width = NL80211_CHAN_WIDTH_20; + if (!(ht_cap->cap_info & + cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) { + ret = IEEE80211_STA_DISABLE_40MHZ; + goto out; + } + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, channel->band); /* check that channel matches the right operating channel */ @@ -328,6 +336,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, + const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, const u8 *bssid, u32 *changed) @@ -367,8 +376,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[chan->band]; /* calculate new channel (type) based on HT/VHT operation IEs */ - flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, - vht_oper, &chandef, true); + flags = ieee80211_determine_chantype(sdata, sband, chan, + ht_cap, ht_oper, vht_oper, + &chandef, true); /* * Downgrade the new channel if we associated with restricted @@ -663,6 +673,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; + if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM) + capab |= WLAN_CAPABILITY_RADIO_MEASURE; + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN); @@ -728,16 +741,17 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) } } - if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { - /* 1. power capabilities */ + if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT || + capab & WLAN_CAPABILITY_RADIO_MEASURE) { pos = skb_put(skb, 4); *pos++ = WLAN_EID_PWR_CAPABILITY; *pos++ = 2; *pos++ = 0; /* min tx power */ /* max tx power */ *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def); + } - /* 2. 
supported channels */ + if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { /* TODO: get this in reg domain format */ pos = skb_put(skb, 2 * sband->n_channels + 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; @@ -1157,19 +1171,21 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); } -static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, - struct ieee80211_channel *channel, - const u8 *country_ie, u8 country_ie_len, - const u8 *pwr_constr_elem) +static bool +ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + const u8 *country_ie, u8 country_ie_len, + const u8 *pwr_constr_elem, + int *chan_pwr, int *pwr_reduction) { struct ieee80211_country_ie_triplet *triplet; int chan = ieee80211_frequency_to_channel(channel->center_freq); - int i, chan_pwr, chan_increment, new_ap_level; + int i, chan_increment; bool have_chan_pwr = false; /* Invalid IE */ if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) - return 0; + return false; triplet = (void *)(country_ie + 3); country_ie_len -= 3; @@ -1197,7 +1213,7 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, for (i = 0; i < triplet->chans.num_channels; i++) { if (first_channel + i * chan_increment == chan) { have_chan_pwr = true; - chan_pwr = triplet->chans.max_power; + *chan_pwr = triplet->chans.max_power; break; } } @@ -1209,18 +1225,76 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, country_ie_len -= 3; } - if (!have_chan_pwr) + if (have_chan_pwr) + *pwr_reduction = *pwr_constr_elem; + return have_chan_pwr; +} + +static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + const u8 *cisco_dtpc_ie, + int *pwr_level) +{ + /* From practical testing, the first data byte of the DTPC element + * seems to contain the requested dBm level, and the CLI on Cisco + * APs clearly state the range is -127 to 127 dBm, which indicates + * a signed byte, although it seemingly never actually goes negative. + * The other byte seems to always be zero. + */ + *pwr_level = (__s8)cisco_dtpc_ie[4]; +} + +static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *channel, + struct ieee80211_mgmt *mgmt, + const u8 *country_ie, u8 country_ie_len, + const u8 *pwr_constr_ie, + const u8 *cisco_dtpc_ie) +{ + bool has_80211h_pwr = false, has_cisco_pwr = false; + int chan_pwr = 0, pwr_reduction_80211h = 0; + int pwr_level_cisco, pwr_level_80211h; + int new_ap_level; + + if (country_ie && pwr_constr_ie && + mgmt->u.probe_resp.capab_info & + cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) { + has_80211h_pwr = ieee80211_find_80211h_pwr_constr( + sdata, channel, country_ie, country_ie_len, + pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h); + pwr_level_80211h = + max_t(int, 0, chan_pwr - pwr_reduction_80211h); + } + + if (cisco_dtpc_ie) { + ieee80211_find_cisco_dtpc( + sdata, channel, cisco_dtpc_ie, &pwr_level_cisco); + has_cisco_pwr = true; + } + + if (!has_80211h_pwr && !has_cisco_pwr) return 0; - new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem); + /* If we have both 802.11h and Cisco DTPC, apply both limits + * by picking the smallest of the two power levels advertised. 
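The rework above splits power-constraint handling into an 802.11h lookup (channel power minus the advertised constraint, clamped at 0 dBm) and a Cisco DTPC lookup (a signed dBm byte), then applies whichever limit is lower. A rough userspace sketch of that selection, with illustrative names rather than mac80211's:

#include <stdio.h>

/* Sketch of the power-limit selection: when both an 802.11h limit and a
 * Cisco DTPC limit are present, the lower one wins. Illustrative names. */
static int select_ap_power_level(int have_80211h, int chan_pwr, int pwr_constr,
				 int have_dtpc, signed char dtpc_dbm)
{
	int level_80211h = chan_pwr - pwr_constr;

	if (level_80211h < 0)
		level_80211h = 0;	/* never go below 0 dBm */

	if (have_80211h && have_dtpc)
		return level_80211h <= dtpc_dbm ? level_80211h : dtpc_dbm;
	if (have_80211h)
		return level_80211h;
	if (have_dtpc)
		return dtpc_dbm;
	return -1;	/* no limit advertised */
}

int main(void)
{
	/* Country IE allows 20 dBm with a 3 dB constraint; DTPC asks for 15 dBm. */
	printf("limit = %d dBm\n", select_ap_power_level(1, 20, 3, 1, 15));
	return 0;
}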
+ */ + if (has_80211h_pwr && + (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { + sdata_info(sdata, + "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", + pwr_level_80211h, chan_pwr, pwr_reduction_80211h, + sdata->u.mgd.bssid); + new_ap_level = pwr_level_80211h; + } else { /* has_cisco_pwr is always true here. */ + sdata_info(sdata, + "Limiting TX power to %d dBm as advertised by %pM\n", + pwr_level_cisco, sdata->u.mgd.bssid); + new_ap_level = pwr_level_cisco; + } if (sdata->ap_power_level == new_ap_level) return 0; - sdata_info(sdata, - "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", - new_ap_level, chan_pwr, *pwr_constr_elem, - sdata->u.mgd.bssid); sdata->ap_power_level = new_ap_level; if (__ieee80211_recalc_txpower(sdata)) return BSS_CHANGED_TXPOWER; @@ -2677,8 +2751,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) set_sta_flag(sta, WLAN_STA_MFP); - if (elems.wmm_param) - set_sta_flag(sta, WLAN_STA_WME); + sta->sta.wme = elems.wmm_param; err = sta_info_move_state(sta, IEEE80211_STA_ASSOC); if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) @@ -2744,6 +2817,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u16 capab_info, status_code, aid; struct ieee802_11_elems elems; + int ac, uapsd_queues = -1; u8 *pos; bool reassoc; struct cfg80211_bss *bss; @@ -2813,9 +2887,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, * is set can cause the interface to go idle */ ieee80211_destroy_assoc_data(sdata, true); + + /* get uapsd queues configuration */ + uapsd_queues = 0; + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + if (sdata->tx_conf[ac].uapsd) + uapsd_queues |= BIT(ac); } - cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len); + cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@ -2885,7 +2965,9 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, /* * This is the canonical list of information elements we care about, * the filter code also gives us all changes to the Microsoft OUI - * (00:50:F2) vendor IE which is used for WMM which we need to track. + * (00:50:F2) vendor IE which is used for WMM which we need to track, + * as well as the DTPC IE (part of the Cisco OUI) used for signaling + * changes to requested client power. 
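The association-response path above also collects which access categories were negotiated for U-APSD into a small bitmap before handing it to cfg80211_rx_assoc_resp(). A trivial userspace sketch of that bitmap construction, with illustrative types:

#include <stdio.h>

#define NUM_ACS 4

/* Illustrative stand-in for the per-AC TX configuration. */
struct ac_conf {
	int uapsd;
};

/* Build one bit per U-APSD-enabled access category. */
static unsigned int uapsd_queue_mask(const struct ac_conf conf[NUM_ACS])
{
	unsigned int mask = 0;
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		if (conf[ac].uapsd)
			mask |= 1u << ac;
	return mask;
}

int main(void)
{
	struct ac_conf conf[NUM_ACS] = { {1}, {0}, {0}, {1} };

	printf("uapsd_queues = 0x%x\n", uapsd_queue_mask(conf)); /* 0x9 */
	return 0;
}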
* * We implement beacon filtering in software since that means we can * avoid processing the frame here and in cfg80211, and userspace @@ -3174,7 +3256,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, bssid); - if (ieee80211_config_bw(sdata, sta, elems.ht_operation, + if (ieee80211_config_bw(sdata, sta, + elems.ht_cap_elem, elems.ht_operation, elems.vht_operation, bssid, &changed)) { mutex_unlock(&local->sta_mtx); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, @@ -3190,13 +3273,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, rx_status->band, true); mutex_unlock(&local->sta_mtx); - if (elems.country_elem && elems.pwr_constr_elem && - mgmt->u.probe_resp.capab_info & - cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) - changed |= ieee80211_handle_pwr_constr(sdata, chan, - elems.country_elem, - elems.country_elem_len, - elems.pwr_constr_elem); + changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, + elems.country_elem, + elems.country_elem_len, + elems.pwr_constr_elem, + elems.cisco_dtpc_elem); ieee80211_bss_info_change_notify(sdata, changed); } @@ -3724,7 +3805,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1; - if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) + if (sdata->local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else ifmgd->req_smps = IEEE80211_SMPS_OFF; @@ -3808,6 +3889,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const struct ieee80211_ht_cap *ht_cap = NULL; const struct ieee80211_ht_operation *ht_oper = NULL; const struct ieee80211_vht_operation *vht_oper = NULL; struct ieee80211_supported_band *sband; @@ -3824,14 +3906,17 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && sband->ht_cap.ht_supported) { - const u8 *ht_oper_ie, *ht_cap; + const u8 *ht_oper_ie, *ht_cap_ie; ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) ht_oper = (void *)(ht_oper_ie + 2); - ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); - if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) { + ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); + if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) + ht_cap = (void *)(ht_cap_ie + 2); + + if (!ht_cap) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ht_oper = NULL; } @@ -3862,7 +3947,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, - ht_oper, vht_oper, + ht_cap, ht_oper, vht_oper, &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), @@ -4395,6 +4480,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; } + if (req->flags & ASSOC_REQ_USE_RRM) + ifmgd->flags |= IEEE80211_STA_ENABLE_RRM; + else + ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM; + if (req->crypto.control_port) ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; else diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 1c1469c36dca05cbdf55e2aba55dc296962aba55..2baa7ed8789dce2752a3f69d5cc3627bbbd47c76 
100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -75,7 +75,7 @@ minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) { int j = MAX_THR_RATES; - while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp) + while (j > 0 && mi->r[i].stats.cur_tp > mi->r[tp_list[j - 1]].stats.cur_tp) j--; if (j < MAX_THR_RATES - 1) memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1)); @@ -92,7 +92,7 @@ minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *rate ratetbl->rate[offset].idx = r->rix; ratetbl->rate[offset].count = r->adjusted_retry_count; ratetbl->rate[offset].count_cts = r->retry_count_cts; - ratetbl->rate[offset].count_rts = r->retry_count_rtscts; + ratetbl->rate[offset].count_rts = r->stats.retry_count_rtscts; } static void @@ -140,44 +140,46 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; + struct minstrel_rate_stats *mrs = &mi->r[i].stats; usecs = mr->perfect_tx_time; if (!usecs) usecs = 1000000; - if (unlikely(mr->attempts > 0)) { - mr->sample_skipped = 0; - mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); - mr->succ_hist += mr->success; - mr->att_hist += mr->attempts; - mr->probability = minstrel_ewma(mr->probability, - mr->cur_prob, - EWMA_LEVEL); + if (unlikely(mrs->attempts > 0)) { + mrs->sample_skipped = 0; + mrs->cur_prob = MINSTREL_FRAC(mrs->success, + mrs->attempts); + mrs->succ_hist += mrs->success; + mrs->att_hist += mrs->attempts; + mrs->probability = minstrel_ewma(mrs->probability, + mrs->cur_prob, + EWMA_LEVEL); } else - mr->sample_skipped++; + mrs->sample_skipped++; - mr->last_success = mr->success; - mr->last_attempts = mr->attempts; - mr->success = 0; - mr->attempts = 0; + mrs->last_success = mrs->success; + mrs->last_attempts = mrs->attempts; + mrs->success = 0; + mrs->attempts = 0; /* Update throughput per rate, reset thr. below 10% success */ - if (mr->probability < MINSTREL_FRAC(10, 100)) - mr->cur_tp = 0; + if (mrs->probability < MINSTREL_FRAC(10, 100)) + mrs->cur_tp = 0; else - mr->cur_tp = mr->probability * (1000000 / usecs); + mrs->cur_tp = mrs->probability * (1000000 / usecs); /* Sample less often below the 10% chance of success. * Sample less often above the 95% chance of success. 
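The statistics update above (now operating on the shared struct minstrel_rate_stats) folds each interval's success ratio into an exponentially weighted moving average and turns it into a throughput estimate, treating anything under 10% delivery as unusable. A simplified userspace sketch; the 1000-based scaling and the EWMA weight are illustrative stand-ins for MINSTREL_FRAC() and EWMA_LEVEL:

#include <stdio.h>

#define EWMA_WEIGHT 75	/* percent kept from the old value; illustrative */

struct rate_stats {
	unsigned int attempts, success;
	unsigned int probability;	/* scaled: 1000 == 100% */
	unsigned int cur_tp;		/* packets/sec estimate */
};

static void update_stats(struct rate_stats *rs, unsigned int perfect_tx_usecs)
{
	if (rs->attempts) {
		unsigned int cur_prob = rs->success * 1000 / rs->attempts;

		/* exponentially weighted moving average of delivery probability */
		rs->probability = (rs->probability * EWMA_WEIGHT +
				   cur_prob * (100 - EWMA_WEIGHT)) / 100;
	}

	/* throughput estimate; below 10% success the rate is treated as dead */
	if (rs->probability < 100)
		rs->cur_tp = 0;
	else
		rs->cur_tp = rs->probability * (1000000 / perfect_tx_usecs) / 1000;

	rs->attempts = 0;
	rs->success = 0;
}

int main(void)
{
	struct rate_stats rs = { .attempts = 40, .success = 30, .probability = 900 };

	update_stats(&rs, 200);	/* 200 us per perfect transmission */
	printf("prob=%u/1000 tp=%u pkt/s\n", rs.probability, rs.cur_tp);
	return 0;
}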
*/ - if (mr->probability > MINSTREL_FRAC(95, 100) || - mr->probability < MINSTREL_FRAC(10, 100)) { - mr->adjusted_retry_count = mr->retry_count >> 1; + if (mrs->probability > MINSTREL_FRAC(95, 100) || + mrs->probability < MINSTREL_FRAC(10, 100)) { + mr->adjusted_retry_count = mrs->retry_count >> 1; if (mr->adjusted_retry_count > 2) mr->adjusted_retry_count = 2; mr->sample_limit = 4; } else { mr->sample_limit = -1; - mr->adjusted_retry_count = mr->retry_count; + mr->adjusted_retry_count = mrs->retry_count; } if (!mr->adjusted_retry_count) mr->adjusted_retry_count = 2; @@ -190,11 +192,11 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) * choose the maximum throughput rate as max_prob_rate * (2) if all success probabilities < 95%, the rate with * highest success probability is choosen as max_prob_rate */ - if (mr->probability >= MINSTREL_FRAC(95, 100)) { - if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp) + if (mrs->probability >= MINSTREL_FRAC(95, 100)) { + if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp) tmp_prob_rate = i; } else { - if (mr->probability >= mi->r[tmp_prob_rate].probability) + if (mrs->probability >= mi->r[tmp_prob_rate].stats.probability) tmp_prob_rate = i; } } @@ -240,14 +242,14 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, if (ndx < 0) continue; - mi->r[ndx].attempts += ar[i].count; + mi->r[ndx].stats.attempts += ar[i].count; if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0)) - mi->r[ndx].success += success; + mi->r[ndx].stats.success += success; } if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0)) - mi->sample_count++; + mi->sample_packets++; if (mi->sample_deferred > 0) mi->sample_deferred--; @@ -265,7 +267,7 @@ minstrel_get_retry_count(struct minstrel_rate *mr, unsigned int retry = mr->adjusted_retry_count; if (info->control.use_rts) - retry = max(2U, min(mr->retry_count_rtscts, retry)); + retry = max(2U, min(mr->stats.retry_count_rtscts, retry)); else if (info->control.use_cts_prot) retry = max(2U, min(mr->retry_count_cts, retry)); return retry; @@ -317,15 +319,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, sampling_ratio = mp->lookaround_rate; /* increase sum packet counter */ - mi->packet_count++; + mi->total_packets++; #ifdef CONFIG_MAC80211_DEBUGFS if (mp->fixed_rate_idx != -1) return; #endif - delta = (mi->packet_count * sampling_ratio / 100) - - (mi->sample_count + mi->sample_deferred / 2); + delta = (mi->total_packets * sampling_ratio / 100) - + (mi->sample_packets + mi->sample_deferred / 2); /* delta < 0: no sampling required */ prev_sample = mi->prev_sample; @@ -333,10 +335,10 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, if (delta < 0 || (!mrr_capable && prev_sample)) return; - if (mi->packet_count >= 10000) { + if (mi->total_packets >= 10000) { mi->sample_deferred = 0; - mi->sample_count = 0; - mi->packet_count = 0; + mi->sample_packets = 0; + mi->total_packets = 0; } else if (delta > mi->n_rates * 2) { /* With multi-rate retry, not every planned sample * attempt actually gets used, due to the way the retry @@ -347,7 +349,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, * starts getting worse, minstrel would start bursting * out lots of sampling frames, which would result * in a large throughput loss. 
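The renamed counters above feed minstrel's lookaround budget: sampling attempts are kept at a fixed percentage of all transmitted packets, and the counters roll over periodically so stale history cannot dominate. A simplified userspace sketch that ignores the deferred-sample and multi-rate-retry credit handling:

#include <stdio.h>

/* Illustrative names; not the kernel's state layout. */
struct sampler {
	unsigned int total_packets;
	unsigned int sample_packets;
	unsigned int lookaround_pct;	/* e.g. 5 for 5% */
};

static int should_sample(struct sampler *s)
{
	int delta;

	s->total_packets++;
	delta = (int)(s->total_packets * s->lookaround_pct / 100) -
		(int)s->sample_packets;

	if (s->total_packets >= 10000) {
		/* periodic reset, mirroring the patch's counter rollover */
		s->total_packets = 0;
		s->sample_packets = 0;
	}

	if (delta <= 0)
		return 0;	/* sampling budget already spent */

	s->sample_packets++;
	return 1;
}

int main(void)
{
	struct sampler s = { .lookaround_pct = 5 };
	unsigned int i, sampled = 0;

	for (i = 0; i < 1000; i++)
		sampled += should_sample(&s);
	printf("sampled %u of 1000 packets\n", sampled); /* roughly 5% */
	return 0;
}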
*/ - mi->sample_count += (delta - mi->n_rates * 2); + mi->sample_packets += (delta - mi->n_rates * 2); } /* get next random rate sample */ @@ -361,7 +363,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, */ if (mrr_capable && msr->perfect_tx_time > mr->perfect_tx_time && - msr->sample_skipped < 20) { + msr->stats.sample_skipped < 20) { /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark * packets that have the sampling rate deferred to the * second MRR stage. Increase the sample counter only @@ -375,7 +377,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, if (!msr->sample_limit != 0) return; - mi->sample_count++; + mi->sample_packets++; if (msr->sample_limit > 0) msr->sample_limit--; } @@ -384,7 +386,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, * has a probability of >95%, we shouldn't be attempting * to use it, as this only wastes precious airtime */ if (!mrr_capable && - (mi->r[ndx].probability > MINSTREL_FRAC(95, 100))) + (mi->r[ndx].stats.probability > MINSTREL_FRAC(95, 100))) return; mi->prev_sample = true; @@ -459,6 +461,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, for (i = 0; i < sband->n_bitrates; i++) { struct minstrel_rate *mr = &mi->r[n]; + struct minstrel_rate_stats *mrs = &mi->r[n].stats; unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0; unsigned int tx_time_single; unsigned int cw = mp->cw_min; @@ -471,6 +474,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, n++; memset(mr, 0, sizeof(*mr)); + memset(mrs, 0, sizeof(*mrs)); mr->rix = i; shift = ieee80211_chandef_get_shift(chandef); @@ -482,9 +486,9 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ mr->sample_limit = -1; - mr->retry_count = 1; + mrs->retry_count = 1; mr->retry_count_cts = 1; - mr->retry_count_rtscts = 1; + mrs->retry_count_rtscts = 1; tx_time = mr->perfect_tx_time + mi->sp_ack_dur; do { /* add one retransmission */ @@ -501,13 +505,13 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, (mr->retry_count_cts < mp->max_retry)) mr->retry_count_cts++; if ((tx_time_rtscts < mp->segment_size) && - (mr->retry_count_rtscts < mp->max_retry)) - mr->retry_count_rtscts++; + (mrs->retry_count_rtscts < mp->max_retry)) + mrs->retry_count_rtscts++; } while ((tx_time < mp->segment_size) && - (++mr->retry_count < mp->max_retry)); - mr->adjusted_retry_count = mr->retry_count; + (++mr->stats.retry_count < mp->max_retry)); + mr->adjusted_retry_count = mrs->retry_count; if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)) - mr->retry_count_cts = mr->retry_count; + mr->retry_count_cts = mrs->retry_count; } for (i = n; i < sband->n_bitrates; i++) { @@ -665,7 +669,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta) /* convert pkt per sec in kbps (1200 is the average pkt size used for * computing cur_tp */ - return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024; + return MINSTREL_TRUNC(mi->r[idx].stats.cur_tp) * 1200 * 8 / 1024; } const struct rate_control_ops mac80211_minstrel = { diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 046d1bd598a86d114c5c4dd6a7c7edb75808af13..97eca86a4af0b2da79fc539f1dd3663876d7fa8f 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h @@ -31,6 +31,27 @@ minstrel_ewma(int old, int new, int weight) return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV; } +struct 
minstrel_rate_stats { + /* current / last sampling period attempts/success counters */ + unsigned int attempts, last_attempts; + unsigned int success, last_success; + + /* total attempts/success counters */ + u64 att_hist, succ_hist; + + /* current throughput */ + unsigned int cur_tp; + + /* packet delivery probabilities */ + unsigned int cur_prob, probability; + + /* maximum retry counts */ + unsigned int retry_count; + unsigned int retry_count_rtscts; + + u8 sample_skipped; + bool retry_updated; +}; struct minstrel_rate { int bitrate; @@ -40,26 +61,10 @@ struct minstrel_rate { unsigned int ack_time; int sample_limit; - unsigned int retry_count; unsigned int retry_count_cts; - unsigned int retry_count_rtscts; unsigned int adjusted_retry_count; - u32 success; - u32 attempts; - u32 last_attempts; - u32 last_success; - u8 sample_skipped; - - /* parts per thousand */ - u32 cur_prob; - u32 probability; - - /* per-rate throughput */ - u32 cur_tp; - - u64 succ_hist; - u64 att_hist; + struct minstrel_rate_stats stats; }; struct minstrel_sta_info { @@ -73,8 +78,8 @@ struct minstrel_sta_info { u8 max_tp_rate[MAX_THR_RATES]; u8 max_prob_rate; - unsigned int packet_count; - unsigned int sample_count; + unsigned int total_packets; + unsigned int sample_packets; int sample_deferred; unsigned int sample_row; diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index fd0b9ca1570e3ea92fba228b1824a4c99ddfff39..edde723f9f009e33388f5a6f89129f445fae37a2 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -72,6 +72,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) "this succ/attempt success attempts\n"); for (i = 0; i < mi->n_rates; i++) { struct minstrel_rate *mr = &mi->r[i]; + struct minstrel_rate_stats *mrs = &mi->r[i].stats; *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' '; *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' '; @@ -81,24 +82,24 @@ minstrel_stats_open(struct inode *inode, struct file *file) p += sprintf(p, "%3u%s", mr->bitrate / 2, (mr->bitrate & 1 ? 
".5" : " ")); - tp = MINSTREL_TRUNC(mr->cur_tp / 10); - prob = MINSTREL_TRUNC(mr->cur_prob * 1000); - eprob = MINSTREL_TRUNC(mr->probability * 1000); + tp = MINSTREL_TRUNC(mrs->cur_tp / 10); + prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); + eprob = MINSTREL_TRUNC(mrs->probability * 1000); p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " " %3u(%3u) %8llu %8llu\n", tp / 10, tp % 10, eprob / 10, eprob % 10, prob / 10, prob % 10, - mr->last_success, - mr->last_attempts, - (unsigned long long)mr->succ_hist, - (unsigned long long)mr->att_hist); + mrs->last_success, + mrs->last_attempts, + (unsigned long long)mrs->succ_hist, + (unsigned long long)mrs->att_hist); } p += sprintf(p, "\nTotal packet count:: ideal %d " "lookaround %d\n\n", - mi->packet_count - mi->sample_count, - mi->sample_count); + mi->total_packets - mi->sample_packets, + mi->sample_packets); ms->len = p - ms->buf; return 0; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 85c1e74b7714c2160ffd6976bbec4322bb7e428d..df90ce2db00c042a31bf3cd79dab3cfefcacbc35 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -135,7 +135,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { - return GROUP_IDX((rate->idx / 8) + 1, + return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); } @@ -232,13 +232,152 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) mr->cur_tp = MINSTREL_TRUNC(tp); } +/* + * Find & sort topmost throughput rates + * + * If multiple rates provide equal throughput the sorting is based on their + * current success probability. Higher success probability is preferred among + * MCS groups, CCK rates do not provide aggregation and are therefore at last. 
+ */ +static void +minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, + u8 *tp_list) +{ + int cur_group, cur_idx, cur_thr, cur_prob; + int tmp_group, tmp_idx, tmp_thr, tmp_prob; + int j = MAX_THR_RATES; + + cur_group = index / MCS_GROUP_RATES; + cur_idx = index % MCS_GROUP_RATES; + cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; + cur_prob = mi->groups[cur_group].rates[cur_idx].probability; + + tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; + tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; + tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; + + while (j > 0 && (cur_thr > tmp_thr || + (cur_thr == tmp_thr && cur_prob > tmp_prob))) { + j--; + tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; + tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; + tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; + } + + if (j < MAX_THR_RATES - 1) { + memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * + (MAX_THR_RATES - (j + 1)))); + } + if (j < MAX_THR_RATES) + tp_list[j] = index; +} + +/* + * Find and set the topmost probability rate per sta and per group + */ +static void +minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index) +{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mr; + int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group; + + mg = &mi->groups[index / MCS_GROUP_RATES]; + mr = &mg->rates[index % MCS_GROUP_RATES]; + + tmp_group = mi->max_prob_rate / MCS_GROUP_RATES; + tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES; + tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; + tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; + + /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from + * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */ + max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES; + if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) && + (max_tp_group != MINSTREL_CCK_GROUP)) + return; + + if (mr->probability > MINSTREL_FRAC(75, 100)) { + if (mr->cur_tp > tmp_tp) + mi->max_prob_rate = index; + if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp) + mg->max_group_prob_rate = index; + } else { + if (mr->probability > tmp_prob) + mi->max_prob_rate = index; + if (mr->probability > mg->rates[mg->max_group_prob_rate].probability) + mg->max_group_prob_rate = index; + } +} + + +/* + * Assign new rate set per sta and use CCK rates only if the fastest + * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted + * rate sets where MCS and CCK rates are mixed, because CCK rates can + * not use aggregation. + */ +static void +minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, + u8 tmp_mcs_tp_rate[MAX_THR_RATES], + u8 tmp_cck_tp_rate[MAX_THR_RATES]) +{ + unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp; + int i; + + tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES; + tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES; + tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; + + tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES; + tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES; + tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; + + if (tmp_cck_tp > tmp_mcs_tp) { + for(i = 0; i < MAX_THR_RATES; i++) { + minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i], + tmp_mcs_tp_rate); + } + } + +} + +/* + * Try to increase robustness of max_prob rate by decrease number of + * streams if possible. 
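The new minstrel_ht_sort_best_tp_rates() above maintains a small descending list of the best-throughput rate indexes, breaking throughput ties by delivery probability. A userspace sketch of the same insertion, using flat arrays instead of the per-group kernel layout (names are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4

static void sort_best_tp(const unsigned int *tp, const unsigned int *prob,
			 unsigned char index, unsigned char *tp_list)
{
	int j = MAX_THR_RATES;

	/* walk up the list while the candidate beats the entry above it */
	while (j > 0 &&
	       (tp[index] > tp[tp_list[j - 1]] ||
		(tp[index] == tp[tp_list[j - 1]] &&
		 prob[index] > prob[tp_list[j - 1]])))
		j--;

	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = index;
}

int main(void)
{
	unsigned int tp[5]   = { 10, 40, 40, 5, 25 };	    /* throughput per rate */
	unsigned int prob[5] = { 900, 700, 800, 950, 850 }; /* scaled probability */
	unsigned char list[MAX_THR_RATES] = { 0, 0, 0, 0 };
	unsigned char i;

	for (i = 1; i < 5; i++)
		sort_best_tp(tp, prob, i, list);
	printf("best: %d %d %d %d\n", list[0], list[1], list[2], list[3]);
	return 0;
}

The companion max_prob_rate rule can be sketched the same way: a candidate above the 75% probability threshold replaces the current pick when it offers more throughput, while a candidate below the threshold only wins on reliability. Again a simplified sketch, without the MCS/CCK group restriction:

#include <stdio.h>

/* Probabilities scaled so 1000 == 100%; illustrative names. */
struct rate {
	unsigned int cur_tp;
	unsigned int probability;
};

static int pick_max_prob_rate(const struct rate *r, int n)
{
	int i, best = 0;

	for (i = 1; i < n; i++) {
		if (r[i].probability > 750) {
			if (r[i].cur_tp > r[best].cur_tp)
				best = i;	/* reliable enough: throughput decides */
		} else if (r[i].probability > r[best].probability) {
			best = i;		/* otherwise reliability decides */
		}
	}
	return best;
}

int main(void)
{
	struct rate r[3] = {
		{ .cur_tp = 100, .probability = 990 },
		{ .cur_tp = 400, .probability = 800 }, /* fast and still >75% */
		{ .cur_tp = 600, .probability = 300 }, /* fast but unreliable */
	};

	printf("max_prob_rate = %d\n", pick_max_prob_rate(r, 3)); /* 1 */
	return 0;
}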
+ */ +static inline void +minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) +{ + struct minstrel_mcs_group_data *mg; + struct minstrel_rate_stats *mr; + int tmp_max_streams, group; + int tmp_tp = 0; + + tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / + MCS_GROUP_RATES].streams; + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { + mg = &mi->groups[group]; + if (!mg->supported || group == MINSTREL_CCK_GROUP) + continue; + mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate); + if (tmp_tp < mr->cur_tp && + (minstrel_mcs_groups[group].streams < tmp_max_streams)) { + mi->max_prob_rate = mg->max_group_prob_rate; + tmp_tp = mr->cur_tp; + } + } +} + /* * Update rate statistics and select new primary rates * * Rules for rate selection: * - max_prob_rate must use only one stream, as a tradeoff between delivery * probability and throughput during strong fluctuations - * - as long as the max prob rate has a probability of more than 3/4, pick + * - as long as the max prob rate has a probability of more than 75%, pick * higher throughput rates, even if the probablity is a bit lower */ static void @@ -246,9 +385,9 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_mcs_group_data *mg; struct minstrel_rate_stats *mr; - int cur_prob, cur_prob_tp, cur_tp, cur_tp2; - int group, i, index; - bool mi_rates_valid = false; + int group, i, j; + u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; + u8 tmp_cck_tp_rate[MAX_THR_RATES], index; if (mi->ampdu_packets > 0) { mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, @@ -260,13 +399,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mi->sample_slow = 0; mi->sample_count = 0; - for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { - bool mg_rates_valid = false; + /* Initialize global rate indexes */ + for(j = 0; j < MAX_THR_RATES; j++){ + tmp_mcs_tp_rate[j] = 0; + tmp_cck_tp_rate[j] = 0; + } - cur_prob = 0; - cur_prob_tp = 0; - cur_tp = 0; - cur_tp2 = 0; + /* Find best rate sets within all MCS groups*/ + for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { mg = &mi->groups[group]; if (!mg->supported) @@ -274,24 +414,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mi->sample_count++; + /* (re)Initialize group rate indexes */ + for(j = 0; j < MAX_THR_RATES; j++) + tmp_group_tp_rate[j] = group; + for (i = 0; i < MCS_GROUP_RATES; i++) { if (!(mg->supported & BIT(i))) continue; index = MCS_GROUP_RATES * group + i; - /* initialize rates selections starting indexes */ - if (!mg_rates_valid) { - mg->max_tp_rate = mg->max_tp_rate2 = - mg->max_prob_rate = i; - if (!mi_rates_valid) { - mi->max_tp_rate = mi->max_tp_rate2 = - mi->max_prob_rate = index; - mi_rates_valid = true; - } - mg_rates_valid = true; - } - mr = &mg->rates[i]; mr->retry_updated = false; minstrel_calc_rate_ewma(mr); @@ -300,82 +432,47 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) if (!mr->cur_tp) continue; - if ((mr->cur_tp > cur_prob_tp && mr->probability > - MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { - mg->max_prob_rate = index; - cur_prob = mr->probability; - cur_prob_tp = mr->cur_tp; - } - - if (mr->cur_tp > cur_tp) { - swap(index, mg->max_tp_rate); - cur_tp = mr->cur_tp; - mr = minstrel_get_ratestats(mi, index); - } - - if (index >= mg->max_tp_rate) - continue; - - if (mr->cur_tp > cur_tp2) { - mg->max_tp_rate2 = index; - cur_tp2 = mr->cur_tp; + 
/* Find max throughput rate set */ + if (group != MINSTREL_CCK_GROUP) { + minstrel_ht_sort_best_tp_rates(mi, index, + tmp_mcs_tp_rate); + } else if (group == MINSTREL_CCK_GROUP) { + minstrel_ht_sort_best_tp_rates(mi, index, + tmp_cck_tp_rate); } - } - } - /* try to sample all available rates during each interval */ - mi->sample_count *= 8; + /* Find max throughput rate set within a group */ + minstrel_ht_sort_best_tp_rates(mi, index, + tmp_group_tp_rate); - cur_prob = 0; - cur_prob_tp = 0; - cur_tp = 0; - cur_tp2 = 0; - for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { - mg = &mi->groups[group]; - if (!mg->supported) - continue; - - mr = minstrel_get_ratestats(mi, mg->max_tp_rate); - if (cur_tp < mr->cur_tp) { - mi->max_tp_rate2 = mi->max_tp_rate; - cur_tp2 = cur_tp; - mi->max_tp_rate = mg->max_tp_rate; - cur_tp = mr->cur_tp; - mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1; + /* Find max probability rate per group and global */ + minstrel_ht_set_best_prob_rate(mi, index); } - mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); - if (cur_tp2 < mr->cur_tp) { - mi->max_tp_rate2 = mg->max_tp_rate2; - cur_tp2 = mr->cur_tp; - } + memcpy(mg->max_group_tp_rate, tmp_group_tp_rate, + sizeof(mg->max_group_tp_rate)); } - if (mi->max_prob_streams < 1) - mi->max_prob_streams = 1; + /* Assign new rate set per sta */ + minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate); + memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate)); - for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { - mg = &mi->groups[group]; - if (!mg->supported) - continue; - mr = minstrel_get_ratestats(mi, mg->max_prob_rate); - if (cur_prob_tp < mr->cur_tp && - minstrel_mcs_groups[group].streams <= mi->max_prob_streams) { - mi->max_prob_rate = mg->max_prob_rate; - cur_prob = mr->cur_prob; - cur_prob_tp = mr->cur_tp; - } - } + /* Try to increase robustness of max_prob_rate*/ + minstrel_ht_prob_rate_reduce_streams(mi); + + /* try to sample all available rates during each interval */ + mi->sample_count *= 8; #ifdef CONFIG_MAC80211_DEBUGFS /* use fixed index if set */ if (mp->fixed_rate_idx != -1) { - mi->max_tp_rate = mp->fixed_rate_idx; - mi->max_tp_rate2 = mp->fixed_rate_idx; + for (i = 0; i < 4; i++) + mi->max_tp_rate[i] = mp->fixed_rate_idx; mi->max_prob_rate = mp->fixed_rate_idx; } #endif + /* Reset update timer */ mi->stats_update = jiffies; } @@ -420,8 +517,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi) } static void -minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, - bool primary) +minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary) { int group, orig_group; @@ -437,9 +533,9 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, continue; if (primary) - *idx = mi->groups[group].max_tp_rate; + *idx = mi->groups[group].max_group_tp_rate[0]; else - *idx = mi->groups[group].max_tp_rate2; + *idx = mi->groups[group].max_group_tp_rate[1]; break; } } @@ -524,19 +620,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, * check for sudden death of spatial multiplexing, * downgrade to a lower number of streams if necessary. 
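The "sudden death" check above downgrades a primary rate once it has accumulated more than 30 attempts in the interval with a success ratio below 20%. The test itself reduces to a small comparison; here is a userspace sketch with illustrative names:

#include <stdio.h>

struct rate_stats {
	unsigned int attempts;
	unsigned int success;
};

/* True when the rate has enough samples and delivery collapsed below 20%. */
static int needs_downgrade(const struct rate_stats *rs)
{
	return rs->attempts > 30 &&
	       rs->success * 100 < rs->attempts * 20;
}

int main(void)
{
	struct rate_stats healthy = { .attempts = 50, .success = 40 };
	struct rate_stats dying   = { .attempts = 50, .success = 5 };

	printf("healthy: %d, dying: %d\n",
	       needs_downgrade(&healthy), needs_downgrade(&dying)); /* 0, 1 */
	return 0;
}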
*/ - rate = minstrel_get_ratestats(mi, mi->max_tp_rate); + rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); if (rate->attempts > 30 && MINSTREL_FRAC(rate->success, rate->attempts) < MINSTREL_FRAC(20, 100)) { - minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); + minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); update = true; } - rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); + rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); if (rate2->attempts > 30 && MINSTREL_FRAC(rate2->success, rate2->attempts) < MINSTREL_FRAC(20, 100)) { - minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); + minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); update = true; } @@ -661,12 +757,12 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) if (!rates) return; - /* Start with max_tp_rate */ - minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate); + /* Start with max_tp_rate[0] */ + minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); if (mp->hw->max_rates >= 3) { - /* At least 3 tx rates supported, use max_tp_rate2 next */ - minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate2); + /* At least 3 tx rates supported, use max_tp_rate[1] next */ + minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]); } if (mp->hw->max_rates >= 2) { @@ -691,7 +787,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) { struct minstrel_rate_stats *mr; struct minstrel_mcs_group_data *mg; - unsigned int sample_dur, sample_group; + unsigned int sample_dur, sample_group, cur_max_tp_streams; int sample_idx = 0; if (mi->sample_wait > 0) { @@ -718,8 +814,8 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) * to the frame. Hence, don't use sampling for the currently * used rates. */ - if (sample_idx == mi->max_tp_rate || - sample_idx == mi->max_tp_rate2 || + if (sample_idx == mi->max_tp_rate[0] || + sample_idx == mi->max_tp_rate[1] || sample_idx == mi->max_prob_rate) return -1; @@ -734,9 +830,12 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) * Make sure that lower rates get sampled only occasionally, * if the link is working perfectly. 
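The sampling guard above never probes the rates already installed as primaries, and it only spends airtime on slower candidates when they might still pay off, for example because they use more spatial streams than the current best. A simplified userspace rendering of that decision, not the exact kernel condition:

#include <stdio.h>

/* Illustrative names and a simplified duration model. */
struct rate_info {
	unsigned int duration;	/* airtime per frame, arbitrary units */
	unsigned int streams;
};

static int worth_sampling(const struct rate_info *r, int cand,
			  int max_tp0, int max_tp1, int max_prob)
{
	if (cand == max_tp0 || cand == max_tp1 || cand == max_prob)
		return 0;	/* already used as a primary rate */

	if (r[cand].duration < r[max_tp1].duration)
		return 1;	/* faster than the 2nd best: always worth a probe */

	/* slower: only probe if it adds streams or beats the max-prob rate */
	return r[cand].streams > r[max_tp0].streams ||
	       r[cand].duration < r[max_prob].duration;
}

int main(void)
{
	struct rate_info r[4] = {
		{ .duration = 100, .streams = 2 },	/* max_tp[0] */
		{ .duration = 140, .streams = 2 },	/* max_tp[1] */
		{ .duration = 400, .streams = 1 },	/* max_prob  */
		{ .duration = 300, .streams = 3 },	/* candidate */
	};

	printf("sample? %d\n", worth_sampling(r, 3, 0, 1, 2)); /* 1: extra stream */
	return 0;
}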
*/ + + cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / + MCS_GROUP_RATES].streams; sample_dur = minstrel_get_duration(sample_idx); - if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) && - (mi->max_prob_streams < + if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) && + (cur_max_tp_streams - 1 < minstrel_mcs_groups[sample_group].streams || sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { if (mr->sample_skipped < 20) @@ -1041,8 +1140,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) if (!msp->is_ht) return mac80211_minstrel.get_expected_throughput(priv_sta); - i = mi->max_tp_rate / MCS_GROUP_RATES; - j = mi->max_tp_rate % MCS_GROUP_RATES; + i = mi->max_tp_rate[0] / MCS_GROUP_RATES; + j = mi->max_tp_rate[0] % MCS_GROUP_RATES; /* convert cur_tp from pkt per second in kbps */ return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024; diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h index d655586773ac2f435de881f84414c5e260424484..01570e0e014b0995beef18504825b848ec00cbad 100644 --- a/net/mac80211/rc80211_minstrel_ht.h +++ b/net/mac80211/rc80211_minstrel_ht.h @@ -26,28 +26,6 @@ struct mcs_group { extern const struct mcs_group minstrel_mcs_groups[]; -struct minstrel_rate_stats { - /* current / last sampling period attempts/success counters */ - unsigned int attempts, last_attempts; - unsigned int success, last_success; - - /* total attempts/success counters */ - u64 att_hist, succ_hist; - - /* current throughput */ - unsigned int cur_tp; - - /* packet delivery probabilities */ - unsigned int cur_prob, probability; - - /* maximum retry counts */ - unsigned int retry_count; - unsigned int retry_count_rtscts; - - bool retry_updated; - u8 sample_skipped; -}; - struct minstrel_mcs_group_data { u8 index; u8 column; @@ -55,10 +33,9 @@ struct minstrel_mcs_group_data { /* bitfield of supported MCS rates of this group */ u8 supported; - /* selected primary rates */ - unsigned int max_tp_rate; - unsigned int max_tp_rate2; - unsigned int max_prob_rate; + /* sorted rate set within a MCS group*/ + u8 max_group_tp_rate[MAX_THR_RATES]; + u8 max_group_prob_rate; /* MCS rate statistics */ struct minstrel_rate_stats rates[MCS_GROUP_RATES]; @@ -74,15 +51,9 @@ struct minstrel_ht_sta { /* ampdu length (EWMA) */ unsigned int avg_ampdu_len; - /* best throughput rate */ - unsigned int max_tp_rate; - - /* second best throughput rate */ - unsigned int max_tp_rate2; - - /* best probability rate */ - unsigned int max_prob_rate; - unsigned int max_prob_streams; + /* overall sorted rate set */ + u8 max_tp_rate[MAX_THR_RATES]; + u8 max_prob_rate; /* time of last status update */ unsigned long stats_update; diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index 3e7d793de0c3407180965bb785485fd361700560..a72ad46f2a04b6e557044d52da1f033c7be2c531 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c @@ -46,8 +46,10 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) else p += sprintf(p, "HT%c0/%cGI ", htmode, gimode); - *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' '; - *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' '; + *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' '; + *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' '; + *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' '; + *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' '; *(p++) = (idx == mi->max_prob_rate) ? 
'P' : ' '; if (i == max_mcs) { @@ -100,8 +102,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) file->private_data = ms; p = ms->buf; - p += sprintf(p, "type rate throughput ewma prob this prob " - "retry this succ/attempt success attempts\n"); + p += sprintf(p, "type rate throughput ewma prob " + "this prob retry this succ/attempt success attempts\n"); p = minstrel_ht_stats_dump(mi, max_mcs, p); for (i = 0; i < max_mcs; i++) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bd2c9b22c945669f8ec1d48e3959101fd9ce6754..b04ca4049c95f276aa4627501d5ced01e8a3b878 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -835,6 +836,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata spin_lock(&tid_agg_rx->reorder_lock); + /* + * Offloaded BA sessions have no known starting sequence number so pick + * one from first Rxed frame for this tid after BA was started. + */ + if (unlikely(tid_agg_rx->auto_seq)) { + tid_agg_rx->auto_seq = false; + tid_agg_rx->ssn = mpdu_seq_num; + tid_agg_rx->head_seq_num = mpdu_seq_num; + } + buf_size = tid_agg_rx->buf_size; head_seq_num = tid_agg_rx->head_seq_num; @@ -2725,7 +2736,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) sig = status->signal; if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, - rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) { + rx->skb->data, rx->skb->len, 0)) { if (rx->sta) rx->sta->rx_packets++; dev_kfree_skb(rx->skb); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index a0a938145dcc87397cb4ecfad5f59b1c5fb28dbc..af0d094b2f2f0ecc6939edd15b2c2d253763e10b 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -6,6 +6,7 @@ * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1094,7 +1095,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) if (rcu_access_pointer(local->sched_scan_sdata)) { ret = drv_sched_scan_stop(local, sdata); if (!ret) - rcu_assign_pointer(local->sched_scan_sdata, NULL); + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a1e433b88c66ab7413eb8813b7010b56b0a4c450..de494df3bab82ce56efabde30790d8a660f6a983 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1,6 +1,7 @@ /* * Copyright 2002-2005, Instant802 Networks, Inc. 
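The auto_seq handling added to the Rx reorder path above covers offloaded BA sessions, which start without a known SSN: the reorder window head is simply taken from the first frame received on that TID, after which the flag is cleared. A userspace sketch with illustrative names:

#include <stdio.h>

struct reorder_state {
	int auto_seq;		/* no starting sequence number known yet */
	unsigned int ssn;
	unsigned int head_seq_num;
};

static void reorder_rx_frame(struct reorder_state *st, unsigned int seq)
{
	if (st->auto_seq) {
		/* lock the window position onto the first received frame */
		st->auto_seq = 0;
		st->ssn = seq;
		st->head_seq_num = seq;
	}
	/* ... normal reorder-buffer processing would continue here ... */
}

int main(void)
{
	struct reorder_state st = { .auto_seq = 1 };

	reorder_rx_frame(&st, 1234);
	printf("ssn=%u head=%u auto=%d\n", st.ssn, st.head_seq_num, st.auto_seq);
	return 0;
}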
* Copyright 2006-2007 Jiri Benc + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1182,7 +1183,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 fc; - bool qos = test_sta_flag(sta, WLAN_STA_WME); + bool qos = sta->sta.wme; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; @@ -1837,7 +1838,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); - if (test_sta_flag(sta, WLAN_STA_WME)) + if (sta->sta.wme) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); if (test_sta_flag(sta, WLAN_STA_MFP)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d411bcc8ef085215b8369fc0f066a49fd65fc6eb..42f68cb8957e5a29d1a4e7965b65aff086bfe55e 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -1,5 +1,6 @@ /* * Copyright 2002-2005, Devicescape Software, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -31,7 +32,6 @@ * when virtual port control is not in use. * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble * frames. - * @WLAN_STA_WME: Station is a QoS-STA. * @WLAN_STA_WDS: Station is one of our WDS peers. * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next @@ -69,7 +69,6 @@ enum ieee80211_sta_info_flags { WLAN_STA_PS_STA, WLAN_STA_AUTHORIZED, WLAN_STA_SHORT_PREAMBLE, - WLAN_STA_WME, WLAN_STA_WDS, WLAN_STA_CLEAR_PS_FILT, WLAN_STA_MFP, @@ -169,6 +168,8 @@ struct tid_ampdu_tx { * @dialog_token: dialog token for aggregation session * @rcu_head: RCU head used for freeing this struct * @reorder_lock: serializes access to reorder buffer, see below. + * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and + * and ssn. * * This structure's lifetime is managed by RCU, assignments to * the array holding it must hold the aggregation mutex. @@ -192,6 +193,7 @@ struct tid_ampdu_rx { u16 buf_size; u16 timeout; u8 dialog_token; + bool auto_seq; }; /** @@ -448,6 +450,9 @@ struct sta_info { enum ieee80211_smps_mode known_smps_mode; const struct ieee80211_cipher_scheme *cipher_scheme; + /* TDLS timeout data */ + unsigned long last_tdls_pkt_time; + /* keep last! */ struct ieee80211_sta sta; }; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index aa06dcad336e6d9bcdfff2009d7775b26bee8a6e..89290e33dafe9f72335c907f075b1f784a91e509 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc * Copyright 2008-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -537,6 +538,8 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local, * - current throughput (higher value for higher tpt)? 
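The TDLS-aware packet-loss logic above raises the CQM notification only after 10 consecutive losses and at least 10 seconds without an ACK on the direct link, so a short fade does not immediately tear the link down, while non-TDLS peers keep the original 50-packet threshold. A userspace sketch; time(NULL) stands in for jiffies and the names are illustrative:

#include <stdio.h>
#include <time.h>

#define LOST_PKT_THRESHOLD	50
#define LOST_TDLS_PKT_THRESHOLD	10
#define LOST_TDLS_PKT_TIME	10	/* seconds since last ACK */

struct peer {
	int tdls;
	unsigned int lost_packets;
	time_t last_ack;
};

static int should_report_loss(struct peer *p, time_t now)
{
	p->lost_packets++;

	if (!p->tdls)
		return p->lost_packets >= LOST_PKT_THRESHOLD;

	/* TDLS: require both the loss streak and a quiet period without ACKs */
	return p->lost_packets >= LOST_TDLS_PKT_THRESHOLD &&
	       now - p->last_ack >= LOST_TDLS_PKT_TIME;
}

int main(void)
{
	time_t now = time(NULL);
	struct peer tdls = { .tdls = 1, .lost_packets = 9, .last_ack = now - 3 };

	/* 10th loss, but an ACK was seen 3 seconds ago: do not report yet */
	printf("report? %d\n", should_report_loss(&tdls, now));
	return 0;
}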
*/ #define STA_LOST_PKT_THRESHOLD 50 +#define STA_LOST_TDLS_PKT_THRESHOLD 10 +#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb) { @@ -547,7 +550,20 @@ static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb) !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - if (++sta->lost_packets < STA_LOST_PKT_THRESHOLD) + sta->lost_packets++; + if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD) + return; + + /* + * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD + * of the last packets were lost, and that no ACK was received in the + * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss + * mechanism. + */ + if (sta->sta.tdls && + (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || + time_before(jiffies, + sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME))) return; cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, @@ -694,6 +710,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & IEEE80211_TX_STAT_ACK) { if (sta->lost_packets) sta->lost_packets = 0; + + /* Track when last TDLS packet was ACKed */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) + sta->last_tdls_pkt_time = jiffies; } else { ieee80211_lost_packet(sta, skb); } diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 1b21050be174b29539a954991d0376dab76fc78f..4ea25dec06984792234f4a77edc1e26c157b0662 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -3,6 +3,7 @@ * * Copyright 2006-2010 Johannes Berg * Copyright 2014, Intel Corporation + * Copyright 2014 Intel Mobile Communications GmbH * * This file is GPLv2 as found in COPYING. */ @@ -316,8 +317,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, } /* add the QoS param IE if both the peer and we support it */ - if (local->hw.queues >= IEEE80211_NUM_ACS && - test_sta_flag(sta, WLAN_STA_WME)) + if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) ieee80211_tdls_add_wmm_param_ie(sdata, skb); /* add any custom IEs that go before HT operation */ @@ -412,6 +412,9 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->ether_type = cpu_to_be16(ETH_P_TDLS); tf->payload_type = WLAN_TDLS_SNAP_RFTYPE; + /* network header is after the ethernet header */ + skb_set_network_header(skb, ETH_HLEN); + switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: tf->category = WLAN_CATEGORY_TDLS; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 02ac535d1274217653e2f9a48c6f6e60ed6d0927..38fae7ebe984a621540328b6bdad28dcd632fccb 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -672,13 +672,13 @@ DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold, ); TRACE_EVENT(drv_set_coverage_class, - TP_PROTO(struct ieee80211_local *local, u8 value), + TP_PROTO(struct ieee80211_local *local, s16 value), TP_ARGS(local, value), TP_STRUCT__entry( LOCAL_ENTRY - __field(u8, value) + __field(s16, value) ), TP_fast_assign( diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 464106c023d8c7d9cb1bd65b39ad642bd1090bc1..900632a250ecc7b0f4c57190d68bcdd45169e60e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. 
* Copyright 2006-2007 Jiri Benc * Copyright 2007 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1478,7 +1479,10 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, tail_need = max_t(int, tail_need, 0); } - if (skb_cloned(skb)) + if (skb_cloned(skb) && + (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) || + !skb_clone_writable(skb, ETH_HLEN) || + sdata->crypto_tx_tailroom_needed_cnt)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); @@ -1785,9 +1789,8 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local, * @skb: packet to be sent * @dev: incoming interface * - * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will - * not be freed, and caller is responsible for either retrying later or freeing - * skb). + * Returns: NETDEV_TX_OK both on success and on failure. On failure skb will + * be freed. * * This function takes in an Ethernet header and encapsulates it with suitable * IEEE 802.11 header based on which interface the packet is coming in. The @@ -1844,7 +1847,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; } ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); @@ -1957,7 +1960,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); tdls_auth = test_sta_flag(sta, @@ -2035,7 +2038,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, sta = sta_info_get(sdata, hdr.addr1); if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; } } @@ -2069,30 +2072,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { - struct sk_buff *orig_skb = skb; + struct sk_buff *ack_skb = skb_clone_sk(skb); - skb = skb_clone(skb, GFP_ATOMIC); - if (skb) { + if (ack_skb) { unsigned long flags; int id; spin_lock_irqsave(&local->ack_status_lock, flags); - id = idr_alloc(&local->ack_status_frames, orig_skb, + id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x10000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (id >= 0) { info_id = id; info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - } else if (skb_shared(skb)) { - kfree_skb(orig_skb); } else { - kfree_skb(skb); - skb = orig_skb; + kfree_skb(ack_skb); } - } else { - /* couldn't clone -- lose tx status ... */ - skb = orig_skb; } } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 725af7a468d233b502a6de8a07e30d7a25d60e66..3c61060a4d2b75bd89d4a792dde8b4721eb435dc 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. 
* Copyright 2006-2007 Jiri Benc * Copyright 2007 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1014,6 +1015,31 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, } elems->pwr_constr_elem = pos; break; + case WLAN_EID_CISCO_VENDOR_SPECIFIC: + /* Lots of different options exist, but we only care + * about the Dynamic Transmit Power Control element. + * First check for the Cisco OUI, then for the DTPC + * tag (0x00). + */ + if (elen < 4) { + elem_parse_failed = true; + break; + } + + if (pos[0] != 0x00 || pos[1] != 0x40 || + pos[2] != 0x96 || pos[3] != 0x00) + break; + + if (elen != 6) { + elem_parse_failed = true; + break; + } + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + elems->cisco_dtpc_elem = pos; + break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index d51422c778dee359bec450c75b77bfb24ca712f4..3b873989992cbe0d0ae9b09d76d799cb3585add4 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -1,5 +1,6 @@ /* * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -118,7 +119,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; break; } case NL80211_IFTYPE_AP: @@ -145,7 +146,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, if (!sta && ra && !is_multicast_ether_addr(ra)) { sta = sta_info_get(sdata, ra); if (sta) - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; } rcu_read_unlock(); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index f7d4ca4c46e0bf68c6232d339d4b54f7934798ba..983527a4c1aba22c4ae6dc43c113ed1df9f1eb9c 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -64,8 +64,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) if (!info->control.hw_key) tail += IEEE80211_TKIP_ICV_LEN; - if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) + if (WARN(skb_tailroom(skb) < tail || + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN, + "mmic: not enough head/tail (%d/%d,%d/%d)\n", + skb_headroom(skb), IEEE80211_TKIP_IV_LEN, + skb_tailroom(skb), tail)) return TX_DROP; key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index 7f820a108a9cf3461a46a57522251ea4edc51003..a14cf9ede171e7bcc2c1373931e5e7754b36bab4 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -86,9 +86,8 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi) static void mac802154_rx_worker(struct work_struct *work) { struct rx_work *rw = container_of(work, struct rx_work, work); - struct sk_buff *skb = rw->skb; - mac802154_subif_rx(rw->dev, skb, rw->lqi); + mac802154_subif_rx(rw->dev, rw->skb, rw->lqi); kfree(rw); } @@ -101,7 +100,7 @@ ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) if (!skb) return; - work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; diff --git 
a/net/mac802154/tx.c b/net/mac802154/tx.c index 8124353646ae64239556a0ed915f046f7e689864..fdf4c0e67259ecceacc0b06b9060405ded6f63cc 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -89,8 +89,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, if (!(priv->phy->channels_supported[page] & (1 << chan))) { WARN_ON(1); - kfree_skb(skb); - return NETDEV_TX_OK; + goto err_tx; } mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb); @@ -103,12 +102,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, data[1] = crc >> 8; } - if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { - kfree_skb(skb); - return NETDEV_TX_OK; - } + if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) + goto err_tx; - work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) { kfree_skb(skb); return NETDEV_TX_BUSY; @@ -129,4 +126,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, queue_work(priv->dev_workqueue, &work->work); return NETDEV_TX_OK; + +err_tx: + kfree_skb(skb); + return NETDEV_TX_OK; } diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c index d593500ceb3c63af67d2679ee182c791a1e6bdad..4ab86a57dca55212bddaac61f7c3965a255934ba 100644 --- a/net/mac802154/wpan.c +++ b/net/mac802154/wpan.c @@ -475,8 +475,7 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, rc = mac802154_llsec_decrypt(&sdata->sec, skb); if (rc) { pr_debug("decryption failed: %i\n", rc); - kfree_skb(skb); - return NET_RX_DROP; + goto fail; } sdata->dev->stats.rx_packets++; @@ -488,9 +487,12 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, default: pr_warn("ieee802154: bad frame received (type = %d)\n", mac_cb(skb)->type); - kfree_skb(skb); - return NET_RX_DROP; + goto fail; } + +fail: + kfree_skb(skb); + return NET_RX_DROP; } static void mac802154_print_addr(const char *name, diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 6b38d083e1c973c55e18914748d49fe4968a7188..e28ed2ef5b0650faf5f3f0db950c319f4caa7c31 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -65,15 +65,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, return segs; } -static int mpls_gso_send_check(struct sk_buff *skb) -{ - return 0; -} - static struct packet_offload mpls_mc_offload = { .type = cpu_to_be16(ETH_P_MPLS_MC), .callbacks = { - .gso_send_check = mpls_gso_send_check, .gso_segment = mpls_gso_segment, }, }; @@ -81,7 +75,6 @@ static struct packet_offload mpls_mc_offload = { static struct packet_offload mpls_uc_offload = { .type = cpu_to_be16(ETH_P_MPLS_UC), .callbacks = { - .gso_send_check = mpls_gso_send_check, .gso_segment = mpls_gso_segment, }, }; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 6d77cce481d59b798babc9deba9234dff31c7dce..ae5096ab65eb54cff350f8a19e981b6d8906b16e 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -496,6 +496,15 @@ config NFT_LIMIT This option adds the "limit" expression that you can use to ratelimit rule matchings. +config NFT_MASQ + depends on NF_TABLES + depends on NF_CONNTRACK + depends on NF_NAT + tristate "Netfilter nf_tables masquerade support" + help + This option adds the "masquerade" expression that you can use + to perform NAT in the masquerade flavour. 
+ config NFT_NAT depends on NF_TABLES depends on NF_CONNTRACK diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index fad5fdba34e5873c0535037bf709c343ebfd55e3..a9571be3f7914cff0619cb5c7d5829f56feab35f 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -87,6 +87,7 @@ obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o obj-$(CONFIG_NFT_HASH) += nft_hash.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o +obj-$(CONFIG_NFT_MASQ) += nft_masq.o # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig index 2f7f5c32c6f90a0eb376d7921aecf167564329ce..234a8ec82076803a1c2ca75de9c7a2406363954b 100644 --- a/net/netfilter/ipset/Kconfig +++ b/net/netfilter/ipset/Kconfig @@ -99,6 +99,15 @@ config IP_SET_HASH_IPPORTNET To compile it as a module, choose M here. If unsure, say N. +config IP_SET_HASH_MAC + tristate "hash:mac set support" + depends on IP_SET + help + This option adds the hash:mac set type support, by which + one can store MAC (ethernet address) elements in a set. + + To compile it as a module, choose M here. If unsure, say N. + config IP_SET_HASH_NETPORTNET tristate "hash:net,port,net set support" depends on IP_SET diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile index 231f10196cb906fd4cbbe5f58263c5a3ba97886a..3dbd5e95848947230f80692cb26acb9437b3106c 100644 --- a/net/netfilter/ipset/Makefile +++ b/net/netfilter/ipset/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_IP_SET_HASH_IPMARK) += ip_set_hash_ipmark.o obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o +obj-$(CONFIG_IP_SET_HASH_MAC) += ip_set_hash_mac.o obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index f2c7d83dc23f46b83e4d24e665f2abc31317783b..6f024a8a1534a7552168c49a8007444c61af0bc0 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -128,6 +128,8 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, return 0; if (SET_WITH_COUNTER(set)) ip_set_update_counter(ext_counter(x, set), ext, mext, flags); + if (SET_WITH_SKBINFO(set)) + ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags); return 1; } @@ -161,6 +163,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, ip_set_init_counter(ext_counter(x, set), ext); if (SET_WITH_COMMENT(set)) ip_set_init_comment(ext_comment(x, set), ext); + if (SET_WITH_SKBINFO(set)) + ip_set_init_skbinfo(ext_skbinfo(x, set), ext); return 0; } diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index 6f1f9f4948084e40375ab722565e7bf20b2d972d..55b083ec587a617109bc2b1d4b299f0f6fbe15e3 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -27,7 +27,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 1 Counter support added */ -#define IPSET_TYPE_REV_MAX 2 /* Comment support added */ +/* 2 Comment support added */ +#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -112,7 +113,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_ip *map = set->data; 
ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ip_adt_elem e = { }; + struct bitmap_ip_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); u32 ip; @@ -132,14 +133,17 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], struct bitmap_ip *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; u32 ip = 0, ip_to = 0; - struct bitmap_ip_adt_elem e = { }; + struct bitmap_ip_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret = 0; if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -357,6 +361,9 @@ static struct ip_set_type bitmap_ip_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 740eabededd9754b7db95a9d46d48f8a1f283bf7..86104744b00ff67339f78db238f093874b70845a 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -27,7 +27,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 1 Counter support added */ -#define IPSET_TYPE_REV_MAX 2 /* Comment support added */ +/* 2 Comment support added */ +#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -203,7 +204,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_ipmac *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ipmac_adt_elem e = {}; + struct bitmap_ipmac_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); u32 ip; @@ -232,7 +233,7 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], { const struct bitmap_ipmac *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ipmac_adt_elem e = {}; + struct bitmap_ipmac_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0; int ret = 0; @@ -240,7 +241,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -394,6 +398,9 @@ static struct ip_set_type bitmap_ipmac_type = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c 
b/net/netfilter/ipset/ip_set_bitmap_port.c index cf99676e69f81bdc326b2a187dc8de1450b23e87..005dd36444c3472b2257be6e29ada9882649c1c8 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -22,7 +22,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 1 Counter support added */ -#define IPSET_TYPE_REV_MAX 2 /* Comment support added */ +/* 2 Comment support added */ +#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -104,7 +105,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_port *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_port_adt_elem e = {}; + struct bitmap_port_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); __be16 __port; u16 port = 0; @@ -129,7 +130,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], { struct bitmap_port *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_port_adt_elem e = {}; + struct bitmap_port_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port; /* wraparound */ u16 port_to; @@ -139,7 +140,10 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -291,6 +295,9 @@ static struct ip_set_type bitmap_port_type = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 5edbbe829495f771087946dab1893e112c213e2f..912e5a05b79dbdc2de8aaa3729e08af01888dc4e 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -101,7 +101,7 @@ load_settype(const char *name) nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { - pr_warning("Can't find ip_set type %s\n", name); + pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); return false; } @@ -195,20 +195,19 @@ ip_set_type_register(struct ip_set_type *type) int ret = 0; if (type->protocol != IPSET_PROTOCOL) { - pr_warning("ip_set type %s, family %s, revision %u:%u uses " - "wrong protocol version %u (want %u)\n", - type->name, family_name(type->family), - type->revision_min, type->revision_max, - type->protocol, IPSET_PROTOCOL); + pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n", + type->name, family_name(type->family), + type->revision_min, type->revision_max, + type->protocol, IPSET_PROTOCOL); return -EINVAL; } ip_set_type_lock(); if (find_set_type(type->name, type->family, type->revision_min)) { /* Duplicate! 
*/ - pr_warning("ip_set type %s, family %s with revision min %u " - "already registered!\n", type->name, - family_name(type->family), type->revision_min); + pr_warn("ip_set type %s, family %s with revision min %u already registered!\n", + type->name, family_name(type->family), + type->revision_min); ret = -EINVAL; goto unlock; } @@ -228,9 +227,9 @@ ip_set_type_unregister(struct ip_set_type *type) { ip_set_type_lock(); if (!find_set_type(type->name, type->family, type->revision_min)) { - pr_warning("ip_set type %s, family %s with revision min %u " - "not registered\n", type->name, - family_name(type->family), type->revision_min); + pr_warn("ip_set type %s, family %s with revision min %u not registered\n", + type->name, family_name(type->family), + type->revision_min); goto unlock; } list_del_rcu(&type->list); @@ -338,6 +337,12 @@ const struct ip_set_ext_type ip_set_extensions[] = { .len = sizeof(unsigned long), .align = __alignof__(unsigned long), }, + [IPSET_EXT_ID_SKBINFO] = { + .type = IPSET_EXT_SKBINFO, + .flag = IPSET_FLAG_WITH_SKBINFO, + .len = sizeof(struct ip_set_skbinfo), + .align = __alignof__(struct ip_set_skbinfo), + }, [IPSET_EXT_ID_COMMENT] = { .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY, .flag = IPSET_FLAG_WITH_COMMENT, @@ -383,6 +388,7 @@ int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext) { + u64 fullmark; if (tb[IPSET_ATTR_TIMEOUT]) { if (!(set->extensions & IPSET_EXT_TIMEOUT)) return -IPSET_ERR_TIMEOUT; @@ -403,7 +409,25 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], return -IPSET_ERR_COMMENT; ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]); } - + if (tb[IPSET_ATTR_SKBMARK]) { + if (!(set->extensions & IPSET_EXT_SKBINFO)) + return -IPSET_ERR_SKBINFO; + fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK])); + ext->skbmark = fullmark >> 32; + ext->skbmarkmask = fullmark & 0xffffffff; + } + if (tb[IPSET_ATTR_SKBPRIO]) { + if (!(set->extensions & IPSET_EXT_SKBINFO)) + return -IPSET_ERR_SKBINFO; + ext->skbprio = be32_to_cpu(nla_get_be32( + tb[IPSET_ATTR_SKBPRIO])); + } + if (tb[IPSET_ATTR_SKBQUEUE]) { + if (!(set->extensions & IPSET_EXT_SKBINFO)) + return -IPSET_ERR_SKBINFO; + ext->skbqueue = be16_to_cpu(nla_get_be16( + tb[IPSET_ATTR_SKBQUEUE])); + } return 0; } EXPORT_SYMBOL_GPL(ip_set_get_extensions); @@ -1398,7 +1422,8 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; struct nlmsgerr *errmsg; - size_t payload = sizeof(*errmsg) + nlmsg_len(nlh); + size_t payload = min(SIZE_MAX, + sizeof(*errmsg) + nlmsg_len(nlh)); int min_len = nlmsg_total_size(sizeof(struct nfgenmsg)); struct nlattr *cda[IPSET_ATTR_CMD_MAX+1]; struct nlattr *cmdattr; diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 61c7fb052802e7c0cb291289ee29c484b4f84179..fee7c64e4dd183e5e2fa9d312bbd5cf3134f59f8 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -565,8 +565,8 @@ mtype_resize(struct ip_set *set, bool retried) set->name, orig->htable_bits, htable_bits, orig); if (!htable_bits) { /* In case we have plenty of memory :-) */ - pr_warning("Cannot increase the hashsize of set %s further\n", - set->name); + pr_warn("Cannot increase the hashsize of set %s further\n", + set->name); return -IPSET_ERR_HASH_FULL; } t = ip_set_alloc(sizeof(*t) @@ -651,8 +651,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (h->elements >= h->maxelem) 
{ if (net_ratelimit()) - pr_warning("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); + pr_warn("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); return -IPSET_ERR_HASH_FULL; } @@ -720,6 +720,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, ip_set_init_counter(ext_counter(data, set), ext); if (SET_WITH_COMMENT(set)) ip_set_init_comment(ext_comment(data, set), ext); + if (SET_WITH_SKBINFO(set)) + ip_set_init_skbinfo(ext_skbinfo(data, set), ext); out: rcu_read_unlock_bh(); @@ -797,6 +799,9 @@ mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext, if (SET_WITH_COUNTER(set)) ip_set_update_counter(ext_counter(data, set), ext, mext, flags); + if (SET_WITH_SKBINFO(set)) + ip_set_get_skbinfo(ext_skbinfo(data, set), + ext, mext, flags); return mtype_do_data_match(data); } @@ -998,8 +1003,8 @@ mtype_list(const struct ip_set *set, nla_put_failure: nlmsg_trim(skb, incomplete); if (unlikely(first == cb->args[IPSET_CB_ARG0])) { - pr_warning("Can't list set %s: one bucket does not fit into " - "a message. Please report it!\n", set->name); + pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n", + set->name); cb->args[IPSET_CB_ARG0] = 0; return -EMSGSIZE; } @@ -1049,8 +1054,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, struct HTYPE *h; struct htable *t; +#ifndef IP_SET_PROTO_UNDEF if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) return -IPSET_ERR_INVALID_FAMILY; +#endif #ifdef IP_SET_HASH_WITH_MARKMASK markmask = 0xffffffff; @@ -1093,7 +1100,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, if (tb[IPSET_ATTR_MARKMASK]) { markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK])); - if ((markmask > 4294967295u) || markmask == 0) + if (markmask == 0) return -IPSET_ERR_INVALID_MARKMASK; } #endif @@ -1132,25 +1139,32 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, rcu_assign_pointer(h->table, t); set->data = h; +#ifndef IP_SET_PROTO_UNDEF if (set->family == NFPROTO_IPV4) { +#endif set->variant = &IPSET_TOKEN(HTYPE, 4_variant); set->dsize = ip_set_elem_len(set, tb, sizeof(struct IPSET_TOKEN(HTYPE, 4_elem))); +#ifndef IP_SET_PROTO_UNDEF } else { set->variant = &IPSET_TOKEN(HTYPE, 6_variant); set->dsize = ip_set_elem_len(set, tb, sizeof(struct IPSET_TOKEN(HTYPE, 6_elem))); } +#endif if (tb[IPSET_ATTR_TIMEOUT]) { set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); +#ifndef IP_SET_PROTO_UNDEF if (set->family == NFPROTO_IPV4) +#endif IPSET_TOKEN(HTYPE, 4_gc_init)(set, IPSET_TOKEN(HTYPE, 4_gc)); +#ifndef IP_SET_PROTO_UNDEF else IPSET_TOKEN(HTYPE, 6_gc_init)(set, IPSET_TOKEN(HTYPE, 6_gc)); +#endif } - pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n", set->name, jhash_size(t->htable_bits), t->htable_bits, h->maxelem, set->data, t); diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index dd40607f878e28e8c5c411a24fb8e36ef7db5d46..76959d79e9d1f67e4618b494bd265ceb0acb57a6 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -26,7 +26,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 1 Counters support */ /* 2 Comments support */ -#define IPSET_TYPE_REV_MAX 3 /* Forceadd support */ +/* 3 Forceadd support */ +#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -84,7 +85,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, { const struct hash_ip *h = set->data; ipset_adtfn adtfn 
= set->variant->adt[adt]; - struct hash_ip4_elem e = {}; + struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); __be32 ip; @@ -103,7 +104,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip4_elem e = {}; + struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, hosts; int ret = 0; @@ -111,7 +112,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -222,7 +226,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip6_elem e = {}; + struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); @@ -239,7 +243,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip6_elem e = {}; + struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; @@ -247,6 +251,9 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; @@ -295,6 +302,9 @@ static struct ip_set_type hash_ip_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index 4eff0a29725498045703ac9fc2fc1af042c76386..7abf9788cfa850705bc2e5240751d8443a2a558e 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c @@ -25,7 +25,8 @@ #include #define IPSET_TYPE_REV_MIN 0 -#define IPSET_TYPE_REV_MAX 1 /* Forceadd support */ +/* 1 Forceadd support */ +#define IPSET_TYPE_REV_MAX 2 /* skbinfo support */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vytas Dauksa "); @@ -113,7 +114,10 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return 
-IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -244,6 +248,9 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; @@ -301,6 +308,9 @@ static struct ip_set_type hash_ipmark_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 7597b82a8b033bd37c8a5f64dae02e8aa9137a19..dcbcceb9a52feea746d2b9d88a9a80e79d579bdb 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -28,7 +28,8 @@ /* 1 SCTP and UDPLITE support added */ /* 2 Counters support added */ /* 3 Comments support added */ -#define IPSET_TYPE_REV_MAX 4 /* Forceadd support added */ +/* 4 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -94,7 +95,7 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport4_elem e = { }; + struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -111,7 +112,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipport *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport4_elem e = { }; + struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip, ip_to = 0, p = 0, port, port_to; bool with_ports = false; @@ -122,7 +123,10 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -258,7 +262,7 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport6_elem e = { }; + struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -275,7 +279,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipport *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport6_elem e = { }; + struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; @@ 
-287,6 +291,9 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; @@ -370,6 +377,9 @@ static struct ip_set_type hash_ipport_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 672655ffd573404b2f2f5bf097b7a8d01f0c4c14..7ef93fc887a13b5e5d530a98a2bb3ae7f015d390 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -28,7 +28,8 @@ /* 1 SCTP and UDPLITE support added */ /* 2 Counters support added */ /* 3 Comments support added */ -#define IPSET_TYPE_REV_MAX 4 /* Forceadd support added */ +/* 4 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -95,7 +96,7 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip4_elem e = { }; + struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -113,7 +114,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip4_elem e = { }; + struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip, ip_to = 0, p = 0, port, port_to; bool with_ports = false; @@ -124,7 +125,10 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -265,7 +269,7 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip6_elem e = { }; + struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -283,7 +287,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip6_elem e = { }; + struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; @@ -295,6 +299,9 
@@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; @@ -382,6 +389,9 @@ static struct ip_set_type hash_ipportip_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 7308d84f9277813f16351b692f75218ff1a451b3..b6012ad9278113e43c98e8615a0694ef9c30c48b 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -30,7 +30,8 @@ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ -#define IPSET_TYPE_REV_MAX 6 /* Forceadd support added */ +/* 6 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -179,7 +180,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -432,6 +436,9 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; @@ -541,6 +548,9 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c new file mode 100644 index 0000000000000000000000000000000000000000..65690b52a4d584cce8a6b7ce2864f716f4d39fd1 --- /dev/null +++ b/net/netfilter/ipset/ip_set_hash_mac.c @@ -0,0 +1,173 @@ +/* Copyright (C) 2014 Jozsef Kadlecsik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* Kernel module implementing an IP set type: the hash:mac type */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define IPSET_TYPE_REV_MIN 0 +#define IPSET_TYPE_REV_MAX 0 + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jozsef Kadlecsik "); +IP_SET_MODULE_DESC("hash:mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); +MODULE_ALIAS("ip_set_hash:mac"); + +/* Type specific function prefix */ +#define HTYPE hash_mac + +/* Member elements */ +struct hash_mac4_elem { + /* Zero valued IP addresses cannot be stored */ + union { + unsigned char ether[ETH_ALEN]; + __be32 foo[2]; + }; +}; + +/* Common functions */ + +static inline bool +hash_mac4_data_equal(const struct hash_mac4_elem *e1, + const struct hash_mac4_elem *e2, + u32 *multi) +{ + return ether_addr_equal(e1->ether, e2->ether); +} + +static inline bool +hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e) +{ + return nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether); +} + +static inline void +hash_mac4_data_next(struct hash_mac4_elem *next, + const struct hash_mac4_elem *e) +{ +} + +#define MTYPE hash_mac4 +#define PF 4 +#define HOST_MASK 32 +#define IP_SET_EMIT_CREATE +#define IP_SET_PROTO_UNDEF +#include "ip_set_hash_gen.h" + +/* Zero valued element is not supported */ +static const unsigned char invalid_ether[ETH_ALEN] = { 0 }; + +static int +hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb, + const struct xt_action_param *par, + enum ipset_adt adt, struct ip_set_adt_opt *opt) +{ + ipset_adtfn adtfn = set->variant->adt[adt]; + struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } }; + struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); + + /* MAC can be src only */ + if (!(opt->flags & IPSET_DIM_ONE_SRC)) + return 0; + + if (skb_mac_header(skb) < skb->head || + (skb_mac_header(skb) + ETH_HLEN) > skb->data) + return -EINVAL; + + memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN); + if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0) + return -EINVAL; + return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); +} + +static int +hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[], + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) +{ + ipset_adtfn adtfn = set->variant->adt[adt]; + struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } }; + struct ip_set_ext ext = IP_SET_INIT_UEXT(set); + int ret; + + if (unlikely(!tb[IPSET_ATTR_ETHER] || + !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) + return -IPSET_ERR_PROTOCOL; + + if (tb[IPSET_ATTR_LINENO]) + *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); + + ret = ip_set_get_extensions(set, tb, &ext); + if (ret) + return ret; + memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN); + if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0) + return -IPSET_ERR_HASH_ELEM; + + return adtfn(set, &e, &ext, &ext, flags); +} + +static struct ip_set_type hash_mac_type __read_mostly = { + .name = "hash:mac", + .protocol = IPSET_PROTOCOL, + .features = IPSET_TYPE_MAC, + .dimension = IPSET_DIM_ONE, + .family = NFPROTO_UNSPEC, + .revision_min = IPSET_TYPE_REV_MIN, + .revision_max = IPSET_TYPE_REV_MAX, + .create = hash_mac_create, + .create_policy = { + [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, + [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, 
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, + [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, + [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, + [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, + }, + .adt_policy = { + [IPSET_ATTR_ETHER] = { .type = NLA_BINARY, + .len = ETH_ALEN }, + [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, + [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, + [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, + [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, + [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, + }, + .me = THIS_MODULE, +}; + +static int __init +hash_mac_init(void) +{ + return ip_set_type_register(&hash_mac_type); +} + +static void __exit +hash_mac_fini(void) +{ + ip_set_type_unregister(&hash_mac_type); +} + +module_init(hash_mac_init); +module_exit(hash_mac_fini); diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4c7d495783a3aefa9447d4b37114bd3715928b0a..6b3ac10ac2f18718bda3b1b1f4108fa347eda377 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -27,7 +27,8 @@ /* 2 nomatch flag support added */ /* 3 Counters support added */ /* 4 Comments support added */ -#define IPSET_TYPE_REV_MAX 5 /* Forceadd support added */ +/* 5 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 6 /* skbinfo mapping support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -150,7 +151,10 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -318,7 +322,10 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; @@ -377,6 +384,9 @@ static struct ip_set_type hash_net_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index db2606805b3575b91a87cded299222c571fbf6ba..35dd358734421fd5c7be3e4cc74d83a7074f7df5 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -28,7 +28,8 @@ /* 2 /0 support added */ /* 3 Counters support added */ /* 4 Comments support added */ -#define IPSET_TYPE_REV_MAX 5 /* Forceadd support added */ +/* 5 Forceadd support 
added */ +#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -236,7 +237,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (!nf_bridge) @@ -281,7 +282,10 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -470,7 +474,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, ip6_netmask(&e.ip, e.cidr); if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const struct nf_bridge_info *nf_bridge = skb->nf_bridge; if (!nf_bridge) @@ -514,7 +518,10 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; @@ -590,6 +597,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 3e99987e4bf248f6d6085118dba52a4c24ee108a..da00284b3571a748d843d0d79f6a193a876000d2 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -24,7 +24,8 @@ #include #define IPSET_TYPE_REV_MIN 0 -#define IPSET_TYPE_REV_MAX 1 /* Forceadd support added */ +/* 1 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Smith "); @@ -171,7 +172,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -203,7 +207,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], flags |= (IPSET_FLAG_NOMATCH << 16); } - if (adt == 
IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] && + if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) { e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0])); e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1])); @@ -219,9 +223,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ret; if (ip_to < ip) swap(ip, ip_to); - if (ip + UINT_MAX == ip_to) + if (unlikely(ip + UINT_MAX == ip_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip, ip_to, e.cidr[0]); ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { @@ -230,10 +235,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ret; if (ip2_to < ip2_from) swap(ip2_from, ip2_to); - if (ip2_from + UINT_MAX == ip2_to) + if (unlikely(ip2_from + UINT_MAX == ip2_to)) return -IPSET_ERR_HASH_RANGE; - - } + } else + ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); if (retried) ip = ntohl(h->next.ip[0]); @@ -393,7 +398,10 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; @@ -461,6 +469,9 @@ static struct ip_set_type hash_netnet_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 1c645fbd09c7d6bcb337b90977ae2536b2ec9ebd..c0ddb58d19dcca43818ea734cbc1ca64ef7eb1c9 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -29,7 +29,8 @@ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ -#define IPSET_TYPE_REV_MAX 6 /* Forceadd support added */ +/* 6 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -172,7 +173,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -389,7 +393,10 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, 
IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; @@ -489,6 +496,9 @@ static struct ip_set_type hash_netport_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index c0d2ba73f8b2394995bba3737a833cc47138d581..b8053d675fc39ce0261dcf6270bc7616f383884c 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -26,7 +26,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 0 Comments support added */ -#define IPSET_TYPE_REV_MAX 1 /* Forceadd support added */ +/* 1 Forceadd support added */ +#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Smith "); @@ -189,7 +190,10 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -257,7 +261,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], swap(ip, ip_to); if (unlikely(ip + UINT_MAX == ip_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip, ip_to, e.cidr[0]); port_to = port = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { @@ -275,7 +280,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], swap(ip2_from, ip2_to); if (unlikely(ip2_from + UINT_MAX == ip2_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); if (retried) ip = ntohl(h->next.ip[0]); @@ -458,7 +464,10 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; @@ -567,6 +576,9 @@ static struct ip_set_type hash_netportnet_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 3e2317f3cf68625848edf5bc1d270b37d7d2af51..f8f682806e36df61fe6a606fcc94ed96660e2227 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ 
b/net/netfilter/ipset/ip_set_list_set.c @@ -17,7 +17,8 @@ #define IPSET_TYPE_REV_MIN 0 /* 1 Counters support added */ -#define IPSET_TYPE_REV_MAX 2 /* Comments support added */ +/* 2 Comments support added */ +#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -73,6 +74,10 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, ip_set_update_counter(ext_counter(e, set), ext, &opt->ext, cmdflags); + if (SET_WITH_SKBINFO(set)) + ip_set_get_skbinfo(ext_skbinfo(e, set), + ext, &opt->ext, + cmdflags); return ret; } } @@ -197,6 +202,8 @@ list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d, ip_set_init_counter(ext_counter(e, set), ext); if (SET_WITH_COMMENT(set)) ip_set_init_comment(ext_comment(e, set), ext); + if (SET_WITH_SKBINFO(set)) + ip_set_init_skbinfo(ext_skbinfo(e, set), ext); return 0; } @@ -307,6 +314,8 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext, ip_set_init_counter(ext_counter(e, set), ext); if (SET_WITH_COMMENT(set)) ip_set_init_comment(ext_comment(e, set), ext); + if (SET_WITH_SKBINFO(set)) + ip_set_init_skbinfo(ext_skbinfo(e, set), ext); /* Set is already added to the list */ ip_set_put_byindex(map->net, d->id); return 0; @@ -378,7 +387,10 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[], !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || - !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES))) + !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || + !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) @@ -597,7 +609,9 @@ init_list_set(struct net *net, struct ip_set *set, u32 size) struct set_elem *e; u32 i; - map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL); + map = kzalloc(sizeof(*map) + + min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize, + GFP_KERNEL); if (!map) return false; @@ -665,6 +679,9 @@ static struct ip_set_type list_set_type __read_mostly = { [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, + [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, + [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, + [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index 0c3b1670b0d164cddf2e80717a89737821fd2b3b..3b6929dec7487a00630c1611892eabf6d4a14462 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -152,6 +152,16 @@ config IP_VS_WLC If you want to compile it in kernel, say Y. To compile it as a module, choose M here. If unsure, say N. +config IP_VS_FO + tristate "weighted failover scheduling" + ---help--- + The weighted failover scheduling algorithm directs network + connections to the server with the highest weight that is + currently available. + + If you want to compile it in kernel, say Y. To compile it as a + module, choose M here. If unsure, say N. 
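Note on the IP_VS_FO option added above: the policy it describes (send connections to the highest-weight destination that is currently usable) can be sketched in plain C as below. This is only an illustration of the selection rule under that assumption; the types and function are hypothetical and do not reflect the ip_vs_fo scheduler's actual data structures.

	/* Illustrative sketch of weighted failover selection: pick the
	 * available destination with the highest weight.  Names are
	 * made up for this example, not kernel code.
	 */
	#include <stdio.h>

	struct dest {
		const char *name;
		int weight;
		int available;		/* 0 = overloaded or unusable */
	};

	static const struct dest *fo_select(const struct dest *d, int n)
	{
		const struct dest *best = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (!d[i].available)
				continue;
			if (!best || d[i].weight > best->weight)
				best = &d[i];
		}
		return best;		/* NULL if nothing is usable */
	}

	int main(void)
	{
		const struct dest pool[] = {
			{ "rs1", 100, 0 },	/* preferred, but down */
			{ "rs2",  50, 1 },	/* picked: best available weight */
			{ "rs3",  10, 1 },
		};
		const struct dest *d = fo_select(pool, 3);

		printf("chosen: %s\n", d ? d->name : "none");
		return 0;
	}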
+ config IP_VS_LBLC tristate "locality-based least-connection scheduling" ---help--- diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile index 34ee602ddb667d806691111c4f6cede4227a6872..38b2723b2e3d10211c6bd00aa0aef14f78c765c1 100644 --- a/net/netfilter/ipvs/Makefile +++ b/net/netfilter/ipvs/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_IP_VS_RR) += ip_vs_rr.o obj-$(CONFIG_IP_VS_WRR) += ip_vs_wrr.o obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o +obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 610e19c0e13fc82b15eb3fa1dd8328df29d9baa2..b0f7b626b56da755222c0a1bd9a3a7b276ba32c8 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -77,6 +78,13 @@ static unsigned int ip_vs_conn_rnd __read_mostly; #define CT_LOCKARRAY_SIZE (1<packet_xmit = ip_vs_tunnel_xmit; +#ifdef CONFIG_IP_VS_IPV6 + if (cp->daf == AF_INET6) + cp->packet_xmit = ip_vs_tunnel_xmit_v6; + else +#endif + cp->packet_xmit = ip_vs_tunnel_xmit; break; case IP_VS_CONN_F_DROUTE: @@ -514,7 +527,10 @@ static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp) break; case IP_VS_CONN_F_TUNNEL: - cp->packet_xmit = ip_vs_tunnel_xmit_v6; + if (cp->daf == AF_INET6) + cp->packet_xmit = ip_vs_tunnel_xmit_v6; + else + cp->packet_xmit = ip_vs_tunnel_xmit; break; case IP_VS_CONN_F_DROUTE: @@ -580,7 +596,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) ip_vs_proto_name(cp->protocol), IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), - IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), + IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_fwd_tag(cp), cp->state, cp->flags, atomic_read(&cp->refcnt), atomic_read(&dest->refcnt)); @@ -616,7 +632,13 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp) struct ip_vs_dest *dest; rcu_read_lock(); - dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, + + /* This function is only invoked by the synchronization code. 
We do + * not currently support heterogeneous pools with synchronization, + * so we can make the assumption that the svc_af is the same as the + * dest_af + */ + dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, cp->af, &cp->daddr, cp->dport, &cp->vaddr, cp->vport, cp->protocol, cp->fwmark, cp->flags); if (dest) { @@ -671,7 +693,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) ip_vs_proto_name(cp->protocol), IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), - IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), + IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), ip_vs_fwd_tag(cp), cp->state, cp->flags, atomic_read(&cp->refcnt), atomic_read(&dest->refcnt)); @@ -740,7 +762,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct) ntohs(ct->cport), IP_VS_DBG_ADDR(ct->af, &ct->vaddr), ntohs(ct->vport), - IP_VS_DBG_ADDR(ct->af, &ct->daddr), + IP_VS_DBG_ADDR(ct->daf, &ct->daddr), ntohs(ct->dport)); /* @@ -848,7 +870,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) * Create a new connection entry and hash it into the ip_vs_conn_tab */ struct ip_vs_conn * -ip_vs_conn_new(const struct ip_vs_conn_param *p, +ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark) { @@ -867,6 +889,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); ip_vs_conn_net_set(cp, p->net); cp->af = p->af; + cp->daf = dest_af; cp->protocol = p->protocol; ip_vs_addr_set(p->af, &cp->caddr, p->caddr); cp->cport = p->cport; @@ -874,7 +897,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, &cp->vaddr, p->vaddr); cp->vport = p->vport; - ip_vs_addr_set(p->af, &cp->daddr, daddr); + ip_vs_addr_set(cp->daf, &cp->daddr, daddr); cp->dport = dport; cp->flags = flags; cp->fwmark = fwmark; @@ -1036,6 +1059,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) struct net *net = seq_file_net(seq); char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3]; size_t len = 0; + char dbuf[IP_VS_ADDRSTRLEN]; if (!ip_vs_conn_net_eq(cp, net)) return 0; @@ -1049,25 +1073,33 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) } pe_data[len] = '\0'; +#ifdef CONFIG_IP_VS_IPV6 + if (cp->daf == AF_INET6) + snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6); + else +#endif + snprintf(dbuf, sizeof(dbuf), "%08X", + ntohl(cp->daddr.ip)); + #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X " - "%pI6 %04X %-11s %7lu%s\n", + "%s %04X %-11s %7lu%s\n", ip_vs_proto_name(cp->protocol), &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), - &cp->daddr.in6, ntohs(cp->dport), + dbuf, ntohs(cp->dport), ip_vs_state_name(cp->protocol, cp->state), (cp->timer.expires-jiffies)/HZ, pe_data); else #endif seq_printf(seq, "%-3s %08X %04X %08X %04X" - " %08X %04X %-11s %7lu%s\n", + " %s %04X %-11s %7lu%s\n", ip_vs_proto_name(cp->protocol), ntohl(cp->caddr.ip), ntohs(cp->cport), ntohl(cp->vaddr.ip), ntohs(cp->vport), - ntohl(cp->daddr.ip), ntohs(cp->dport), + dbuf, ntohs(cp->dport), ip_vs_state_name(cp->protocol, cp->state), (cp->timer.expires-jiffies)/HZ, pe_data); } @@ -1105,6 +1137,7 @@ static const char *ip_vs_origin_name(unsigned int flags) static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) { + char dbuf[IP_VS_ADDRSTRLEN]; if (v == SEQ_START_TOKEN) seq_puts(seq, 
@@ -1116,13 +1149,22 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) if (!ip_vs_conn_net_eq(cp, net)) return 0; +#ifdef CONFIG_IP_VS_IPV6 + if (cp->daf == AF_INET6) + snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6); + else +#endif + snprintf(dbuf, sizeof(dbuf), "%08X", + ntohl(cp->daddr.ip)); + #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) - seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n", + seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X " + "%s %04X %-11s %-6s %7lu\n", ip_vs_proto_name(cp->protocol), &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), - &cp->daddr.in6, ntohs(cp->dport), + dbuf, ntohs(cp->dport), ip_vs_state_name(cp->protocol, cp->state), ip_vs_origin_name(cp->flags), (cp->timer.expires-jiffies)/HZ); @@ -1130,11 +1172,11 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) #endif seq_printf(seq, "%-3s %08X %04X %08X %04X " - "%08X %04X %-11s %-6s %7lu\n", + "%s %04X %-11s %-6s %7lu\n", ip_vs_proto_name(cp->protocol), ntohl(cp->caddr.ip), ntohs(cp->cport), ntohl(cp->vaddr.ip), ntohs(cp->vport), - ntohl(cp->daddr.ip), ntohs(cp->dport), + dbuf, ntohs(cp->dport), ip_vs_state_name(cp->protocol, cp->state), ip_vs_origin_name(cp->flags), (cp->timer.expires-jiffies)/HZ); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 5c34e8d42e0190a14ec31c1238bde5929fdf0539..990decba1fe418e36e59a1f081fcf0e47188da29 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -328,7 +328,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, * This adds param.pe_data to the template, * and thus param.pe_data will be destroyed * when the template expires */ - ct = ip_vs_conn_new(¶m, &dest->addr, dport, + ct = ip_vs_conn_new(¶m, dest->af, &dest->addr, dport, IP_VS_CONN_F_TEMPLATE, dest, skb->mark); if (ct == NULL) { kfree(param.pe_data); @@ -357,7 +357,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc, ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr, src_port, &iph->daddr, dst_port, ¶m); - cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest, skb->mark); + cp = ip_vs_conn_new(¶m, dest->af, &dest->addr, dport, flags, dest, + skb->mark); if (cp == NULL) { ip_vs_conn_put(ct); *ignored = -1; @@ -479,7 +480,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr, pptr[0], &iph->daddr, pptr[1], &p); - cp = ip_vs_conn_new(&p, &dest->addr, + cp = ip_vs_conn_new(&p, dest->af, &dest->addr, dest->port ? 
dest->port : pptr[1], flags, dest, skb->mark); if (!cp) { @@ -491,9 +492,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u " "d:%s:%u conn->flags:%X conn->refcnt:%d\n", ip_vs_fwd_tag(cp), - IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport), - IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport), - IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport), + IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), + IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), + IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), cp->flags, atomic_read(&cp->refcnt)); ip_vs_conn_stats(cp, svc); @@ -550,7 +551,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr, pptr[0], &iph->daddr, pptr[1], &p); - cp = ip_vs_conn_new(&p, &daddr, 0, + cp = ip_vs_conn_new(&p, svc->af, &daddr, 0, IP_VS_CONN_F_BYPASS | flags, NULL, skb->mark); if (!cp) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index fd3f444a4f964428ae7290ba337384cf08d6028a..ac7ba689efe76c3e3df89c44dbd0094b08b6cf89 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -574,8 +574,8 @@ bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol, * Called under RCU lock. */ static struct ip_vs_dest * -ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, - __be16 dport) +ip_vs_lookup_dest(struct ip_vs_service *svc, int dest_af, + const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest; @@ -583,9 +583,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, * Find the destination for the given service */ list_for_each_entry_rcu(dest, &svc->destinations, n_list) { - if ((dest->af == svc->af) - && ip_vs_addr_equal(svc->af, &dest->addr, daddr) - && (dest->port == dport)) { + if ((dest->af == dest_af) && + ip_vs_addr_equal(dest_af, &dest->addr, daddr) && + (dest->port == dport)) { /* HIT */ return dest; } @@ -602,7 +602,7 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, * on the backup. * Called under RCU lock, no refcnt is returned. */ -struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, +struct ip_vs_dest *ip_vs_find_dest(struct net *net, int svc_af, int dest_af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, @@ -613,14 +613,14 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, struct ip_vs_service *svc; __be16 port = dport; - svc = ip_vs_service_find(net, af, fwmark, protocol, vaddr, vport); + svc = ip_vs_service_find(net, svc_af, fwmark, protocol, vaddr, vport); if (!svc) return NULL; if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) port = 0; - dest = ip_vs_lookup_dest(svc, daddr, port); + dest = ip_vs_lookup_dest(svc, dest_af, daddr, port); if (!dest) - dest = ip_vs_lookup_dest(svc, daddr, port ^ dport); + dest = ip_vs_lookup_dest(svc, dest_af, daddr, port ^ dport); return dest; } @@ -657,8 +657,8 @@ static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest) * scheduling. 
*/ static struct ip_vs_dest * -ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, - __be16 dport) +ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af, + const union nf_inet_addr *daddr, __be16 dport) { struct ip_vs_dest *dest; struct netns_ipvs *ipvs = net_ipvs(svc->net); @@ -671,11 +671,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " "dest->refcnt=%d\n", dest->vfwmark, - IP_VS_DBG_ADDR(svc->af, &dest->addr), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->refcnt)); - if (dest->af == svc->af && - ip_vs_addr_equal(svc->af, &dest->addr, daddr) && + if (dest->af == dest_af && + ip_vs_addr_equal(dest_af, &dest->addr, daddr) && dest->port == dport && dest->vfwmark == svc->fwmark && dest->protocol == svc->protocol && @@ -779,6 +779,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct ip_vs_scheduler *sched; int conn_flags; + /* We cannot modify an address and change the address family */ + BUG_ON(!add && udest->af != dest->af); + + if (add && udest->af != svc->af) + ipvs->mixed_address_family_dests++; + /* set the weight and the flags */ atomic_set(&dest->weight, udest->weight); conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK; @@ -816,6 +822,8 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, dest->u_threshold = udest->u_threshold; dest->l_threshold = udest->l_threshold; + dest->af = udest->af; + spin_lock_bh(&dest->dst_lock); __ip_vs_dst_cache_reset(dest); spin_unlock_bh(&dest->dst_lock); @@ -847,7 +855,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, EnterFunction(2); #ifdef CONFIG_IP_VS_IPV6 - if (svc->af == AF_INET6) { + if (udest->af == AF_INET6) { atype = ipv6_addr_type(&udest->addr.in6); if ((!(atype & IPV6_ADDR_UNICAST) || atype & IPV6_ADDR_LINKLOCAL) && @@ -875,12 +883,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, u64_stats_init(&ip_vs_dest_stats->syncp); } - dest->af = svc->af; + dest->af = udest->af; dest->protocol = svc->protocol; dest->vaddr = svc->addr; dest->vport = svc->port; dest->vfwmark = svc->fwmark; - ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr); + ip_vs_addr_copy(udest->af, &dest->addr, &udest->addr); dest->port = udest->port; atomic_set(&dest->activeconns, 0); @@ -928,11 +936,11 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) return -ERANGE; } - ip_vs_addr_copy(svc->af, &daddr, &udest->addr); + ip_vs_addr_copy(udest->af, &daddr, &udest->addr); /* We use function that requires RCU lock */ rcu_read_lock(); - dest = ip_vs_lookup_dest(svc, &daddr, dport); + dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport); rcu_read_unlock(); if (dest != NULL) { @@ -944,12 +952,12 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) * Check if the dest already exists in the trash and * is from the same service */ - dest = ip_vs_trash_get_dest(svc, &daddr, dport); + dest = ip_vs_trash_get_dest(svc, udest->af, &daddr, dport); if (dest != NULL) { IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " "dest->refcnt=%d, service %u/%s:%u\n", - IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport), + IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport), atomic_read(&dest->refcnt), dest->vfwmark, IP_VS_DBG_ADDR(svc->af, &dest->vaddr), @@ -992,11 +1000,11 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) return 
-ERANGE; } - ip_vs_addr_copy(svc->af, &daddr, &udest->addr); + ip_vs_addr_copy(udest->af, &daddr, &udest->addr); /* We use function that requires RCU lock */ rcu_read_lock(); - dest = ip_vs_lookup_dest(svc, &daddr, dport); + dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport); rcu_read_unlock(); if (dest == NULL) { @@ -1055,6 +1063,9 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, list_del_rcu(&dest->n_list); svc->num_dests--; + if (dest->af != svc->af) + net_ipvs(svc->net)->mixed_address_family_dests--; + if (svcupd) { struct ip_vs_scheduler *sched; @@ -1078,7 +1089,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) /* We use function that requires RCU lock */ rcu_read_lock(); - dest = ip_vs_lookup_dest(svc, &udest->addr, dport); + dest = ip_vs_lookup_dest(svc, udest->af, &udest->addr, dport); rcu_read_unlock(); if (dest == NULL) { @@ -2179,29 +2190,41 @@ static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u) return 0; } +#define CMDID(cmd) (cmd - IP_VS_BASE_CTL) + +struct ip_vs_svcdest_user { + struct ip_vs_service_user s; + struct ip_vs_dest_user d; +}; + +static const unsigned char set_arglen[CMDID(IP_VS_SO_SET_MAX) + 1] = { + [CMDID(IP_VS_SO_SET_ADD)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_EDIT)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_DEL)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_ADDDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_DELDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_EDITDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), + [CMDID(IP_VS_SO_SET_STARTDAEMON)] = sizeof(struct ip_vs_daemon_user), + [CMDID(IP_VS_SO_SET_STOPDAEMON)] = sizeof(struct ip_vs_daemon_user), + [CMDID(IP_VS_SO_SET_ZERO)] = sizeof(struct ip_vs_service_user), +}; -#define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) -#define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user)) -#define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \ - sizeof(struct ip_vs_dest_user)) -#define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) -#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user)) -#define MAX_ARG_LEN SVCDEST_ARG_LEN - -static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { - [SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0, - [SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, +union ip_vs_set_arglen { + struct ip_vs_service_user field_IP_VS_SO_SET_ADD; + struct ip_vs_service_user field_IP_VS_SO_SET_EDIT; + struct ip_vs_service_user field_IP_VS_SO_SET_DEL; + struct ip_vs_svcdest_user field_IP_VS_SO_SET_ADDDEST; + struct ip_vs_svcdest_user field_IP_VS_SO_SET_DELDEST; + struct ip_vs_svcdest_user field_IP_VS_SO_SET_EDITDEST; + struct ip_vs_timeout_user field_IP_VS_SO_SET_TIMEOUT; + struct ip_vs_daemon_user field_IP_VS_SO_SET_STARTDAEMON; + struct ip_vs_daemon_user field_IP_VS_SO_SET_STOPDAEMON; + struct ip_vs_service_user field_IP_VS_SO_SET_ZERO; }; +#define MAX_SET_ARGLEN sizeof(union 
ip_vs_set_arglen) + static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, struct ip_vs_service_user *usvc_compat) { @@ -2232,6 +2255,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, udest->weight = udest_compat->weight; udest->u_threshold = udest_compat->u_threshold; udest->l_threshold = udest_compat->l_threshold; + udest->af = AF_INET; } static int @@ -2239,7 +2263,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { struct net *net = sock_net(sk); int ret; - unsigned char arg[MAX_ARG_LEN]; + unsigned char arg[MAX_SET_ARGLEN]; struct ip_vs_service_user *usvc_compat; struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; @@ -2247,16 +2271,15 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) struct ip_vs_dest_user_kern udest; struct netns_ipvs *ipvs = net_ipvs(net); + BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) return -EINVAL; - if (len < 0 || len > MAX_ARG_LEN) - return -EINVAL; - if (len != set_arglen[SET_CMDID(cmd)]) { - pr_err("set_ctl: len %u != %u\n", - len, set_arglen[SET_CMDID(cmd)]); + if (len != set_arglen[CMDID(cmd)]) { + IP_VS_DBG(1, "set_ctl: len %u != %u\n", + len, set_arglen[CMDID(cmd)]); return -EINVAL; } @@ -2469,6 +2492,12 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get, if (count >= get->num_dests) break; + /* Cannot expose heterogeneous members via sockopt + * interface + */ + if (dest->af != svc->af) + continue; + entry.addr = dest->addr.ip; entry.port = dest->port; entry.conn_flags = atomic_read(&dest->conn_flags); @@ -2512,51 +2541,51 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u) #endif } +static const unsigned char get_arglen[CMDID(IP_VS_SO_GET_MAX) + 1] = { + [CMDID(IP_VS_SO_GET_VERSION)] = 64, + [CMDID(IP_VS_SO_GET_INFO)] = sizeof(struct ip_vs_getinfo), + [CMDID(IP_VS_SO_GET_SERVICES)] = sizeof(struct ip_vs_get_services), + [CMDID(IP_VS_SO_GET_SERVICE)] = sizeof(struct ip_vs_service_entry), + [CMDID(IP_VS_SO_GET_DESTS)] = sizeof(struct ip_vs_get_dests), + [CMDID(IP_VS_SO_GET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), + [CMDID(IP_VS_SO_GET_DAEMON)] = 2 * sizeof(struct ip_vs_daemon_user), +}; -#define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) -#define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo)) -#define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services)) -#define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry)) -#define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests)) -#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) -#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2) - -static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = { - [GET_CMDID(IP_VS_SO_GET_VERSION)] = 64, - [GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN, +union ip_vs_get_arglen { + char field_IP_VS_SO_GET_VERSION[64]; + struct ip_vs_getinfo field_IP_VS_SO_GET_INFO; + struct ip_vs_get_services field_IP_VS_SO_GET_SERVICES; + struct ip_vs_service_entry field_IP_VS_SO_GET_SERVICE; + struct ip_vs_get_dests field_IP_VS_SO_GET_DESTS; + struct ip_vs_timeout_user 
field_IP_VS_SO_GET_TIMEOUT; + struct ip_vs_daemon_user field_IP_VS_SO_GET_DAEMON[2]; }; +#define MAX_GET_ARGLEN sizeof(union ip_vs_get_arglen) + static int do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { - unsigned char arg[128]; + unsigned char arg[MAX_GET_ARGLEN]; int ret = 0; unsigned int copylen; struct net *net = sock_net(sk); struct netns_ipvs *ipvs = net_ipvs(net); BUG_ON(!net); + BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) return -EINVAL; - if (*len < get_arglen[GET_CMDID(cmd)]) { - pr_err("get_ctl: len %u < %u\n", - *len, get_arglen[GET_CMDID(cmd)]); + copylen = get_arglen[CMDID(cmd)]; + if (*len < (int) copylen) { + IP_VS_DBG(1, "get_ctl: len %d < %u\n", *len, copylen); return -EINVAL; } - copylen = get_arglen[GET_CMDID(cmd)]; - if (copylen > 128) - return -EINVAL; - if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; /* @@ -2766,6 +2795,7 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 }, [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED }, + [IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 }, }; static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, @@ -3021,7 +3051,8 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS, atomic_read(&dest->inactconns)) || nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, - atomic_read(&dest->persistconns))) + atomic_read(&dest->persistconns)) || + nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af)) goto nla_put_failure; if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) goto nla_put_failure; @@ -3102,6 +3133,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, { struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1]; struct nlattr *nla_addr, *nla_port; + struct nlattr *nla_addr_family; /* Parse mandatory identifying destination fields first */ if (nla == NULL || @@ -3110,6 +3142,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; nla_port = attrs[IPVS_DEST_ATTR_PORT]; + nla_addr_family = attrs[IPVS_DEST_ATTR_ADDR_FAMILY]; if (!(nla_addr && nla_port)) return -EINVAL; @@ -3119,6 +3152,11 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); udest->port = nla_get_be16(nla_port); + if (nla_addr_family) + udest->af = nla_get_u16(nla_addr_family); + else + udest->af = 0; + /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, @@ -3223,6 +3261,12 @@ static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs) attrs[IPVS_DAEMON_ATTR_SYNC_ID])) return -EINVAL; + /* The synchronization protocol is incompatible with mixed family + * services + */ + if (net_ipvs(net)->mixed_address_family_dests > 0) + return -EINVAL; + return start_sync_thread(net, nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), @@ -3346,6 +3390,35 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) need_full_dest); if (ret) goto out; + + /* Old protocols did not allow the user to specify address + * family, so we set it to zero instead. 
We also didn't + * allow heterogeneous pools in the old code, so it's safe + * to assume that this will have the same address family as + * the service. + */ + if (udest.af == 0) + udest.af = svc->af; + + if (udest.af != svc->af) { + /* The synchronization protocol is incompatible + * with mixed family services + */ + if (net_ipvs(net)->sync_state) { + ret = -EINVAL; + goto out; + } + + /* Which connection types do we support? */ + switch (udest.conn_flags) { + case IP_VS_CONN_F_TUNNEL: + /* We are able to forward this */ + break; + default: + ret = -EINVAL; + goto out; + } + } } switch (cmd) { diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index c3b84546ea9e2b385878f07eeae926495890ad1f..6be5c538b71e6fda1085bf01849af5d9df169895 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c @@ -234,7 +234,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph->daddr), - IP_VS_DBG_ADDR(svc->af, &dest->addr), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); return dest; diff --git a/net/netfilter/ipvs/ip_vs_fo.c b/net/netfilter/ipvs/ip_vs_fo.c new file mode 100644 index 0000000000000000000000000000000000000000..e09874d02938721c24d2215496265c0c191cb9f0 --- /dev/null +++ b/net/netfilter/ipvs/ip_vs_fo.c @@ -0,0 +1,79 @@ +/* + * IPVS: Weighted Fail Over module + * + * Authors: Kenny Mathis + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Changes: + * Kenny Mathis : added initial functionality based on weight + * + */ + +#define KMSG_COMPONENT "IPVS" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> + +#include <net/ip_vs.h> + +/* Weighted Fail Over Module */ +static struct ip_vs_dest * +ip_vs_fo_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, + struct ip_vs_iphdr *iph) +{ + struct ip_vs_dest *dest, *hweight = NULL; + int hw = 0; /* Track highest weight */ + + IP_VS_DBG(6, "ip_vs_fo_schedule(): Scheduling...\n"); + + /* Basic failover functionality + * Find virtual server with highest weight and send it traffic + */ + list_for_each_entry_rcu(dest, &svc->destinations, n_list) { + if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && + atomic_read(&dest->weight) > hw) { + hweight = dest; + hw = atomic_read(&dest->weight); + } + } + + if (hweight) { + IP_VS_DBG_BUF(6, "FO: server %s:%u activeconns %d weight %d\n", + IP_VS_DBG_ADDR(hweight->af, &hweight->addr), + ntohs(hweight->port), + atomic_read(&hweight->activeconns), + atomic_read(&hweight->weight)); + return hweight; + } + + ip_vs_scheduler_err(svc, "no destination available"); + return NULL; +} + +static struct ip_vs_scheduler ip_vs_fo_scheduler = { + .name = "fo", + .refcnt = ATOMIC_INIT(0), + .module = THIS_MODULE, + .n_list = LIST_HEAD_INIT(ip_vs_fo_scheduler.n_list), + .schedule = ip_vs_fo_schedule, +}; + +static int __init ip_vs_fo_init(void) +{ + return register_ip_vs_scheduler(&ip_vs_fo_scheduler); +} + +static void __exit ip_vs_fo_cleanup(void) +{ + unregister_ip_vs_scheduler(&ip_vs_fo_scheduler); + synchronize_rcu(); +} + +module_init(ip_vs_fo_init); +module_exit(ip_vs_fo_cleanup); +MODULE_LICENSE("GPL");
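As an aside for readers unfamiliar with the scheduler: the "fo" module registered above simply forwards every new connection to the available real server with the highest weight, skipping overloaded ones. The stand-alone sketch below is illustrative only; it uses a plain array and hypothetical names (struct dest, fo_pick) in place of the kernel's RCU-protected destination list and atomic counters.

#include <stdio.h>

struct dest {
        const char *name;
        int weight;
        int overloaded;         /* stands in for IP_VS_DEST_F_OVERLOAD */
};

/* Pick the non-overloaded destination with the highest weight, or NULL
 * if none qualifies; this mirrors the loop in ip_vs_fo_schedule().
 */
static const struct dest *fo_pick(const struct dest *d, int n)
{
        const struct dest *best = NULL;
        int hw = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (!d[i].overloaded && d[i].weight > hw) {
                        best = &d[i];
                        hw = d[i].weight;
                }
        }
        return best;
}

int main(void)
{
        struct dest pool[] = {
                { "primary",  100, 1 },         /* overloaded, skipped */
                { "backup-a",  50, 0 },
                { "backup-b",  10, 0 },
        };
        const struct dest *d = fo_pick(pool, 3);

        printf("%s\n", d ? d->name : "none");   /* prints "backup-a" */
        return 0;
}

diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index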
77c173282f388ce81bbd54ea6e41761656d95045..a64fa15790e53f8ad0eda7c53b73be8bd849f2aa 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -233,7 +233,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET, IPPROTO_TCP, &cp->caddr, 0, &cp->vaddr, port, &p); - n_cp = ip_vs_conn_new(&p, &from, port, + /* As above, this is ipv4 only */ + n_cp = ip_vs_conn_new(&p, AF_INET, &from, port, IP_VS_CONN_F_NO_CPORT | IP_VS_CONN_F_NFCT, cp->dest, skb->mark); @@ -396,7 +397,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, htons(ntohs(cp->vport)-1), &p); n_cp = ip_vs_conn_in_get(&p); if (!n_cp) { - n_cp = ip_vs_conn_new(&p, &cp->daddr, + /* This is ipv4 only */ + n_cp = ip_vs_conn_new(&p, AF_INET, &cp->daddr, htons(ntohs(cp->dport)-1), IP_VS_CONN_F_NFCT, cp->dest, skb->mark); diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 547ff33c1efdb0cb92f3f8890640a77bba3a73b5..127f14046c519d9aa0d0cb7596b8bcf676b5a55b 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -199,11 +199,11 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, */ static inline struct ip_vs_lblc_entry * ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, - struct ip_vs_dest *dest) + u16 af, struct ip_vs_dest *dest) { struct ip_vs_lblc_entry *en; - en = ip_vs_lblc_get(dest->af, tbl, daddr); + en = ip_vs_lblc_get(af, tbl, daddr); if (en) { if (en->dest == dest) return en; @@ -213,8 +213,8 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, if (!en) return NULL; - en->af = dest->af; - ip_vs_addr_copy(dest->af, &en->addr, daddr); + en->af = af; + ip_vs_addr_copy(af, &en->addr, daddr); en->lastuse = jiffies; ip_vs_dest_hold(dest); @@ -521,13 +521,13 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, /* If we fail to create a cache entry, we'll just use the valid dest */ spin_lock_bh(&svc->sched_lock); if (!tbl->dead) - ip_vs_lblc_new(tbl, &iph->daddr, dest); + ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest); spin_unlock_bh(&svc->sched_lock); out: IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph->daddr), - IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); return dest; } diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 3f21a2f47de1ffc6be71bcc93b6c35be98f8ae3a..2229d2d8bbe0afe97e4d8fa4c2bfc95a6a93ee72 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -362,18 +362,18 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, */ static inline struct ip_vs_lblcr_entry * ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, - struct ip_vs_dest *dest) + u16 af, struct ip_vs_dest *dest) { struct ip_vs_lblcr_entry *en; - en = ip_vs_lblcr_get(dest->af, tbl, daddr); + en = ip_vs_lblcr_get(af, tbl, daddr); if (!en) { en = kmalloc(sizeof(*en), GFP_ATOMIC); if (!en) return NULL; - en->af = dest->af; - ip_vs_addr_copy(dest->af, &en->addr, daddr); + en->af = af; + ip_vs_addr_copy(af, &en->addr, daddr); en->lastuse = jiffies; /* initialize its dest set */ @@ -706,13 +706,13 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, /* If we fail to create a cache entry, we'll just use the valid dest */ spin_lock_bh(&svc->sched_lock); if (!tbl->dead) - 
ip_vs_lblcr_new(tbl, &iph->daddr, dest); + ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest); spin_unlock_bh(&svc->sched_lock); out: IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph->daddr), - IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); return dest; } diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c index 2bdcb1cf21279db80ebb0af0b091f600e8b4921b..19a0769a989aa331de570dd6ee2514db454f83ae 100644 --- a/net/netfilter/ipvs/ip_vs_lc.c +++ b/net/netfilter/ipvs/ip_vs_lc.c @@ -59,7 +59,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, else IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d " "inactconns %d\n", - IP_VS_DBG_ADDR(svc->af, &least->addr), + IP_VS_DBG_ADDR(least->af, &least->addr), ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->inactconns)); diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index 961a6de9bb29035458945185f488a5fc1209ba00..a8b63401e7731e6c8fef37b62c43425e2f96b43c 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c @@ -107,7 +107,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, out: IP_VS_DBG_BUF(6, "NQ: server %s:%u " "activeconns %d refcnt %d weight %d overhead %d\n", - IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), + IP_VS_DBG_ADDR(least->af, &least->addr), + ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 2f7ea7564044ccd0d8600a2b6688cb6603645588..5b84c0b566424dce498c7ff85ee3b1060df4047f 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -432,7 +432,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, pd->pp->name, ((direction == IP_VS_DIR_OUTPUT) ? "output " : "input "), - IP_VS_DBG_ADDR(cp->af, &cp->daddr), + IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index e3a697234a988cb9c2f03afbf122a56f99a454aa..8e92beb0cca9920238421af8a5b5206869c357e0 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -510,7 +510,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, th->fin ? 'F' : '.', th->ack ? 'A' : '.', th->rst ? 
'R' : '.', - IP_VS_DBG_ADDR(cp->af, &cp->daddr), + IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c index 176b87c35e34ea2b438739d21a3439f1ebade995..58bacfc461ee6a1d6df4e6e024032fb52a044e35 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c @@ -95,7 +95,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, spin_unlock_bh(&svc->sched_lock); IP_VS_DBG_BUF(6, "RR: server %s:%u " "activeconns %d refcnt %d weight %d\n", - IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->activeconns), atomic_read(&dest->refcnt), atomic_read(&dest->weight)); diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index e446b9fa7424c6382cb65433447f3febe2b17eeb..f8e2d00f528b945e774564854fc66f53dbc61970 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c @@ -108,7 +108,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_BUF(6, "SED: server %s:%u " "activeconns %d refcnt %d weight %d overhead %d\n", - IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), + IP_VS_DBG_ADDR(least->af, &least->addr), + ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index cc65b2f42cd40283fab4ed5a24a82767d8190a89..98a13433b68c226fee24e3421d5c46c5f0cc8a75 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c @@ -138,7 +138,7 @@ ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s, return dest; IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting", - IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); /* if the original dest is unavailable, loop around the table * starting from ihash to find a new dest @@ -153,7 +153,7 @@ ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s, return dest; IP_VS_DBG_BUF(6, "SH: selected unavailable " "server %s:%d (offset %d), reselecting", - IP_VS_DBG_ADDR(svc->af, &dest->addr), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), roffset); } @@ -192,7 +192,7 @@ ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc) RCU_INIT_POINTER(b->dest, dest); IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n", - i, IP_VS_DBG_ADDR(svc->af, &dest->addr), + i, IP_VS_DBG_ADDR(dest->af, &dest->addr), atomic_read(&dest->weight)); /* Don't move to next dest until filling weight */ @@ -342,7 +342,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph->saddr), - IP_VS_DBG_ADDR(svc->af, &dest->addr), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); return dest; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index eadffb29dec0cccf8333a6f446d3756d843ec4ee..7162c86fd50dca443c0e7c3aa008cdb93038b7e3 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -880,10 +880,17 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, * but still handled. 
*/ rcu_read_lock(); - dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, - param->vport, protocol, fwmark, flags); + /* This function is only invoked by the synchronization + * code. We do not currently support heterogeneous pools + * with synchronization, so we can make the assumption that + * the svc_af is the same as the dest_af + */ + dest = ip_vs_find_dest(net, type, type, daddr, dport, + param->vaddr, param->vport, protocol, + fwmark, flags); - cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); + cp = ip_vs_conn_new(param, type, daddr, dport, flags, dest, + fwmark); rcu_read_unlock(); if (!cp) { kfree(param->pe_data); diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index b5b4650d50a9180f211e6cce82e3393ece2fc11c..6b366fd905542ff086a36da4111bd11646d274d8 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c @@ -80,7 +80,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, IP_VS_DBG_BUF(6, "WLC: server %s:%u " "activeconns %d refcnt %d weight %d overhead %d\n", - IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), + IP_VS_DBG_ADDR(least->af, &least->addr), + ntohs(least->port), atomic_read(&least->activeconns), atomic_read(&least->refcnt), atomic_read(&least->weight), loh); diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 0546cd572d6b84bfe3d9feaea40d78e88cd6d23c..17e6d4406ca7c32657eff5e103d0aa1b9317e813 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c @@ -216,7 +216,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, found: IP_VS_DBG_BUF(6, "WRR: server %s:%u " "activeconns %d refcnt %d weight %d\n", - IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), + IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->activeconns), atomic_read(&dest->refcnt), atomic_read(&dest->weight)); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 56896a412bcec7ae01726734115d19bc04079274..91f17c1eb8a20c0226e01c68e255acf118a6a4d0 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -157,18 +157,113 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, return rt; } +#ifdef CONFIG_IP_VS_IPV6 +static inline int __ip_vs_is_local_route6(struct rt6_info *rt) +{ + return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK; +} +#endif + +static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, + int rt_mode, + bool new_rt_is_local) +{ + bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); + bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL); + bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR); + bool source_is_loopback; + bool old_rt_is_local; + +#ifdef CONFIG_IP_VS_IPV6 + if (skb_af == AF_INET6) { + int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); + + source_is_loopback = + (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && + (addr_type & IPV6_ADDR_LOOPBACK); + old_rt_is_local = __ip_vs_is_local_route6( + (struct rt6_info *)skb_dst(skb)); + } else +#endif + { + source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr); + old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; + } + + if (unlikely(new_rt_is_local)) { + if (!rt_mode_allow_local) + return true; + if (!rt_mode_allow_redirect && !old_rt_is_local) + return true; + } else { + if (!rt_mode_allow_non_local) + return true; + if (source_is_loopback) + return true; + } + return false; +}
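The crosses_local_route_boundary() helper above consolidates the local/non-local sanity checks that were previously open-coded separately in __ip_vs_get_out_rt() and __ip_vs_get_out_rt_v6(). The user-space sketch below is a rough rendering of that decision table only; the RT_MODE_* values and the function name are made up for illustration and are not the kernel's IP_VS_RT_MODE_* definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the IP_VS_RT_MODE_* bits tested by the helper */
#define RT_MODE_LOCAL           0x1     /* routing to a local address is allowed */
#define RT_MODE_NON_LOCAL       0x2     /* routing to a non-local address is allowed */
#define RT_MODE_RDR             0x4     /* redirect from non-local to local is allowed */

/* Return true when the new route would cross a local/non-local boundary
 * that rt_mode does not permit, mirroring crosses_local_route_boundary().
 */
static bool crosses_boundary(int rt_mode, bool new_rt_is_local,
                             bool old_rt_is_local, bool source_is_loopback)
{
        if (new_rt_is_local) {
                if (!(rt_mode & RT_MODE_LOCAL))
                        return true;
                if (!(rt_mode & RT_MODE_RDR) && !old_rt_is_local)
                        return true;
        } else {
                if (!(rt_mode & RT_MODE_NON_LOCAL))
                        return true;
                if (source_is_loopback)
                        return true;
        }
        return false;
}

int main(void)
{
        /* NAT-style mode: local, non-local and redirect all permitted */
        int nat_mode = RT_MODE_LOCAL | RT_MODE_NON_LOCAL | RT_MODE_RDR;

        /* A loopback source heading to a non-local destination is refused */
        printf("%d\n", crosses_boundary(nat_mode, false, false, true));   /* 1 */
        /* Ordinary forwarding to a non-local destination is fine */
        printf("%d\n", crosses_boundary(nat_mode, false, false, false));  /* 0 */
        return 0;
}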
+ +static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu) +{ + struct sock *sk = skb->sk; + struct rtable *ort = skb_rtable(skb); + + if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT) + ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); +} + +static inline bool ensure_mtu_is_adequate(int skb_af, int rt_mode, + struct ip_vs_iphdr *ipvsh, + struct sk_buff *skb, int mtu) +{ +#ifdef CONFIG_IP_VS_IPV6 + if (skb_af == AF_INET6) { + struct net *net = dev_net(skb_dst(skb)->dev); + + if (unlikely(__mtu_check_toobig_v6(skb, mtu))) { + if (!skb->dev) + skb->dev = net->loopback_dev; + /* only send ICMP too big on first fragment */ + if (!ipvsh->fragoffs) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + IP_VS_DBG(1, "frag needed for %pI6c\n", + &ipv6_hdr(skb)->saddr); + return false; + } + } else +#endif + { + struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); + + /* If we're going to tunnel the packet and pmtu discovery + * is disabled, we'll just fragment it anyway + */ + if ((rt_mode & IP_VS_RT_MODE_TUNNEL) && !sysctl_pmtu_disc(ipvs)) + return true; + + if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) && + skb->len > mtu && !skb_is_gso(skb))) { + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + IP_VS_DBG(1, "frag needed for %pI4\n", + &ip_hdr(skb)->saddr); + return false; + } + } + + return true; +} + /* Get route to destination or remote server */ static int -__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, - __be32 daddr, int rt_mode, __be32 *ret_saddr) +__ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, + __be32 daddr, int rt_mode, __be32 *ret_saddr, + struct ip_vs_iphdr *ipvsh) { struct net *net = dev_net(skb_dst(skb)->dev); - struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_dest_dst *dest_dst; struct rtable *rt; /* Route to the other host */ - struct rtable *ort; /* Original route */ - struct iphdr *iph; - __be16 df; int mtu; int local, noref = 1; @@ -218,30 +313,14 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, } local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0; - if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & - rt_mode)) { - IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n", - (rt->rt_flags & RTCF_LOCAL) ? 
- "local":"non-local", &daddr); + if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, + local))) { + IP_VS_DBG_RL("We are crossing local and non-local addresses" + " daddr=%pI4\n", &dest->addr.ip); goto err_put; } - iph = ip_hdr(skb); - if (likely(!local)) { - if (unlikely(ipv4_is_loopback(iph->saddr))) { - IP_VS_DBG_RL("Stopping traffic from loopback address " - "%pI4 to non-local address, dest: %pI4\n", - &iph->saddr, &daddr); - goto err_put; - } - } else { - ort = skb_rtable(skb); - if (!(rt_mode & IP_VS_RT_MODE_RDR) && - !(ort->rt_flags & RTCF_LOCAL)) { - IP_VS_DBG_RL("Redirect from non-local address %pI4 to " - "local requires NAT method, dest: %pI4\n", - &iph->daddr, &daddr); - goto err_put; - } + + if (unlikely(local)) { /* skb to local stack, preserve old route */ if (!noref) ip_rt_put(rt); @@ -250,28 +329,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) { mtu = dst_mtu(&rt->dst); - df = iph->frag_off & htons(IP_DF); } else { - struct sock *sk = skb->sk; - mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); if (mtu < 68) { IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); goto err_put; } - ort = skb_rtable(skb); - if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT) - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); - /* MTU check allowed? */ - df = sysctl_pmtu_disc(ipvs) ? iph->frag_off & htons(IP_DF) : 0; + maybe_update_pmtu(skb_af, skb, mtu); } - /* MTU checking */ - if (unlikely(df && skb->len > mtu && !skb_is_gso(skb))) { - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); - IP_VS_DBG(1, "frag needed for %pI4\n", &iph->saddr); + if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu)) goto err_put; - } skb_dst_drop(skb); if (noref) { @@ -295,12 +363,6 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, } #ifdef CONFIG_IP_VS_IPV6 - -static inline int __ip_vs_is_local_route6(struct rt6_info *rt) -{ - return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK; -} - static struct dst_entry * __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr, struct in6_addr *ret_saddr, int do_xfrm) @@ -339,14 +401,13 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr, * Get route to destination or remote server */ static int -__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, +__ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, struct in6_addr *daddr, struct in6_addr *ret_saddr, struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode) { struct net *net = dev_net(skb_dst(skb)->dev); struct ip_vs_dest_dst *dest_dst; struct rt6_info *rt; /* Route to the other host */ - struct rt6_info *ort; /* Original route */ struct dst_entry *dst; int mtu; int local, noref = 1; @@ -393,32 +454,15 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, } local = __ip_vs_is_local_route6(rt); - if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & - rt_mode)) { - IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n", - local ? 
"local":"non-local", daddr); + + if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, + local))) { + IP_VS_DBG_RL("We are crossing local and non-local addresses" + " daddr=%pI6\n", &dest->addr.in6); goto err_put; } - if (likely(!local)) { - if (unlikely((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && - ipv6_addr_type(&ipv6_hdr(skb)->saddr) & - IPV6_ADDR_LOOPBACK)) { - IP_VS_DBG_RL("Stopping traffic from loopback address " - "%pI6c to non-local address, " - "dest: %pI6c\n", - &ipv6_hdr(skb)->saddr, daddr); - goto err_put; - } - } else { - ort = (struct rt6_info *) skb_dst(skb); - if (!(rt_mode & IP_VS_RT_MODE_RDR) && - !__ip_vs_is_local_route6(ort)) { - IP_VS_DBG_RL("Redirect from non-local address %pI6c " - "to local requires NAT method, " - "dest: %pI6c\n", - &ipv6_hdr(skb)->daddr, daddr); - goto err_put; - } + + if (unlikely(local)) { /* skb to local stack, preserve old route */ if (!noref) dst_release(&rt->dst); @@ -429,28 +473,17 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) mtu = dst_mtu(&rt->dst); else { - struct sock *sk = skb->sk; - mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr); if (mtu < IPV6_MIN_MTU) { IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__, IPV6_MIN_MTU); goto err_put; } - ort = (struct rt6_info *) skb_dst(skb); - if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT) - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); + maybe_update_pmtu(skb_af, skb, mtu); } - if (unlikely(__mtu_check_toobig_v6(skb, mtu))) { - if (!skb->dev) - skb->dev = net->loopback_dev; - /* only send ICMP too big on first fragment */ - if (!ipvsh->fragoffs) - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); - IP_VS_DBG(1, "frag needed for %pI6c\n", &ipv6_hdr(skb)->saddr); + if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu)) goto err_put; - } skb_dst_drop(skb); if (noref) { @@ -556,8 +589,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); rcu_read_lock(); - if (__ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL, - NULL) < 0) + if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr, + IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0) goto tx_error; ip_send_check(iph); @@ -586,7 +619,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); rcu_read_lock(); - if (__ip_vs_get_out_rt_v6(skb, NULL, &ipvsh->daddr.in6, NULL, + if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &ipvsh->daddr.in6, NULL, ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0) goto tx_error; @@ -633,10 +666,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, } was_input = rt_is_input_route(skb_rtable(skb)); - local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, + local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | - IP_VS_RT_MODE_RDR, NULL); + IP_VS_RT_MODE_RDR, NULL, ipvsh); if (local < 0) goto tx_error; rt = skb_rtable(skb); @@ -721,8 +754,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p)); } - local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, - ipvsh, 0, + local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, + NULL, ipvsh, 0, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR); @@ -791,6 +824,81 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } #endif +/* When forwarding a packet, we must ensure that we've got enough headroom + * for the encapsulation packet in the skb. 
This also gives us an + * opportunity to figure out what the payload_len, dsfield, ttl, and df + * values should be, so that we won't need to look at the old ip header + * again + */ +static struct sk_buff * +ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, + unsigned int max_headroom, __u8 *next_protocol, + __u32 *payload_len, __u8 *dsfield, __u8 *ttl, + __be16 *df) +{ + struct sk_buff *new_skb = NULL; + struct iphdr *old_iph = NULL; +#ifdef CONFIG_IP_VS_IPV6 + struct ipv6hdr *old_ipv6h = NULL; +#endif + + if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { + new_skb = skb_realloc_headroom(skb, max_headroom); + if (!new_skb) + goto error; + consume_skb(skb); + skb = new_skb; + } + +#ifdef CONFIG_IP_VS_IPV6 + if (skb_af == AF_INET6) { + old_ipv6h = ipv6_hdr(skb); + *next_protocol = IPPROTO_IPV6; + if (payload_len) + *payload_len = + ntohs(old_ipv6h->payload_len) + + sizeof(*old_ipv6h); + *dsfield = ipv6_get_dsfield(old_ipv6h); + *ttl = old_ipv6h->hop_limit; + if (df) + *df = 0; + } else +#endif + { + old_iph = ip_hdr(skb); + /* Copy DF, reset fragment offset and MF */ + if (df) + *df = (old_iph->frag_off & htons(IP_DF)); + *next_protocol = IPPROTO_IPIP; + + /* fix old IP header checksum */ + ip_send_check(old_iph); + *dsfield = ipv4_get_dsfield(old_iph); + *ttl = old_iph->ttl; + if (payload_len) + *payload_len = ntohs(old_iph->tot_len); + } + + return skb; +error: + kfree_skb(skb); + return ERR_PTR(-ENOMEM); +} + +static inline int __tun_gso_type_mask(int encaps_af, int orig_af) +{ + if (encaps_af == AF_INET) { + if (orig_af == AF_INET) + return SKB_GSO_IPIP; + + return SKB_GSO_SIT; + } + + /* GSO: we need to provide proper SKB_GSO_ value for IPv6: + * SKB_GSO_SIT/IPV6 + */ + return 0; +} /* * IP Tunneling transmitter @@ -819,9 +927,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct rtable *rt; /* Route to the other host */ __be32 saddr; /* Source for tunnel */ struct net_device *tdev; /* Device to other host */ - struct iphdr *old_iph = ip_hdr(skb); - u8 tos = old_iph->tos; - __be16 df; + __u8 next_protocol = 0; + __u8 dsfield = 0; + __u8 ttl = 0; + __be16 df = 0; + __be16 *dfp = NULL; struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ int ret, local; @@ -829,11 +939,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); rcu_read_lock(); - local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, + local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_CONNECT | - IP_VS_RT_MODE_TUNNEL, &saddr); + IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh); if (local < 0) goto tx_error; if (local) { @@ -844,29 +954,21 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, rt = skb_rtable(skb); tdev = rt->dst.dev; - /* Copy DF, reset fragment offset and MF */ - df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0; - /* * Okay, now see if we can stuff it in the buffer as-is. */ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr); - if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { - struct sk_buff *new_skb = - skb_realloc_headroom(skb, max_headroom); - - if (!new_skb) - goto tx_error; - consume_skb(skb); - skb = new_skb; - old_iph = ip_hdr(skb); - } - - /* fix old IP header checksum */ - ip_send_check(old_iph); + /* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */ + dfp = sysctl_pmtu_disc(ipvs) ? 
&df : NULL; + skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, + &next_protocol, NULL, &dsfield, + &ttl, dfp); + if (IS_ERR(skb)) + goto tx_error; - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP); + skb = iptunnel_handle_offloads( + skb, false, __tun_gso_type_mask(AF_INET, cp->af)); if (IS_ERR(skb)) goto tx_error; @@ -883,11 +985,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; - iph->protocol = IPPROTO_IPIP; - iph->tos = tos; + iph->protocol = next_protocol; + iph->tos = dsfield; iph->daddr = cp->daddr.ip; iph->saddr = saddr; - iph->ttl = old_iph->ttl; + iph->ttl = ttl; ip_select_ident(skb, NULL); /* Another hack: avoid icmp_send in ip_fragment */ @@ -920,7 +1022,10 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct rt6_info *rt; /* Route to the other host */ struct in6_addr saddr; /* Source for tunnel */ struct net_device *tdev; /* Device to other host */ - struct ipv6hdr *old_iph = ipv6_hdr(skb); + __u8 next_protocol = 0; + __u32 payload_len = 0; + __u8 dsfield = 0; + __u8 ttl = 0; struct ipv6hdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ int ret, local; @@ -928,7 +1033,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); rcu_read_lock(); - local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, + local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, &saddr, ipvsh, 1, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | @@ -948,19 +1053,14 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, */ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr); - if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { - struct sk_buff *new_skb = - skb_realloc_headroom(skb, max_headroom); - - if (!new_skb) - goto tx_error; - consume_skb(skb); - skb = new_skb; - old_iph = ipv6_hdr(skb); - } + skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom, + &next_protocol, &payload_len, + &dsfield, &ttl, NULL); + if (IS_ERR(skb)) + goto tx_error; - /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */ - skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */ + skb = iptunnel_handle_offloads( + skb, false, __tun_gso_type_mask(AF_INET6, cp->af)); if (IS_ERR(skb)) goto tx_error; @@ -975,14 +1075,13 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, */ iph = ipv6_hdr(skb); iph->version = 6; - iph->nexthdr = IPPROTO_IPV6; - iph->payload_len = old_iph->payload_len; - be16_add_cpu(&iph->payload_len, sizeof(*old_iph)); + iph->nexthdr = next_protocol; + iph->payload_len = htons(payload_len); memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); - ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph)); + ipv6_change_dsfield(iph, 0, dsfield); iph->daddr = cp->daddr.in6; iph->saddr = saddr; - iph->hop_limit = old_iph->hop_limit; + iph->hop_limit = ttl; /* Another hack: avoid icmp_send in ip_fragment */ skb->ignore_df = 1; @@ -1021,10 +1120,10 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); rcu_read_lock(); - local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, + local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | - IP_VS_RT_MODE_KNOWN_NH, NULL); + IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh); if (local < 0) goto tx_error; if (local) { @@ -1060,8 +1159,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); 
rcu_read_lock(); - local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, - ipvsh, 0, + local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, + NULL, ipvsh, 0, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL); if (local < 0) @@ -1128,7 +1227,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; rcu_read_lock(); - local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, rt_mode, NULL); + local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode, + NULL, iph); if (local < 0) goto tx_error; rt = skb_rtable(skb); @@ -1219,8 +1319,8 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; rcu_read_lock(); - local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, - ipvsh, 0, rt_mode); + local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6, + NULL, ipvsh, 0, rt_mode); if (local < 0) goto tx_error; rt = (struct rt6_info *) skb_dst(skb); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index de88c4ab5146a168bc0866f3fdc07098b5ebe543..5016a6929085ebdbf151a1f47582d36195885540 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -142,7 +142,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) static u32 __hash_bucket(u32 hash, unsigned int size) { - return ((u64)hash * size) >> 32; + return reciprocal_scale(hash, size); } static u32 hash_bucket(u32 hash, const struct net *net) @@ -358,7 +358,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) - tstamp->stop = ktime_to_ns(ktime_get_real()); + tstamp->stop = ktime_get_real_ns(); if (nf_ct_is_dying(ct)) goto delete; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index f87e8f68ad453e9baeec017cc74534e8ce85dfab..91a1837acd0e8fb981ccea73ae262197afecfb33 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -83,7 +83,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); - return ((u64)hash * nf_ct_expect_hsize) >> 32; + + return reciprocal_scale(hash, nf_ct_expect_hsize); } struct nf_conntrack_expect * diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 355a5c4ef7635bb7aca1e1068ca269b891949550..1bd9ed9e62f642477e8a10d57cd73775cc1ebc14 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1737,7 +1737,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, } tstamp = nf_conn_tstamp_find(ct); if (tstamp) - tstamp->start = ktime_to_ns(ktime_get_real()); + tstamp->start = ktime_get_real_ns(); err = nf_conntrack_hash_check_insert(ct); if (err < 0) diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index d25f293776482f5e88c831c290058f2ca2e6af2b..957c1db6665254645f43d0c04456015a1d4df65c 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -14,6 +14,30 @@ static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; +static bool 
nf_generic_should_process(u8 proto) +{ + switch (proto) { +#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE + case IPPROTO_SCTP: + return false; +#endif +#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE + case IPPROTO_DCCP: + return false; +#endif +#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE + case IPPROTO_GRE: + return false; +#endif +#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE + case IPPROTO_UDPLITE: + return false; +#endif + default: + return true; + } +} + static inline struct nf_generic_net *generic_pernet(struct net *net) { return &net->ct.nf_ct_proto.generic; @@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct, static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { - return true; + return nf_generic_should_process(nf_ct_protonum(ct)); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f641751dba9dc467b4207716bdb0ac69208747f8..cf65a1e040dd8c8920dd8fc330c75c218ea7050b 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -101,7 +101,7 @@ static void *ct_seq_start(struct seq_file *seq, loff_t *pos) { struct ct_iter_state *st = seq->private; - st->time_now = ktime_to_ns(ktime_get_real()); + st->time_now = ktime_get_real_ns(); rcu_read_lock(); return ct_get_idx(seq, *pos); } diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c index eeb8ef4ff1a3c0d674072253fa1e47cea6b38d9a..a2233e77cf3990d8bbd2bde72747d2005fe2c1d4 100644 --- a/net/netfilter/nf_log_common.c +++ b/net/netfilter/nf_log_common.c @@ -158,7 +158,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, '0' + loginfo->u.log.level, prefix, in ? in->name : "", out ? out->name : ""); -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (skb->nf_bridge) { const struct net_device *physindev; const struct net_device *physoutdev; diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 552f97cd9fde5c510ac055642affb66adbda181d..4e0b47831d43a25f021a1eeb2c62c2307b8630e1 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -126,7 +126,8 @@ hash_by_src(const struct net *net, u16 zone, /* Original src, to ensure we map it consistently if poss. */ hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd); - return ((u64)hash * net->ct.nat_htable_size) >> 32; + + return reciprocal_scale(hash, net->ct.nat_htable_size); } /* Is this tuple already taken? 
(not by us) */ @@ -274,7 +275,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, } var_ipp->all[i] = (__force __u32) - htonl(minip + (((u64)j * dist) >> 32)); + htonl(minip + reciprocal_scale(j, dist)); if (var_ipp->all[i] != range->max_addr.all[i]) full_range = true; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5d24b1fdb593b1e43bde32991903d41a065a0b34..4c8b68e5fa164fd71b6f613b66b36d52e6717bf4 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -52,7 +52,7 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry) dev_put(entry->indev); if (entry->outdev) dev_put(entry->outdev); -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->skb->nf_bridge) { struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; @@ -77,7 +77,7 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) dev_hold(entry->indev); if (entry->outdev) dev_hold(entry->outdev); -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->skb->nf_bridge) { struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; struct net_device *physdev; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index deeb95fb702833ac9d2dc9ec39d57778e31a570c..556a0dfa4abc07f7ed7bde511e07db1a85c99ecd 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -127,6 +127,204 @@ static void nft_trans_destroy(struct nft_trans *trans) kfree(trans); } +static void nf_tables_unregister_hooks(const struct nft_table *table, + const struct nft_chain *chain, + unsigned int hook_nops) +{ + if (!(table->flags & NFT_TABLE_F_DORMANT) && + chain->flags & NFT_BASE_CHAIN) + nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops); +} + +/* Internal table flags */ +#define NFT_TABLE_INACTIVE (1 << 15) + +static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWTABLE) + ctx->table->flags |= NFT_TABLE_INACTIVE; + + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + return 0; +} + +static int nft_deltable(struct nft_ctx *ctx) +{ + int err; + + err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE); + if (err < 0) + return err; + + list_del_rcu(&ctx->table->list); + return err; +} + +static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWCHAIN) + ctx->chain->flags |= NFT_CHAIN_INACTIVE; + + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + return 0; +} + +static int nft_delchain(struct nft_ctx *ctx) +{ + int err; + + err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN); + if (err < 0) + return err; + + ctx->table->use--; + list_del_rcu(&ctx->chain->list); + + return err; +} + +static inline bool +nft_rule_is_active(struct net *net, const struct nft_rule *rule) +{ + return (rule->genmask & (1 << net->nft.gencursor)) == 0; +} + +static inline int gencursor_next(struct net *net) +{ + return net->nft.gencursor+1 == 1 ? 
1 : 0; +} + +static inline int +nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) +{ + return (rule->genmask & (1 << gencursor_next(net))) == 0; +} + +static inline void +nft_rule_activate_next(struct net *net, struct nft_rule *rule) +{ + /* Now inactive, will be active in the future */ + rule->genmask = (1 << net->nft.gencursor); +} + +static inline void +nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) +{ + rule->genmask = (1 << gencursor_next(net)); +} + +static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) +{ + rule->genmask = 0; +} + +static int +nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) +{ + /* You cannot delete the same rule twice */ + if (nft_rule_is_active_next(ctx->net, rule)) { + nft_rule_deactivate_next(ctx->net, rule); + ctx->chain->use--; + return 0; + } + return -ENOENT; +} + +static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, + struct nft_rule *rule) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); + if (trans == NULL) + return NULL; + + nft_trans_rule(trans) = rule; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return trans; +} + +static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) +{ + struct nft_trans *trans; + int err; + + trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule); + if (trans == NULL) + return -ENOMEM; + + err = nf_tables_delrule_deactivate(ctx, rule); + if (err < 0) { + nft_trans_destroy(trans); + return err; + } + + return 0; +} + +static int nft_delrule_by_chain(struct nft_ctx *ctx) +{ + struct nft_rule *rule; + int err; + + list_for_each_entry(rule, &ctx->chain->rules, list) { + err = nft_delrule(ctx, rule); + if (err < 0) + return err; + } + return 0; +} + +/* Internal set flag */ +#define NFT_SET_INACTIVE (1 << 15) + +static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, + struct nft_set *set) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { + nft_trans_set_id(trans) = + ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); + set->flags |= NFT_SET_INACTIVE; + } + nft_trans_set(trans) = set; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return 0; +} + +static int nft_delset(struct nft_ctx *ctx, struct nft_set *set) +{ + int err; + + err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set); + if (err < 0) + return err; + + list_del_rcu(&set->list); + ctx->table->use--; + + return err; +} + /* * Tables */ @@ -207,9 +405,9 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = { [NFTA_TABLE_FLAGS] = { .type = NLA_U32 }, }; -static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq, - int event, u32 flags, int family, - const struct nft_table *table) +static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, + u32 portid, u32 seq, int event, u32 flags, + int family, const struct nft_table *table) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; @@ -222,7 +420,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq, nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) || nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) || @@ -250,8 +448,8 
@@ static int nf_tables_table_notify(const struct nft_ctx *ctx, int event) if (skb == NULL) goto err; - err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0, - ctx->afi->family, ctx->table); + err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq, + event, 0, ctx->afi->family, ctx->table); if (err < 0) { kfree_skb(skb); goto err; @@ -290,7 +488,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb, if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); - if (nf_tables_fill_table_info(skb, + if (nf_tables_fill_table_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFT_MSG_NEWTABLE, @@ -309,9 +507,6 @@ static int nf_tables_dump_tables(struct sk_buff *skb, return skb->len; } -/* Internal table flags */ -#define NFT_TABLE_INACTIVE (1 << 15) - static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -345,7 +540,7 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb, if (!skb2) return -ENOMEM; - err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid, + err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) @@ -443,21 +638,6 @@ static int nf_tables_updtable(struct nft_ctx *ctx) return ret; } -static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWTABLE) - ctx->table->flags |= NFT_TABLE_INACTIVE; - - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - return 0; -} - static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -527,6 +707,67 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, return 0; } +static int nft_flush_table(struct nft_ctx *ctx) +{ + int err; + struct nft_chain *chain, *nc; + struct nft_set *set, *ns; + + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + ctx->chain = chain; + + err = nft_delrule_by_chain(ctx); + if (err < 0) + goto out; + + err = nft_delchain(ctx); + if (err < 0) + goto out; + } + + list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { + if (set->flags & NFT_SET_ANONYMOUS && + !list_empty(&set->bindings)) + continue; + + err = nft_delset(ctx, set); + if (err < 0) + goto out; + } + + err = nft_deltable(ctx); +out: + return err; +} + +static int nft_flush(struct nft_ctx *ctx, int family) +{ + struct nft_af_info *afi; + struct nft_table *table, *nt; + const struct nlattr * const *nla = ctx->nla; + int err = 0; + + list_for_each_entry(afi, &ctx->net->nft.af_info, list) { + if (family != AF_UNSPEC && afi->family != family) + continue; + + ctx->afi = afi; + list_for_each_entry_safe(table, nt, &afi->tables, list) { + if (nla[NFTA_TABLE_NAME] && + nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) + continue; + + ctx->table = table; + + err = nft_flush_table(ctx); + if (err < 0) + goto out; + } + } +out: + return err; +} + static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -535,9 +776,13 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, struct nft_af_info *afi; struct nft_table *table; struct net *net = sock_net(skb->sk); - int family = nfmsg->nfgen_family, err; + int family = nfmsg->nfgen_family; 
struct nft_ctx ctx; + nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); + if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) + return nft_flush(&ctx, family); + afi = nf_tables_afinfo_lookup(net, family, false); if (IS_ERR(afi)) return PTR_ERR(afi); @@ -547,16 +792,11 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, return PTR_ERR(table); if (table->flags & NFT_TABLE_INACTIVE) return -ENOENT; - if (table->use > 0) - return -EBUSY; - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); - err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE); - if (err < 0) - return err; + ctx.afi = afi; + ctx.table = table; - list_del_rcu(&table->list); - return 0; + return nft_flush_table(&ctx); } static void nf_tables_table_destroy(struct nft_ctx *ctx) @@ -674,9 +914,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats) return -ENOSPC; } -static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq, - int event, u32 flags, int family, - const struct nft_table *table, +static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, + u32 portid, u32 seq, int event, u32 flags, + int family, const struct nft_table *table, const struct nft_chain *chain) { struct nlmsghdr *nlh; @@ -690,7 +930,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq, nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name)) goto nla_put_failure; @@ -748,8 +988,8 @@ static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event) if (skb == NULL) goto err; - err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0, - ctx->afi->family, ctx->table, + err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq, + event, 0, ctx->afi->family, ctx->table, ctx->chain); if (err < 0) { kfree_skb(skb); @@ -791,7 +1031,8 @@ static int nf_tables_dump_chains(struct sk_buff *skb, if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); - if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid, + if (nf_tables_fill_chain_info(skb, net, + NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, NLM_F_MULTI, @@ -850,7 +1091,7 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb, if (!skb2) return -ENOMEM; - err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid, + err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) @@ -913,21 +1154,6 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain, rcu_assign_pointer(chain->stats, newstats); } -static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWCHAIN) - ctx->chain->flags |= NFT_CHAIN_INACTIVE; - - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - return 0; -} - static void nf_tables_chain_destroy(struct nft_chain *chain) { BUG_ON(chain->use > 0); @@ -1157,11 +1383,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, list_add_tail_rcu(&chain->list, &table->chains); return 0; err2: - if (!(table->flags & NFT_TABLE_F_DORMANT) && - chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(chain)->ops, - afi->nops); - } + 
nf_tables_unregister_hooks(table, chain, afi->nops); err1: nf_tables_chain_destroy(chain); return err; @@ -1178,7 +1400,6 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; struct nft_ctx ctx; - int err; afi = nf_tables_afinfo_lookup(net, family, false); if (IS_ERR(afi)) @@ -1199,13 +1420,8 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, return -EBUSY; nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); - err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN); - if (err < 0) - return err; - table->use--; - list_del_rcu(&chain->list); - return 0; + return nft_delchain(&ctx); } /* @@ -1432,8 +1648,9 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { .len = NFT_USERDATA_MAXLEN }, }; -static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq, - int event, u32 flags, int family, +static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, + u32 portid, u32 seq, int event, + u32 flags, int family, const struct nft_table *table, const struct nft_chain *chain, const struct nft_rule *rule) @@ -1453,7 +1670,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq, nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_RULE_TABLE, table->name)) goto nla_put_failure; @@ -1509,8 +1726,8 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx, if (skb == NULL) goto err; - err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0, - ctx->afi->family, ctx->table, + err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq, + event, 0, ctx->afi->family, ctx->table, ctx->chain, rule); if (err < 0) { kfree_skb(skb); @@ -1527,41 +1744,6 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx, return err; } -static inline bool -nft_rule_is_active(struct net *net, const struct nft_rule *rule) -{ - return (rule->genmask & (1 << net->nft.gencursor)) == 0; -} - -static inline int gencursor_next(struct net *net) -{ - return net->nft.gencursor+1 == 1 ? 
1 : 0; -} - -static inline int -nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) -{ - return (rule->genmask & (1 << gencursor_next(net))) == 0; -} - -static inline void -nft_rule_activate_next(struct net *net, struct nft_rule *rule) -{ - /* Now inactive, will be active in the future */ - rule->genmask = (1 << net->nft.gencursor); -} - -static inline void -nft_rule_disactivate_next(struct net *net, struct nft_rule *rule) -{ - rule->genmask = (1 << gencursor_next(net)); -} - -static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) -{ - rule->genmask = 0; -} - static int nf_tables_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) { @@ -1591,7 +1773,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); - if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid, + if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFT_MSG_NEWRULE, NLM_F_MULTI | NLM_F_APPEND, @@ -1657,7 +1839,7 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb, if (!skb2) return -ENOMEM; - err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid, + err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule); if (err < 0) @@ -1687,21 +1869,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, kfree(rule); } -static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, - struct nft_rule *rule) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); - if (trans == NULL) - return NULL; - - nft_trans_rule(trans) = rule; - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - - return trans; -} - #define NFT_RULE_MAXEXPRS 128 static struct nft_expr_info *info; @@ -1823,7 +1990,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, err = -ENOMEM; goto err2; } - nft_rule_disactivate_next(net, old_rule); + nft_rule_deactivate_next(net, old_rule); chain->use--; list_add_tail_rcu(&rule->list, &old_rule->list); } else { @@ -1867,33 +2034,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, return err; } -static int -nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule) -{ - /* You cannot delete the same rule twice */ - if (nft_rule_is_active_next(ctx->net, rule)) { - if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL) - return -ENOMEM; - nft_rule_disactivate_next(ctx->net, rule); - ctx->chain->use--; - return 0; - } - return -ENOENT; -} - -static int nf_table_delrule_by_chain(struct nft_ctx *ctx) -{ - struct nft_rule *rule; - int err; - - list_for_each_entry(rule, &ctx->chain->rules, list) { - err = nf_tables_delrule_one(ctx, rule); - if (err < 0) - return err; - } - return 0; -} - static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -1932,14 +2072,14 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, if (IS_ERR(rule)) return PTR_ERR(rule); - err = nf_tables_delrule_one(&ctx, rule); + err = nft_delrule(&ctx, rule); } else { - err = nf_table_delrule_by_chain(&ctx); + err = nft_delrule_by_chain(&ctx); } } else { list_for_each_entry(chain, &table->chains, list) { ctx.chain = chain; - err = nf_table_delrule_by_chain(&ctx); + err = nft_delrule_by_chain(&ctx); if (err < 0) break; } @@ -2183,7 +2323,7 @@ static int 
nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = ctx->afi->family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) goto nla_put_failure; @@ -2204,6 +2344,11 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } + if (set->policy != NFT_SET_POL_PERFORMANCE) { + if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy))) + goto nla_put_failure; + } + desc = nla_nest_start(skb, NFTA_SET_DESC); if (desc == NULL) goto nla_put_failure; @@ -2322,8 +2467,6 @@ static int nf_tables_dump_sets_done(struct netlink_callback *cb) return 0; } -#define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */ - static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -2398,26 +2541,6 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, return 0; } -static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, - struct nft_set *set) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { - nft_trans_set_id(trans) = - ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); - set->flags |= NFT_SET_INACTIVE; - } - nft_trans_set(trans) = set; - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - - return 0; -} - static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -2551,6 +2674,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, set->dlen = desc.dlen; set->flags = flags; set->size = desc.size; + set->policy = policy; err = ops->init(set, &desc, nla); if (err < 0) @@ -2611,13 +2735,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, if (!list_empty(&set->bindings)) return -EBUSY; - err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set); - if (err < 0) - return err; - - list_del_rcu(&set->list); - ctx.table->use--; - return 0; + return nft_delset(&ctx, set); } static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, @@ -2815,7 +2933,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = ctx.afi->family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name)) goto nla_put_failure; @@ -2896,7 +3014,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = ctx->afi->family; nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; + nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) goto nla_put_failure; @@ -3183,6 +3301,87 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, return err; } +static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, + u32 portid, u32 seq) +{ + struct nlmsghdr *nlh; + struct nfgenmsg *nfmsg; + int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWGEN; + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); + if (nlh == NULL) + goto nla_put_failure; + + nfmsg = nlmsg_data(nlh); + nfmsg->nfgen_family = AF_UNSPEC; + nfmsg->version = 
NFNETLINK_V0; + nfmsg->res_id = htons(net->nft.base_seq & 0xffff); + + if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq))) + goto nla_put_failure; + + return nlmsg_end(skb, nlh); + +nla_put_failure: + nlmsg_trim(skb, nlh); + return -EMSGSIZE; +} + +static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) +{ + struct nlmsghdr *nlh = nlmsg_hdr(skb); + struct sk_buff *skb2; + int err; + + if (nlmsg_report(nlh) && + !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) + return 0; + + err = -ENOBUFS; + skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (skb2 == NULL) + goto err; + + err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, + nlh->nlmsg_seq); + if (err < 0) { + kfree_skb(skb2); + goto err; + } + + err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, + NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL); +err: + if (err < 0) { + nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, + err); + } + return err; +} + +static int nf_tables_getgen(struct sock *nlsk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const nla[]) +{ + struct net *net = sock_net(skb->sk); + struct sk_buff *skb2; + int err; + + skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (skb2 == NULL) + return -ENOMEM; + + err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, + nlh->nlmsg_seq); + if (err < 0) + goto err; + + return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); +err: + kfree_skb(skb2); + return err; +} + static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { [NFT_MSG_NEWTABLE] = { .call_batch = nf_tables_newtable, @@ -3259,6 +3458,9 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { .attr_count = NFTA_SET_ELEM_LIST_MAX, .policy = nft_set_elem_list_policy, }, + [NFT_MSG_GETGEN] = { + .call = nf_tables_getgen, + }, }; static void nft_chain_commit_update(struct nft_trans *trans) @@ -3352,11 +3554,9 @@ static int nf_tables_commit(struct sk_buff *skb) break; case NFT_MSG_DELCHAIN: nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && - trans->ctx.chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, - trans->ctx.afi->nops); - } + nf_tables_unregister_hooks(trans->ctx.table, + trans->ctx.chain, + trans->ctx.afi->nops); break; case NFT_MSG_NEWRULE: nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); @@ -3418,6 +3618,8 @@ static int nf_tables_commit(struct sk_buff *skb) call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu); } + nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); + return 0; } @@ -3479,11 +3681,9 @@ static int nf_tables_abort(struct sk_buff *skb) } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && - trans->ctx.chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, - trans->ctx.afi->nops); - } + nf_tables_unregister_hooks(trans->ctx.table, + trans->ctx.chain, + trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: @@ -3963,6 +4163,7 @@ static void __exit nf_tables_module_exit(void) { unregister_pernet_subsys(&nf_tables_net_ops); nfnetlink_subsys_unregister(&nf_tables_subsys); + rcu_barrier(); nf_tables_core_module_exit(); kfree(info); } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index f37f0716a9fc04e58f69bc826e827ce99546cec2..6c5a915cfa758bb4d8187dac9dc606201e99b458 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ 
-381,7 +381,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, */ if (err == -EAGAIN) { nfnl_err_reset(&err_list); - ss->abort(skb); + ss->abort(oskb); nfnl_unlock(subsys_id); kfree_skb(nskb); goto replay; @@ -418,9 +418,9 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, } done: if (success && done) - ss->commit(skb); + ss->commit(oskb); else - ss->abort(skb); + ss->abort(oskb); nfnl_err_deliver(&err_list, oskb); nfnl_unlock(subsys_id); diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 3ea0eacbd9704b7ca612c56d17276aef623b14e0..c18af2f63eefb07e00be893190c35492232d4008 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -40,6 +40,11 @@ struct nf_acct { char data[0]; }; +struct nfacct_filter { + u32 value; + u32 mask; +}; + #define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES) #define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */ @@ -181,6 +186,7 @@ static int nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct nf_acct *cur, *last; + const struct nfacct_filter *filter = cb->data; if (cb->args[2]) return 0; @@ -197,6 +203,10 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) last = NULL; } + + if (filter && (cur->flags & filter->mask) != filter->value) + continue; + if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), @@ -211,6 +221,38 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +static int nfnl_acct_done(struct netlink_callback *cb) +{ + kfree(cb->data); + return 0; +} + +static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = { + [NFACCT_FILTER_MASK] = { .type = NLA_U32 }, + [NFACCT_FILTER_VALUE] = { .type = NLA_U32 }, +}; + +static struct nfacct_filter * +nfacct_filter_alloc(const struct nlattr * const attr) +{ + struct nfacct_filter *filter; + struct nlattr *tb[NFACCT_FILTER_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy); + if (err < 0) + return ERR_PTR(err); + + filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); + + filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK])); + filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE])); + + return filter; +} + static int nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) @@ -222,7 +264,18 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nfnl_acct_dump, + .done = nfnl_acct_done, }; + + if (tb[NFACCT_FILTER]) { + struct nfacct_filter *filter; + + filter = nfacct_filter_alloc(tb[NFACCT_FILTER]); + if (IS_ERR(filter)) + return PTR_ERR(filter); + + c.data = filter; + } return netlink_dump_start(nfnl, skb, nlh, &c); } @@ -314,6 +367,7 @@ static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = { [NFACCT_PKTS] = { .type = NLA_U64 }, [NFACCT_FLAGS] = { .type = NLA_U32 }, [NFACCT_QUOTA] = { .type = NLA_U64 }, + [NFACCT_FILTER] = {.type = NLA_NESTED }, }; static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index a11c5ff2f720187418e22ed602119651672bb717..b1e3a05794169283ed50d1c0fb4f44d9e7753eeb 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -36,7 +36,7 @@ #include -#ifdef 
CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #include "../bridge/br_private.h" #endif @@ -429,7 +429,7 @@ __build_packet_message(struct nfnl_log_net *log, goto nla_put_failure; if (indev) { -#ifndef CONFIG_BRIDGE_NETFILTER +#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV, htonl(indev->ifindex))) goto nla_put_failure; @@ -460,7 +460,7 @@ __build_packet_message(struct nfnl_log_net *log, } if (outdev) { -#ifndef CONFIG_BRIDGE_NETFILTER +#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) goto nla_put_failure; @@ -640,7 +640,7 @@ nfulnl_log_packet(struct net *net, + nla_total_size(sizeof(struct nfulnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 108120f216b17671351cb492c86cb0ae928504f6..a82077d9f59b2f49cf755bcc9c8754846cb64b3e 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -36,7 +36,7 @@ #include -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #include "../bridge/br_private.h" #endif @@ -302,7 +302,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif @@ -380,7 +380,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, indev = entry->indev; if (indev) { -#ifndef CONFIG_BRIDGE_NETFILTER +#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) goto nla_put_failure; #else @@ -410,7 +410,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, } if (outdev) { -#ifndef CONFIG_BRIDGE_NETFILTER +#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) goto nla_put_failure; #else @@ -569,7 +569,7 @@ nf_queue_entry_dup(struct nf_queue_entry *e) return NULL; } -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* When called from bridge netfilter, skb->data must point to MAC header * before calling skb_gso_segment(). Else, original MAC header is lost * and segmented skbs will be sent to wrong destination. 
@@ -763,7 +763,7 @@ dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) if (entry->outdev) if (entry->outdev->ifindex == ifindex) return 1; -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->skb->nf_bridge) { if (entry->skb->nf_bridge->physindev && entry->skb->nf_bridge->physindev->ifindex == ifindex) diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 1840989092ed8eb84f465bf6302131773cb01d87..7e2683c8a44a9d778ab39c212f7204c28770091c 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -101,26 +101,12 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, static void target_compat_from_user(struct xt_target *t, void *in, void *out) { -#ifdef CONFIG_COMPAT - if (t->compat_from_user) { - int pad; - - t->compat_from_user(out, in); - pad = XT_ALIGN(t->targetsize) - t->targetsize; - if (pad > 0) - memset(out + t->targetsize, 0, pad); - } else -#endif - memcpy(out, in, XT_ALIGN(t->targetsize)); -} + int pad; -static inline int nft_compat_target_offset(struct xt_target *target) -{ -#ifdef CONFIG_COMPAT - return xt_compat_target_offset(target); -#else - return 0; -#endif + memcpy(out, in, t->targetsize); + pad = XT_ALIGN(t->targetsize) - t->targetsize; + if (pad > 0) + memset(out + t->targetsize, 0, pad); } static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = { @@ -208,34 +194,6 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) module_put(target->me); } -static int -target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in) -{ - int ret; - -#ifdef CONFIG_COMPAT - if (t->compat_to_user) { - mm_segment_t old_fs; - void *out; - - out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC); - if (out == NULL) - return -ENOMEM; - - /* We want to reuse existing compat_to_user */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - t->compat_to_user(out, in); - set_fs(old_fs); - ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out); - kfree(out); - } else -#endif - ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in); - - return ret; -} - static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct xt_target *target = expr->ops->data; @@ -243,7 +201,7 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) || nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) || - target_dump_info(skb, target, info)) + nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info)) goto nla_put_failure; return 0; @@ -341,17 +299,12 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, static void match_compat_from_user(struct xt_match *m, void *in, void *out) { -#ifdef CONFIG_COMPAT - if (m->compat_from_user) { - int pad; - - m->compat_from_user(out, in); - pad = XT_ALIGN(m->matchsize) - m->matchsize; - if (pad > 0) - memset(out + m->matchsize, 0, pad); - } else -#endif - memcpy(out, in, XT_ALIGN(m->matchsize)); + int pad; + + memcpy(out, in, m->matchsize); + pad = XT_ALIGN(m->matchsize) - m->matchsize; + if (pad > 0) + memset(out + m->matchsize, 0, pad); } static int @@ -404,43 +357,6 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) module_put(match->me); } -static int -match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in) -{ - int ret; - -#ifdef CONFIG_COMPAT - if (m->compat_to_user) { - mm_segment_t old_fs; - void *out; - - out = 
kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC); - if (out == NULL) - return -ENOMEM; - - /* We want to reuse existing compat_to_user */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - m->compat_to_user(out, in); - set_fs(old_fs); - ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out); - kfree(out); - } else -#endif - ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in); - - return ret; -} - -static inline int nft_compat_match_offset(struct xt_match *match) -{ -#ifdef CONFIG_COMPAT - return xt_compat_match_offset(match); -#else - return 0; -#endif -} - static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) { void *info = nft_expr_priv(expr); @@ -448,7 +364,7 @@ static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) || - match_dump_info(skb, match, info)) + nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info)) goto nla_put_failure; return 0; @@ -643,8 +559,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, return ERR_PTR(-ENOMEM); nft_match->ops.type = &nft_match_type; - nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) + - nft_compat_match_offset(match)); + nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); nft_match->ops.eval = nft_match_eval; nft_match->ops.init = nft_match_init; nft_match->ops.destroy = nft_match_destroy; @@ -714,8 +629,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, return ERR_PTR(-ENOMEM); nft_target->ops.type = &nft_target_type; - nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) + - nft_compat_target_offset(target)); + nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); nft_target->ops.eval = nft_target_eval; nft_target->ops.init = nft_target_init; nft_target->ops.destroy = nft_target_destroy; diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c new file mode 100644 index 0000000000000000000000000000000000000000..6637bab0056705d6cf9671f681b65b1c5166523a --- /dev/null +++ b/net/netfilter/nft_masq.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { + [NFTA_MASQ_FLAGS] = { .type = NLA_U32 }, +}; +EXPORT_SYMBOL_GPL(nft_masq_policy); + +int nft_masq_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_masq *priv = nft_expr_priv(expr); + + if (tb[NFTA_MASQ_FLAGS] == NULL) + return 0; + + priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(nft_masq_init); + +int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_masq *priv = nft_expr_priv(expr); + + if (priv->flags == 0) + return 0; + + if (nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags))) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -1; +} +EXPORT_SYMBOL_GPL(nft_masq_dump); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez "); diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 852b178c6ae7fa2f7dbd6f7887404033da45971f..1e7c076ca63ab92e062c56e68910a47b8b1dffc6 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -14,6 +14,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include /* for TCP_TIME_WAIT */ @@ -124,6 +128,43 @@ void nft_meta_get_eval(const struct nft_expr *expr, dest->data[0] = skb->secmark; break; #endif + case NFT_META_PKTTYPE: + if (skb->pkt_type != PACKET_LOOPBACK) { + dest->data[0] = skb->pkt_type; + break; + } + + switch (pkt->ops->pf) { + case NFPROTO_IPV4: + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + dest->data[0] = PACKET_MULTICAST; + else + dest->data[0] = PACKET_BROADCAST; + break; + case NFPROTO_IPV6: + if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) + dest->data[0] = PACKET_MULTICAST; + else + dest->data[0] = PACKET_BROADCAST; + break; + default: + WARN_ON(1); + goto err; + } + break; + case NFT_META_CPU: + dest->data[0] = smp_processor_id(); + break; + case NFT_META_IIFGROUP: + if (in == NULL) + goto err; + dest->data[0] = in->group; + break; + case NFT_META_OIFGROUP: + if (out == NULL) + goto err; + dest->data[0] = out->group; + break; default: WARN_ON(1); goto err; @@ -195,6 +236,10 @@ int nft_meta_get_init(const struct nft_ctx *ctx, #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: #endif + case NFT_META_PKTTYPE: + case NFT_META_CPU: + case NFT_META_IIFGROUP: + case NFT_META_OIFGROUP: break; default: return -EOPNOTSUPP; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 79ff58cd36dc42c053ef272905457bf08db3d6ef..799550b476fbdeeadc2745aaff97fe5f2b197761 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -33,6 +33,7 @@ struct nft_nat { enum nft_registers sreg_proto_max:8; enum nf_nat_manip_type type:8; u8 family; + u16 flags; }; static void nft_nat_eval(const struct nft_expr *expr, @@ -71,6 +72,8 @@ static void nft_nat_eval(const struct nft_expr *expr, range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } + range.flags |= priv->flags; + data[NFT_REG_VERDICT].verdict = nf_nat_setup_info(ct, &range, priv->type); } @@ -82,6 +85,7 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = { [NFTA_NAT_REG_ADDR_MAX] = { .type = NLA_U32 }, [NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 }, [NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 }, + [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, }; static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 
@@ -149,6 +153,12 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, } else priv->sreg_proto_max = priv->sreg_proto_min; + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) + return -EINVAL; + } + return 0; } @@ -183,6 +193,12 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) htonl(priv->sreg_proto_max))) goto nla_put_failure; } + + if (priv->flags != 0) { + if (nla_put_be32(skb, NFTA_NAT_FLAGS, htonl(priv->flags))) + goto nla_put_failure; + } + return 0; nla_put_failure: diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index f3448c2964468abc08d2b3630be18953820e1aff..ec8a456092a73d7b8f92e92db46be986770a85e7 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = { [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, @@ -70,5 +72,40 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) } EXPORT_SYMBOL_GPL(nft_reject_dump); +static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = { + [NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH, + [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH, + [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH, + [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMP_PKT_FILTERED, +}; + +int nft_reject_icmp_code(u8 code) +{ + if (code > NFT_REJECT_ICMPX_MAX) + return -EINVAL; + + return icmp_code_v4[code]; +} + +EXPORT_SYMBOL_GPL(nft_reject_icmp_code); + + +static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = { + [NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE, + [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH, + [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH, + [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMPV6_ADM_PROHIBITED, +}; + +int nft_reject_icmpv6_code(u8 code) +{ + if (code > NFT_REJECT_ICMPX_MAX) + return -EINVAL; + + return icmp_code_v6[code]; +} + +EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index b718a52a46544036ba81a4105da149d8261d30c3..7b5f9d58680ad0ebca03c13e4a6f43037283bdc1 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -14,17 +14,103 @@ #include #include #include +#include +#include static void nft_reject_inet_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], const struct nft_pktinfo *pkt) { + struct nft_reject *priv = nft_expr_priv(expr); + struct net *net = dev_net((pkt->in != NULL) ? 
pkt->in : pkt->out); + switch (pkt->ops->pf) { case NFPROTO_IPV4: - return nft_reject_ipv4_eval(expr, data, pkt); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + nf_send_unreach(pkt->skb, priv->icmp_code); + break; + case NFT_REJECT_TCP_RST: + nf_send_reset(pkt->skb, pkt->ops->hooknum); + break; + case NFT_REJECT_ICMPX_UNREACH: + nf_send_unreach(pkt->skb, + nft_reject_icmp_code(priv->icmp_code)); + break; + } + break; case NFPROTO_IPV6: - return nft_reject_ipv6_eval(expr, data, pkt); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + nf_send_unreach6(net, pkt->skb, priv->icmp_code, + pkt->ops->hooknum); + break; + case NFT_REJECT_TCP_RST: + nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); + break; + case NFT_REJECT_ICMPX_UNREACH: + nf_send_unreach6(net, pkt->skb, + nft_reject_icmpv6_code(priv->icmp_code), + pkt->ops->hooknum); + break; + } + break; + } + data[NFT_REG_VERDICT].verdict = NF_DROP; +} + +static int nft_reject_inet_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_reject *priv = nft_expr_priv(expr); + int icmp_code; + + if (tb[NFTA_REJECT_TYPE] == NULL) + return -EINVAL; + + priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + case NFT_REJECT_ICMPX_UNREACH: + if (tb[NFTA_REJECT_ICMP_CODE] == NULL) + return -EINVAL; + + icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); + if (priv->type == NFT_REJECT_ICMPX_UNREACH && + icmp_code > NFT_REJECT_ICMPX_MAX) + return -EINVAL; + + priv->icmp_code = icmp_code; + break; + case NFT_REJECT_TCP_RST: + break; + default: + return -EINVAL; } + return 0; +} + +static int nft_reject_inet_dump(struct sk_buff *skb, + const struct nft_expr *expr) +{ + const struct nft_reject *priv = nft_expr_priv(expr); + + if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) + goto nla_put_failure; + + switch (priv->type) { + case NFT_REJECT_ICMP_UNREACH: + case NFT_REJECT_ICMPX_UNREACH: + if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) + goto nla_put_failure; + break; + } + + return 0; + +nla_put_failure: + return -1; } static struct nft_expr_type nft_reject_inet_type; @@ -32,8 +118,8 @@ static const struct nft_expr_ops nft_reject_inet_ops = { .type = &nft_reject_inet_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)), .eval = nft_reject_inet_eval, - .init = nft_reject_init, - .dump = nft_reject_dump, + .init = nft_reject_inet_init, + .dump = nft_reject_inet_dump, }; static struct nft_expr_type nft_reject_inet_type __read_mostly = { diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 272ae4d6fdf4f1dcb27eb2bc6e68414a25b63363..133eb4772f12586d31b4b95aa45202318753d848 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1101,22 +1101,11 @@ static const struct seq_operations xt_match_seq_ops = { static int xt_match_open(struct inode *inode, struct file *file) { - struct seq_file *seq; struct nf_mttg_trav *trav; - int ret; - - trav = kmalloc(sizeof(*trav), GFP_KERNEL); - if (trav == NULL) + trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav)); + if (!trav) return -ENOMEM; - ret = seq_open(file, &xt_match_seq_ops); - if (ret < 0) { - kfree(trav); - return ret; - } - - seq = file->private_data; - seq->private = trav; trav->nfproto = (unsigned long)PDE_DATA(inode); return 0; } @@ -1165,22 +1154,11 @@ static const struct seq_operations xt_target_seq_ops = { static int xt_target_open(struct inode *inode, struct file *file) { - struct seq_file *seq; struct 
nf_mttg_trav *trav; - int ret; - - trav = kmalloc(sizeof(*trav), GFP_KERNEL); - if (trav == NULL) + trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav)); + if (!trav) return -ENOMEM; - ret = seq_open(file, &xt_target_seq_ops); - if (ret < 0) { - kfree(trav); - return ret; - } - - seq = file->private_data; - seq->private = trav; trav->nfproto = (unsigned long)PDE_DATA(inode); return 0; } diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 73b73f687c580ccfe66648234caab5f0bdc543b9..02afaf48a7290b15e6f2d56b2ca2c5cb07455b4d 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c @@ -126,7 +126,7 @@ hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); hash = hash ^ (t->proto & info->proto_mask); - return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; + return reciprocal_scale(hash, info->hmodulus) + info->hoffset; } static void diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 370adf622cefd29f04f39cd227a9d4f9869d3b88..604df6fae6fcb0c02d7dff8636890c0adb656d93 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -136,7 +136,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) cfg.est.interval = info->interval; cfg.est.ewma_log = info->ewma_log; - ret = gen_new_estimator(&est->bstats, &est->rstats, + ret = gen_new_estimator(&est->bstats, NULL, &est->rstats, &est->lock, &cfg.opt); if (ret < 0) goto err2; diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index f4af1bfafb1c61642ddb56ac490f7e161df5d39d..96fa26b20b67dce9ed2d2abaaf6ce71dda363faf 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -55,7 +55,8 @@ xt_cluster_hash(const struct nf_conn *ct, WARN_ON(1); break; } - return (((u64)hash * info->total_nodes) >> 32); + + return reciprocal_scale(hash, info->total_nodes); } static inline bool diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 1e634615ab9d6e4b2a5c7db5872332c5a522f435..d4bec261e74e636e5a51c8ea95480f15bc61a1ab 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -120,7 +120,7 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par) * accounting is enabled, so complain in the hope that someone notices. 
*/ if (!nf_ct_acct_enabled(par->net)) { - pr_warning("Forcing CT accounting to be enabled\n"); + pr_warn("Forcing CT accounting to be enabled\n"); nf_ct_set_acct(par->net, true); } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 47dc6836830a9cfe1b329fbdddabfd1d80c4d100..05fbc2a0be4614aff1fbe55fccce25eb30041c0e 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -135,7 +135,7 @@ hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) * give results between [0 and cfg.size-1] and same hash distribution, * but using a multiply, less expensive than a divide */ - return ((u64)hash * ht->cfg.size) >> 32; + return reciprocal_scale(hash, ht->cfg.size); } static struct dsthash_ent * @@ -943,7 +943,7 @@ static int __init hashlimit_mt_init(void) sizeof(struct dsthash_ent), 0, 0, NULL); if (!hashlimit_cachep) { - pr_warning("unable to create slab cache\n"); + pr_warn("unable to create slab cache\n"); goto err2; } return 0; diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index d7ca16b8b8dfbcd8dec7c7ab67f9d6e5c3f957d9..f440f57a452fd9650d73f0a13b644383793463f5 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -13,6 +13,7 @@ #include #include #include +#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bart De Schuymer "); @@ -87,6 +88,8 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) { const struct xt_physdev_info *info = par->matchinfo; + br_netfilter_enable(); + if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || info->bitmask & ~XT_PHYSDEV_OP_MASK) return -EINVAL; diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 80c2e2d603e00c718ad1744804ef29f926b9d404..5732cd64acc0d579dd423b0ef23d9b65d77ba5f4 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -84,13 +84,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par) index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find set identified by id %u to match\n", - info->match_set.index); + pr_warn("Cannot find set identified by id %u to match\n", + info->match_set.index); return -ENOENT; } if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { - pr_warning("Protocol error: set match dimension " - "is over the limit!\n"); + pr_warn("Protocol error: set match dimension is over the limit!\n"); ip_set_nfnl_put(par->net, info->match_set.index); return -ERANGE; } @@ -134,13 +133,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par) index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find set identified by id %u to match\n", - info->match_set.index); + pr_warn("Cannot find set identified by id %u to match\n", + info->match_set.index); return -ENOENT; } if (info->match_set.dim > IPSET_DIM_MAX) { - pr_warning("Protocol error: set match dimension " - "is over the limit!\n"); + pr_warn("Protocol error: set match dimension is over the limit!\n"); ip_set_nfnl_put(par->net, info->match_set.index); return -ERANGE; } @@ -230,8 +228,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find add_set index %u as target\n", - info->add_set.index); + pr_warn("Cannot find add_set index %u as target\n", + info->add_set.index); return -ENOENT; } } @@ -239,8 +237,8 @@ set_target_v0_checkentry(const 
struct xt_tgchk_param *par) if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find del_set index %u as target\n", - info->del_set.index); + pr_warn("Cannot find del_set index %u as target\n", + info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); return -ENOENT; @@ -248,8 +246,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) } if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 || info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { - pr_warning("Protocol error: SET target dimension " - "is over the limit!\n"); + pr_warn("Protocol error: SET target dimension is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) @@ -303,8 +300,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find add_set index %u as target\n", - info->add_set.index); + pr_warn("Cannot find add_set index %u as target\n", + info->add_set.index); return -ENOENT; } } @@ -312,8 +309,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find del_set index %u as target\n", - info->del_set.index); + pr_warn("Cannot find del_set index %u as target\n", + info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); return -ENOENT; @@ -321,8 +318,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) } if (info->add_set.dim > IPSET_DIM_MAX || info->del_set.dim > IPSET_DIM_MAX) { - pr_warning("Protocol error: SET target dimension " - "is over the limit!\n"); + pr_warn("Protocol error: SET target dimension is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) @@ -370,6 +366,140 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) #define set_target_v2_checkentry set_target_v1_checkentry #define set_target_v2_destroy set_target_v1_destroy +/* Revision 3 target */ + +static unsigned int +set_target_v3(struct sk_buff *skb, const struct xt_action_param *par) +{ + const struct xt_set_info_target_v3 *info = par->targinfo; + ADT_OPT(add_opt, par->family, info->add_set.dim, + info->add_set.flags, info->flags, info->timeout); + ADT_OPT(del_opt, par->family, info->del_set.dim, + info->del_set.flags, 0, UINT_MAX); + ADT_OPT(map_opt, par->family, info->map_set.dim, + info->map_set.flags, 0, UINT_MAX); + + int ret; + + /* Normalize to fit into jiffies */ + if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && + add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC) + add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC; + if (info->add_set.index != IPSET_INVALID_ID) + ip_set_add(info->add_set.index, skb, par, &add_opt); + if (info->del_set.index != IPSET_INVALID_ID) + ip_set_del(info->del_set.index, skb, par, &del_opt); + if (info->map_set.index != IPSET_INVALID_ID) { + map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK | + IPSET_FLAG_MAP_SKBPRIO | + IPSET_FLAG_MAP_SKBQUEUE); + ret = match_set(info->map_set.index, skb, par, &map_opt, + 
info->map_set.flags & IPSET_INV_MATCH); + if (!ret) + return XT_CONTINUE; + if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK) + skb->mark = (skb->mark & ~(map_opt.ext.skbmarkmask)) + ^ (map_opt.ext.skbmark); + if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO) + skb->priority = map_opt.ext.skbprio; + if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) && + skb->dev && + skb->dev->real_num_tx_queues > map_opt.ext.skbqueue) + skb_set_queue_mapping(skb, map_opt.ext.skbqueue); + } + return XT_CONTINUE; +} + + +static int +set_target_v3_checkentry(const struct xt_tgchk_param *par) +{ + const struct xt_set_info_target_v3 *info = par->targinfo; + ip_set_id_t index; + + if (info->add_set.index != IPSET_INVALID_ID) { + index = ip_set_nfnl_get_byindex(par->net, + info->add_set.index); + if (index == IPSET_INVALID_ID) { + pr_warn("Cannot find add_set index %u as target\n", + info->add_set.index); + return -ENOENT; + } + } + + if (info->del_set.index != IPSET_INVALID_ID) { + index = ip_set_nfnl_get_byindex(par->net, + info->del_set.index); + if (index == IPSET_INVALID_ID) { + pr_warn("Cannot find del_set index %u as target\n", + info->del_set.index); + if (info->add_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, + info->add_set.index); + return -ENOENT; + } + } + + if (info->map_set.index != IPSET_INVALID_ID) { + if (strncmp(par->table, "mangle", 7)) { + pr_warn("--map-set only usable from mangle table\n"); + return -EINVAL; + } + if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | + (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) && + !(par->hook_mask & (1 << NF_INET_FORWARD | + 1 << NF_INET_LOCAL_OUT | + 1 << NF_INET_POST_ROUTING))) { + pr_warn("mapping of prio and/or queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); + return -EINVAL; + } + index = ip_set_nfnl_get_byindex(par->net, + info->map_set.index); + if (index == IPSET_INVALID_ID) { + pr_warn("Cannot find map_set index %u as target\n", + info->map_set.index); + if (info->add_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, + info->add_set.index); + if (info->del_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, + info->del_set.index); + return -ENOENT; + } + } + + if (info->add_set.dim > IPSET_DIM_MAX || + info->del_set.dim > IPSET_DIM_MAX || + info->map_set.dim > IPSET_DIM_MAX) { + pr_warn("Protocol error: SET target dimension is over the limit!\n"); + if (info->add_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->add_set.index); + if (info->del_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->del_set.index); + if (info->map_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->map_set.index); + return -ERANGE; + } + + return 0; +} + +static void +set_target_v3_destroy(const struct xt_tgdtor_param *par) +{ + const struct xt_set_info_target_v3 *info = par->targinfo; + + if (info->add_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->add_set.index); + if (info->del_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->del_set.index); + if (info->map_set.index != IPSET_INVALID_ID) + ip_set_nfnl_put(par->net, info->map_set.index); +} + + static struct xt_match set_matches[] __read_mostly = { { .name = "set", @@ -497,6 +627,27 @@ static struct xt_target set_targets[] __read_mostly = { .destroy = set_target_v2_destroy, .me = THIS_MODULE }, + /* --map-set support */ + { + .name = "SET", + .revision = 3, + .family = NFPROTO_IPV4, + .target = set_target_v3, + .targetsize = sizeof(struct xt_set_info_target_v3), + .checkentry =
set_target_v3_checkentry, + .destroy = set_target_v3_destroy, + .me = THIS_MODULE + }, + { + .name = "SET", + .revision = 3, + .family = NFPROTO_IPV6, + .target = set_target_v3, + .targetsize = sizeof(struct xt_set_info_target_v3), + .checkentry = set_target_v3_checkentry, + .destroy = set_target_v3_destroy, + .me = THIS_MODULE + }, }; static int __init xt_set_init(void) diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index d3c48b14ab94c09d74f0c74e62b94ed5fcb58af7..5699adb976521450c542c17acd13c91c6454750f 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -29,7 +29,6 @@ string_mt(const struct sk_buff *skb, struct xt_action_param *par) struct ts_state state; bool invert; - memset(&state, 0, sizeof(struct ts_state)); invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT; return (skb_find_text((struct sk_buff *)skb, conf->from_offset, diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index 1e779bb7fa435a7096342a1360b696b4a09a60cf..adf8b7900da2ac131c8ebc61d60d1373493d30c4 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c @@ -71,11 +71,7 @@ int __init netlbl_netlink_init(void) if (ret_val != 0) return ret_val; - ret_val = netlbl_unlabel_genl_init(); - if (ret_val != 0) - return ret_val; - - return 0; + return netlbl_unlabel_genl_init(); } /* diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c index e1638dab076da4688057889299244125b42419bb..b60aa35c074f5220f0b939b347881cab8e6c9db8 100644 --- a/net/nfc/digital_dep.c +++ b/net/nfc/digital_dep.c @@ -33,6 +33,8 @@ #define DIGITAL_ATR_REQ_MAX_SIZE 64 #define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30 +#define DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B \ + (DIGITAL_LR_BITS_PAYLOAD_SIZE_254B >> 4) #define DIGITAL_GB_BIT 0x02 #define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0) @@ -127,6 +129,98 @@ static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev, return 0; } +static void digital_in_recv_psl_res(struct nfc_digital_dev *ddev, void *arg, + struct sk_buff *resp) +{ + struct nfc_target *target = arg; + struct digital_psl_res *psl_res; + int rc; + + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + resp = NULL; + goto exit; + } + + rc = ddev->skb_check_crc(resp); + if (rc) { + PROTOCOL_ERR("14.4.1.6"); + goto exit; + } + + rc = digital_skb_pull_dep_sod(ddev, resp); + if (rc) { + PROTOCOL_ERR("14.4.1.2"); + goto exit; + } + + psl_res = (struct digital_psl_res *)resp->data; + + if ((resp->len != sizeof(*psl_res)) || + (psl_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN) || + (psl_res->cmd != DIGITAL_CMD_PSL_RES)) { + rc = -EIO; + goto exit; + } + + rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, + NFC_DIGITAL_RF_TECH_424F); + if (rc) + goto exit; + + rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, + NFC_DIGITAL_FRAMING_NFCF_NFC_DEP); + if (rc) + goto exit; + + if (!DIGITAL_DRV_CAPS_IN_CRC(ddev) && + (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)) { + ddev->skb_add_crc = digital_skb_add_crc_f; + ddev->skb_check_crc = digital_skb_check_crc_f; + } + + ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_424F; + + nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE, + NFC_RF_INITIATOR); + + ddev->curr_nfc_dep_pni = 0; + +exit: + dev_kfree_skb(resp); + + if (rc) + ddev->curr_protocol = 0; +} + +static int digital_in_send_psl_req(struct nfc_digital_dev *ddev, + struct nfc_target *target) +{ + struct sk_buff *skb; + struct digital_psl_req *psl_req; + + skb = digital_skb_alloc(ddev, sizeof(*psl_req)); + if (!skb) + return -ENOMEM; + + skb_put(skb, 
sizeof(*psl_req)); + + psl_req = (struct digital_psl_req *)skb->data; + + psl_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; + psl_req->cmd = DIGITAL_CMD_PSL_REQ; + psl_req->did = 0; + psl_req->brs = (0x2 << 3) | 0x2; /* 424F both directions */ + psl_req->fsl = DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B; + + digital_skb_push_dep_sod(ddev, skb); + + ddev->skb_add_crc(skb); + + return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res, + target); +} + static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { @@ -166,6 +260,13 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, if (rc) goto exit; + if ((ddev->protocols & NFC_PROTO_FELICA_MASK) && + (ddev->curr_rf_tech != NFC_DIGITAL_RF_TECH_424F)) { + rc = digital_in_send_psl_req(ddev, target); + if (!rc) + goto exit; + } + rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE, NFC_RF_INITIATOR); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 2b400e1a869522b9ecbca787a0ff2938f6109c26..90b16cb4005880214f1eca87cc5e72f2f75c7c34 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -231,6 +231,14 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) cmd.num_disc_configs++; } + if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && + (protocols & NFC_PROTO_ISO15693_MASK)) { + cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = + NCI_NFC_V_PASSIVE_POLL_MODE; + cmd.disc_configs[cmd.num_disc_configs].frequency = 1; + cmd.num_disc_configs++; + } + nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, (1 + (cmd.num_disc_configs * sizeof(struct disc_config))), &cmd); @@ -751,10 +759,6 @@ int nci_register_device(struct nci_dev *ndev) struct device *dev = &ndev->nfc_dev->dev; char name[32]; - rc = nfc_register_device(ndev->nfc_dev); - if (rc) - goto exit; - ndev->flags = 0; INIT_WORK(&ndev->cmd_work, nci_cmd_work); @@ -762,7 +766,7 @@ int nci_register_device(struct nci_dev *ndev) ndev->cmd_wq = create_singlethread_workqueue(name); if (!ndev->cmd_wq) { rc = -ENOMEM; - goto unreg_exit; + goto exit; } INIT_WORK(&ndev->rx_work, nci_rx_work); @@ -792,6 +796,10 @@ int nci_register_device(struct nci_dev *ndev) mutex_init(&ndev->req_lock); + rc = nfc_register_device(ndev->nfc_dev); + if (rc) + goto destroy_rx_wq_exit; + goto exit; destroy_rx_wq_exit: @@ -800,9 +808,6 @@ int nci_register_device(struct nci_dev *ndev) destroy_cmd_wq_exit: destroy_workqueue(ndev->cmd_wq); -unreg_exit: - nfc_unregister_device(ndev->nfc_dev); - exit: return rc; } diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index 6c3aef852876c98acc09f6c8c554a60df1d7990e..427ef2c7ab681245c8d392e76ccaace41cab10e3 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -241,9 +241,12 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) /* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); - if (ndev->target_active_prot == NFC_PROTO_MIFARE) { + if (ndev->target_active_prot == NFC_PROTO_MIFARE || + ndev->target_active_prot == NFC_PROTO_JEWEL || + ndev->target_active_prot == NFC_PROTO_FELICA || + ndev->target_active_prot == NFC_PROTO_ISO15693) { /* frame I/F => remove the status byte */ - pr_debug("NFC_PROTO_MIFARE => remove the status byte\n"); + pr_debug("frame I/F => remove the status byte\n"); skb_trim(skb, (skb->len - 1)); } diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index df91bb95b12aacd265f89ba82db7e3ae4cf0553e..205b35f666dba827d4daafd6d29c33483e73a609 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -2,6 +2,7 @@ * The NFC 
Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * + * Copyright (C) 2014 Marvell International Ltd. * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias @@ -155,6 +156,24 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev, return data; } +static __u8 *nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev, + struct rf_tech_specific_params_nfcv_poll *nfcv_poll, + __u8 *data) +{ + ++data; + nfcv_poll->dsfid = *data++; + memcpy(nfcv_poll->uid, data, NFC_ISO15693_UID_MAXSIZE); + data += NFC_ISO15693_UID_MAXSIZE; + return data; +} + +__u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol) +{ + if (ndev->ops->get_rfprotocol) + return ndev->ops->get_rfprotocol(ndev, rf_protocol); + return 0; +} + static int nci_add_new_protocol(struct nci_dev *ndev, struct nfc_target *target, __u8 rf_protocol, @@ -164,6 +183,7 @@ static int nci_add_new_protocol(struct nci_dev *ndev, struct rf_tech_specific_params_nfca_poll *nfca_poll; struct rf_tech_specific_params_nfcb_poll *nfcb_poll; struct rf_tech_specific_params_nfcf_poll *nfcf_poll; + struct rf_tech_specific_params_nfcv_poll *nfcv_poll; __u32 protocol; if (rf_protocol == NCI_RF_PROTOCOL_T1T) @@ -179,8 +199,10 @@ static int nci_add_new_protocol(struct nci_dev *ndev, protocol = NFC_PROTO_FELICA_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) protocol = NFC_PROTO_NFC_DEP_MASK; + else if (rf_protocol == NCI_RF_PROTOCOL_T5T) + protocol = NFC_PROTO_ISO15693_MASK; else - protocol = 0; + protocol = nci_get_prop_rf_protocol(ndev, rf_protocol); if (!(protocol & ndev->poll_prots)) { pr_err("the target found does not have the desired protocol\n"); @@ -213,6 +235,12 @@ static int nci_add_new_protocol(struct nci_dev *ndev, memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); } + } else if (rf_tech_and_mode == NCI_NFC_V_PASSIVE_POLL_MODE) { + nfcv_poll = (struct rf_tech_specific_params_nfcv_poll *)params; + + target->is_iso15693 = 1; + target->iso15693_dsfid = nfcv_poll->dsfid; + memcpy(target->iso15693_uid, nfcv_poll->uid, NFC_ISO15693_UID_MAXSIZE); } else { pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode); return -EPROTO; @@ -305,6 +333,11 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; + case NCI_NFC_V_PASSIVE_POLL_MODE: + data = nci_extract_rf_params_nfcv_passive_poll(ndev, + &(ntf.rf_tech_specific_params.nfcv_poll), data); + break; + default: pr_err("unsupported rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); @@ -455,6 +488,11 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; + case NCI_NFC_V_PASSIVE_POLL_MODE: + data = nci_extract_rf_params_nfcv_passive_poll(ndev, + &(ntf.rf_tech_specific_params.nfcv_poll), data); + break; + default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 6ecf491ad5098db632b294cf20ea5156870f0095..ba3bb8203b999bce68dba7e5d158465e1cab9e02 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -54,3 +54,14 @@ config OPENVSWITCH_VXLAN Say N to exclude this support and reduce the binary size. If unsure, say Y. 
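Before the Open vSwitch changes that follow, one note on the NCI hook added above: nci_get_prop_rf_protocol() lets the core fall back to the driver when the discovered RF protocol is not one of the standard NCI_RF_PROTOCOL_* values. A minimal driver-side sketch, assuming the callback mirrors that helper's signature (the foo_* name and the 0x80 vendor code are illustrative assumptions, not part of this patch):

#include <linux/nfc.h>
#include <net/nfc/nci_core.h>

/* Hypothetical vendor hook: map a proprietary RF protocol value to a
 * protocol mask the NCI core understands.  Returning 0 keeps the current
 * behaviour of rejecting the discovered target. */
static __u32 foo_nci_get_rfprotocol(struct nci_dev *ndev, __u8 rf_protocol)
{
        if (rf_protocol == 0x80)        /* assumed vendor-specific value */
                return NFC_PROTO_MIFARE_MASK;

        return 0;
}

Such a driver would point .get_rfprotocol in its struct nci_ops at this function; drivers that leave the hook unset keep the old behaviour, since nci_get_prop_rf_protocol() then returns 0.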
+ +config OPENVSWITCH_GENEVE + bool "Open vSwitch Geneve tunneling support" + depends on INET + depends on OPENVSWITCH + depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m) + default y + ---help--- + If you say Y here, then the Open vSwitch will be able to create geneve vports. + + Say N to exclude this support and reduce the binary size. diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 3591cb5dae9125570012c1c6dc1ab13694663db8..9a33a273c375bdad208f5fa6b36d73743c791d00 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -15,6 +15,10 @@ openvswitch-y := \ vport-internal_dev.o \ vport-netdev.o +ifneq ($(CONFIG_OPENVSWITCH_GENEVE),) +openvswitch-y += vport-geneve.o +endif + ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) openvswitch-y += vport-vxlan.o endif diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 5231652a95d90fa2c5d18193380c1a8b6c7dae62..006886dbee36b075fc7054ec1ddc87781a75d739 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -35,11 +35,78 @@ #include #include "datapath.h" +#include "flow.h" #include "vport.h" static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, int len); +struct deferred_action { + struct sk_buff *skb; + const struct nlattr *actions; + + /* Store pkt_key clone when creating deferred action. */ + struct sw_flow_key pkt_key; +}; + +#define DEFERRED_ACTION_FIFO_SIZE 10 +struct action_fifo { + int head; + int tail; + /* Deferred action fifo queue storage. */ + struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE]; +}; + +static struct action_fifo __percpu *action_fifos; +static DEFINE_PER_CPU(int, exec_actions_level); + +static void action_fifo_init(struct action_fifo *fifo) +{ + fifo->head = 0; + fifo->tail = 0; +} + +static bool action_fifo_is_empty(struct action_fifo *fifo) +{ + return (fifo->head == fifo->tail); +} + +static struct deferred_action *action_fifo_get(struct action_fifo *fifo) +{ + if (action_fifo_is_empty(fifo)) + return NULL; + + return &fifo->fifo[fifo->tail++]; +} + +static struct deferred_action *action_fifo_put(struct action_fifo *fifo) +{ + if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1) + return NULL; + + return &fifo->fifo[fifo->head++]; +} + +/* Return the new entry on success, or NULL if the fifo is full. */ +static struct deferred_action *add_deferred_actions(struct sk_buff *skb, + struct sw_flow_key *key, + const struct nlattr *attr) +{ + struct action_fifo *fifo; + struct deferred_action *da; + + fifo = this_cpu_ptr(action_fifos); + da = action_fifo_put(fifo); + if (da) { + da->skb = skb; + da->actions = attr; + da->pkt_key = *key; + } + + return da; +} + static int make_writable(struct sk_buff *skb, int write_len) { if (!pskb_may_pull(skb, write_len)) @@ -410,16 +477,14 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) } static int output_userspace(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr) { struct dp_upcall_info upcall; const struct nlattr *a; int rem; - BUG_ON(!OVS_CB(skb)->pkt_key); - upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = OVS_CB(skb)->pkt_key; + upcall.key = key; upcall.userdata = NULL; upcall.portid = 0; @@ -445,11 +510,10 @@ static bool last_action(const struct
nlattr *a, int rem) } static int sample(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr) { const struct nlattr *acts_list = NULL; const struct nlattr *a; - struct sk_buff *sample_skb; int rem; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; @@ -469,31 +533,47 @@ static int sample(struct datapath *dp, struct sk_buff *skb, rem = nla_len(acts_list); a = nla_data(acts_list); - /* Actions list is either empty or only contains a single user-space - * action, the latter being a special case as it is the only known - * usage of the sample action. - * In these special cases don't clone the skb as there are no - * side-effects in the nested actions. - * Otherwise, clone in case the nested actions have side effects. + /* Actions list is empty, do nothing */ + if (unlikely(!rem)) + return 0; + + /* The only known usage of sample action is having a single user-space + * action. Treat this usage as a special case. + * The output_userspace() should clone the skb to be sent to the + * user space. This skb will be consumed by its caller. */ - if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE && - last_action(a, rem)))) { - sample_skb = skb; - skb_get(skb); - } else { - sample_skb = skb_clone(skb, GFP_ATOMIC); - if (!sample_skb) /* Skip sample action when out of memory. */ - return 0; + if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && + last_action(a, rem))) + return output_userspace(dp, skb, key, a); + + skb = skb_clone(skb, GFP_ATOMIC); + if (!skb) + /* Skip the sample action when out of memory. */ + return 0; + + if (!add_deferred_actions(skb, key, a)) { + if (net_ratelimit()) + pr_warn("%s: deferred actions limit reached, dropping sample action\n", + ovs_dp_name(dp)); + + kfree_skb(skb); } + return 0; +} - /* Note that do_execute_actions() never consumes skb. - * In the case where skb has been cloned above it is the clone that - * is consumed. Otherwise the skb_get(skb) call prevents - * consumption by do_execute_actions(). Thus, it is safe to simply - * return the error code and let the caller (also - * do_execute_actions()) free skb on error. - */ - return do_execute_actions(dp, sample_skb, a, rem); +static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, + const struct nlattr *attr) +{ + struct ovs_action_hash *hash_act = nla_data(attr); + u32 hash = 0; + + /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */ + hash = skb_get_hash(skb); + hash = jhash_1word(hash, hash_act->hash_basis); + if (!hash) + hash = 0x1; + + key->ovs_flow_hash = hash; } static int execute_set_action(struct sk_buff *skb, @@ -510,8 +590,8 @@ static int execute_set_action(struct sk_buff *skb, skb->mark = nla_get_u32(nested_attr); break; - case OVS_KEY_ATTR_IPV4_TUNNEL: - OVS_CB(skb)->tun_key = nla_data(nested_attr); + case OVS_KEY_ATTR_TUNNEL_INFO: + OVS_CB(skb)->egress_tun_info = nla_data(nested_attr); break; case OVS_KEY_ATTR_ETHERNET: @@ -542,8 +622,47 @@ static int execute_set_action(struct sk_buff *skb, return err; } +static int execute_recirc(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, + const struct nlattr *a, int rem) +{ + struct deferred_action *da; + int err; + + err = ovs_flow_key_update(skb, key); + if (err) + return err; + + if (!last_action(a, rem)) { + /* Recirc action is not the last action + * of the action list, need to clone the skb.
+ */ + skb = skb_clone(skb, GFP_ATOMIC); + + /* Skip the recirc action when out of memory, but + * continue on with the rest of the action list. + */ + if (!skb) + return 0; + } + + da = add_deferred_actions(skb, key, NULL); + if (da) { + da->pkt_key.recirc_id = nla_get_u32(a); + } else { + kfree_skb(skb); + + if (net_ratelimit()) + pr_warn("%s: deferred action limit reached, drop recirc action\n", + ovs_dp_name(dp)); + } + + return 0; +} + /* Execute a list of actions against 'skb'. */ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, int len) { /* Every output action needs a separate clone of 'skb', but the common @@ -569,7 +688,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_USERSPACE: - output_userspace(dp, skb, a); + output_userspace(dp, skb, key, a); + break; + + case OVS_ACTION_ATTR_HASH: + execute_hash(skb, key, a); break; case OVS_ACTION_ATTR_PUSH_VLAN: @@ -582,12 +705,23 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, err = pop_vlan(skb); break; + case OVS_ACTION_ATTR_RECIRC: + err = execute_recirc(dp, skb, key, a, rem); + if (last_action(a, rem)) { + /* If this is the last action, the skb has + * been consumed or freed. + * Return immediately. + */ + return err; + } + break; + case OVS_ACTION_ATTR_SET: err = execute_set_action(skb, nla_data(a)); break; case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, a); + err = sample(dp, skb, key, a); if (unlikely(err)) /* skb already freed. */ return err; break; @@ -607,11 +741,64 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, return 0; } +static void process_deferred_actions(struct datapath *dp) +{ + struct action_fifo *fifo = this_cpu_ptr(action_fifos); + + /* Do not touch the FIFO in case there are no deferred actions. */ + if (action_fifo_is_empty(fifo)) + return; + + /* Finish executing all deferred actions. */ + do { + struct deferred_action *da = action_fifo_get(fifo); + struct sk_buff *skb = da->skb; + struct sw_flow_key *key = &da->pkt_key; + const struct nlattr *actions = da->actions; + + if (actions) + do_execute_actions(dp, skb, key, actions, + nla_len(actions)); + else + ovs_dp_process_packet(skb, key); + } while (!action_fifo_is_empty(fifo)); + + /* Reset FIFO for the next packet. */ + action_fifo_init(fifo); +} + /* Execute a list of actions against 'skb'.
*/ -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) +int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key) { - struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + int level = this_cpu_read(exec_actions_level); + struct sw_flow_actions *acts; + int err; + + acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + + this_cpu_inc(exec_actions_level); + OVS_CB(skb)->egress_tun_info = NULL; + err = do_execute_actions(dp, skb, key, + acts->actions, acts->actions_len); - OVS_CB(skb)->tun_key = NULL; - return do_execute_actions(dp, skb, acts->actions, acts->actions_len); + if (!level) + process_deferred_actions(dp); + + this_cpu_dec(exec_actions_level); + return err; +} + +int action_fifos_init(void) +{ + action_fifos = alloc_percpu(struct action_fifo); + if (!action_fifos) + return -ENOMEM; + + return 0; +} + +void action_fifos_exit(void) +{ + free_percpu(action_fifos); } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 64dc864a417f2b5910d71ab8641cd31d5367eba1..2e31d9e7f4dc4ca1c002b3c488b838236dcdf2de 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -157,7 +157,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex) } /* Must be called with rcu_read_lock or ovs_mutex. */ -static const char *ovs_dp_name(const struct datapath *dp) +const char *ovs_dp_name(const struct datapath *dp) { struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL); return vport->ops->get_name(vport); @@ -238,32 +238,25 @@ void ovs_dp_detach_port(struct vport *p) } /* Must be called with rcu_read_lock. */ -void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) +void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) { + const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; struct dp_stats_percpu *stats; - struct sw_flow_key key; u64 *stats_counter; u32 n_mask_hit; - int error; stats = this_cpu_ptr(dp->stats_percpu); - /* Extract flow from 'skb' into 'key'. */ - error = ovs_flow_extract(skb, p->port_no, &key); - if (unlikely(error)) { - kfree_skb(skb); - return; - } - /* Look up flow. 
*/ - flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit); + flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); if (unlikely(!flow)) { struct dp_upcall_info upcall; + int error; upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = &key; + upcall.key = key; upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); error = ovs_dp_upcall(dp, skb, &upcall); @@ -276,10 +269,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) } OVS_CB(skb)->flow = flow; - OVS_CB(skb)->pkt_key = &key; - ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb); - ovs_execute_actions(dp, skb); + ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb); + ovs_execute_actions(dp, skb, key); stats_counter = &stats->n_hit; out: @@ -377,6 +369,8 @@ static size_t key_attr_size(void) + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ + + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ @@ -516,6 +510,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct sw_flow *flow; struct datapath *dp; struct ethhdr *eth; + struct vport *input_vport; int len; int err; @@ -550,13 +545,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(flow)) goto err_kfree_skb; - err = ovs_flow_extract(packet, -1, &flow->key); + err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet, + &flow->key); if (err) goto err_flow_free; - err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]); - if (err) - goto err_flow_free; acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); err = PTR_ERR(acts); if (IS_ERR(acts)) @@ -564,12 +557,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts); - rcu_assign_pointer(flow->sf_acts, acts); if (err) goto err_flow_free; + rcu_assign_pointer(flow->sf_acts, acts); + + OVS_CB(packet)->egress_tun_info = NULL; OVS_CB(packet)->flow = flow; - OVS_CB(packet)->pkt_key = &flow->key; packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; @@ -579,8 +573,17 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) if (!dp) goto err_unlock; + input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port); + if (!input_vport) + input_vport = ovs_vport_rcu(dp, OVSP_LOCAL); + + if (!input_vport) + goto err_unlock; + + OVS_CB(packet)->input_vport = input_vport; + local_bh_disable(); - err = ovs_execute_actions(dp, packet); + err = ovs_execute_actions(dp, packet, &flow->key); local_bh_enable(); rcu_read_unlock(); @@ -933,11 +936,34 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) return error; } +static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, + const struct sw_flow_key *key, + const struct sw_flow_mask *mask) +{ + struct sw_flow_actions *acts; + struct sw_flow_key masked_key; + int error; + + acts = ovs_nla_alloc_flow_actions(nla_len(a)); + if (IS_ERR(acts)) + return acts; + + ovs_flow_mask_key(&masked_key, key, mask); + error = ovs_nla_copy_actions(a, &masked_key, 0, &acts); + if (error) { + OVS_NLERR("Flow actions may not be safe on all 
matching packets.\n"); + kfree(acts); + return ERR_PTR(error); + } + + return acts; +} + static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) { struct nlattr **a = info->attrs; struct ovs_header *ovs_header = info->userhdr; - struct sw_flow_key key, masked_key; + struct sw_flow_key key; struct sw_flow *flow; struct sw_flow_mask mask; struct sk_buff *reply = NULL; @@ -959,17 +985,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) /* Validate actions. */ if (a[OVS_FLOW_ATTR_ACTIONS]) { - acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); - error = PTR_ERR(acts); - if (IS_ERR(acts)) + acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask); + if (IS_ERR(acts)) { + error = PTR_ERR(acts); goto error; - - ovs_flow_mask_key(&masked_key, &key, &mask); - error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], - &masked_key, 0, &acts); - if (error) { - OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); - goto err_kfree_acts; } } @@ -2067,10 +2086,14 @@ static int __init dp_init(void) pr_info("Open vSwitch switching datapath\n"); - err = ovs_internal_dev_rtnl_link_register(); + err = action_fifos_init(); if (err) goto error; + err = ovs_internal_dev_rtnl_link_register(); + if (err) + goto error_action_fifos_exit; + err = ovs_flow_init(); if (err) goto error_unreg_rtnl_link; @@ -2103,6 +2126,8 @@ static int __init dp_init(void) ovs_flow_exit(); error_unreg_rtnl_link: ovs_internal_dev_rtnl_link_unregister(); +error_action_fifos_exit: + action_fifos_exit(); error: return err; } @@ -2116,6 +2141,7 @@ static void dp_cleanup(void) ovs_vport_exit(); ovs_flow_exit(); ovs_internal_dev_rtnl_link_unregister(); + action_fifos_exit(); } module_init(dp_init); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 701b5738c38a634285f331e4e05288ed557e6332..974135439c5c77af2e7ee0684054589e1a81779c 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -95,14 +95,15 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB * @flow: The flow associated with this packet. May be %NULL if no flow. - * @pkt_key: The flow information extracted from the packet. Must be nonnull. - * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the - * packet is not being tunneled. + * @egress_tun_key: Tunnel information about this packet on egress path. + * NULL if the packet is not being tunneled. + * @input_vport: The original vport packet came in on. This value is cached + * when a packet is received by OVS. 
*/ struct ovs_skb_cb { struct sw_flow *flow; - struct sw_flow_key *pkt_key; - struct ovs_key_ipv4_tunnel *tun_key; + struct ovs_tunnel_info *egress_tun_info; + struct vport *input_vport; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) @@ -183,17 +184,23 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n extern struct notifier_block ovs_dp_device_notifier; extern struct genl_family dp_vport_genl_family; -void ovs_dp_process_received_packet(struct vport *, struct sk_buff *); +void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key); void ovs_dp_detach_port(struct vport *); int ovs_dp_upcall(struct datapath *, struct sk_buff *, const struct dp_upcall_info *); +const char *ovs_dp_name(const struct datapath *dp); struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, u8 cmd); -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); +int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *); + void ovs_dp_notify_wq(struct work_struct *work); +int action_fifos_init(void); +void action_fifos_exit(void); + #define OVS_NLERR(fmt, ...) \ do { \ if (net_ratelimit()) \ diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index d07ab538fc9d37b78082e88906fe41c433467166..62db02ba36bcf023f0d560b3e7a2a4609621022a 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -16,8 +16,6 @@ * 02110-1301, USA */ -#include "flow.h" -#include "datapath.h" #include #include #include @@ -46,6 +44,10 @@ #include #include +#include "datapath.h" +#include "flow.h" +#include "flow_netlink.h" + u64 ovs_flow_used_time(unsigned long flow_jiffies) { struct timespec cur_ts; @@ -89,7 +91,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, * allocated stats as we have already locked them. */ if (likely(flow->stats_last_writer != NUMA_NO_NODE) - && likely(!rcu_dereference(flow->stats[node]))) { + && likely(!rcu_access_pointer(flow->stats[node]))) { /* Try to allocate node-specific stats. */ struct flow_stats *new_stats; @@ -420,10 +422,9 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, } /** - * ovs_flow_extract - extracts a flow key from an Ethernet frame. + * key_extract - extracts a flow key from an Ethernet frame. * @skb: sk_buff that contains the frame, with skb->data pointing to the * Ethernet header - * @in_port: port number on which @skb was received. * @key: output flow key * * The caller must ensure that skb->len >= ETH_HLEN. @@ -442,18 +443,13 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, * of a correct length, otherwise the same as skb->network_header. * For other key->eth.type values it is left untouched. 
*/ -int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) +static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) { int error; struct ethhdr *eth; - memset(key, 0, sizeof(*key)); - - key->phy.priority = skb->priority; - if (OVS_CB(skb)->tun_key) - memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); - key->phy.in_port = in_port; - key->phy.skb_mark = skb->mark; + /* Flags are always used as part of stats */ + key->tp.flags = 0; skb_reset_mac_header(skb); @@ -469,6 +465,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) * update skb->csum here. */ + key->eth.tci = 0; if (vlan_tx_tag_present(skb)) key->eth.tci = htons(skb->vlan_tci); else if (eth->h_proto == htons(ETH_P_8021Q)) @@ -489,6 +486,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) error = check_iphdr(skb); if (unlikely(error)) { + memset(&key->ip, 0, sizeof(key->ip)); + memset(&key->ipv4, 0, sizeof(key->ipv4)); if (error == -EINVAL) { skb->transport_header = skb->network_header; error = 0; @@ -510,8 +509,10 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) return 0; } if (nh->frag_off & htons(IP_MF) || - skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; + else + key->ip.frag = OVS_FRAG_TYPE_NONE; /* Transport layer. */ if (key->ip.proto == IPPROTO_TCP) { @@ -520,18 +521,25 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) key->tp.src = tcp->source; key->tp.dst = tcp->dest; key->tp.flags = TCP_FLAGS_BE16(tcp); + } else { + memset(&key->tp, 0, sizeof(key->tp)); } + } else if (key->ip.proto == IPPROTO_UDP) { if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->tp.src = udp->source; key->tp.dst = udp->dest; + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } else if (key->ip.proto == IPPROTO_SCTP) { if (sctphdr_ok(skb)) { struct sctphdr *sctp = sctp_hdr(skb); key->tp.src = sctp->source; key->tp.dst = sctp->dest; + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } else if (key->ip.proto == IPPROTO_ICMP) { if (icmphdr_ok(skb)) { @@ -541,33 +549,44 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) * them in 16-bit network byte order. */ key->tp.src = htons(icmp->type); key->tp.dst = htons(icmp->code); + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } - } else if ((key->eth.type == htons(ETH_P_ARP) || - key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { + } else if (key->eth.type == htons(ETH_P_ARP) || + key->eth.type == htons(ETH_P_RARP)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); - if (arp->ar_hrd == htons(ARPHRD_ETHER) - && arp->ar_pro == htons(ETH_P_IP) - && arp->ar_hln == ETH_ALEN - && arp->ar_pln == 4) { + if (arphdr_ok(skb) && + arp->ar_hrd == htons(ARPHRD_ETHER) && + arp->ar_pro == htons(ETH_P_IP) && + arp->ar_hln == ETH_ALEN && + arp->ar_pln == 4) { /* We only match on the lower 8 bits of the opcode. 
*/ if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); + else + key->ip.proto = 0; + memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha); ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha); + } else { + memset(&key->ip, 0, sizeof(key->ip)); + memset(&key->ipv4, 0, sizeof(key->ipv4)); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ nh_len = parse_ipv6hdr(skb, key); if (unlikely(nh_len < 0)) { + memset(&key->ip, 0, sizeof(key->ip)); + memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr)); if (nh_len == -EINVAL) { skb->transport_header = skb->network_header; error = 0; @@ -589,27 +608,87 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) key->tp.src = tcp->source; key->tp.dst = tcp->dest; key->tp.flags = TCP_FLAGS_BE16(tcp); + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } else if (key->ip.proto == NEXTHDR_UDP) { if (udphdr_ok(skb)) { struct udphdr *udp = udp_hdr(skb); key->tp.src = udp->source; key->tp.dst = udp->dest; + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } else if (key->ip.proto == NEXTHDR_SCTP) { if (sctphdr_ok(skb)) { struct sctphdr *sctp = sctp_hdr(skb); key->tp.src = sctp->source; key->tp.dst = sctp->dest; + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } else if (key->ip.proto == NEXTHDR_ICMP) { if (icmp6hdr_ok(skb)) { error = parse_icmpv6(skb, key, nh_len); if (error) return error; + } else { + memset(&key->tp, 0, sizeof(key->tp)); } } } - return 0; } + +int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) +{ + return key_extract(skb, key); +} + +int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, + struct sk_buff *skb, struct sw_flow_key *key) +{ + /* Extract metadata from packet. */ + if (tun_info) { + memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key)); + + if (tun_info->options) { + BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) * + 8)) - 1 + > sizeof(key->tun_opts)); + memcpy(GENEVE_OPTS(key, tun_info->options_len), + tun_info->options, tun_info->options_len); + key->tun_opts_len = tun_info->options_len; + } else { + key->tun_opts_len = 0; + } + } else { + key->tun_opts_len = 0; + memset(&key->tun_key, 0, sizeof(key->tun_key)); + } + + key->phy.priority = skb->priority; + key->phy.in_port = OVS_CB(skb)->input_vport->port_no; + key->phy.skb_mark = skb->mark; + key->ovs_flow_hash = 0; + key->recirc_id = 0; + + /* Flags are always used as part of stats */ + key->tp.flags = 0; + + return key_extract(skb, key); +} + +int ovs_flow_key_extract_userspace(const struct nlattr *attr, + struct sk_buff *skb, + struct sw_flow_key *key) +{ + int err; + + /* Extract metadata from netlink attributes. */ + err = ovs_nla_get_flow_metadata(attr, key); + if (err) + return err; + + return key_extract(skb, key); +} diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 5e5aaed3a85b0761a4831894e0c2645961790f8c..71813318c8c73ef1efb27f8a22bcb062ac81a89a 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -49,29 +49,53 @@ struct ovs_key_ipv4_tunnel { u8 ipv4_ttl; } __packed __aligned(4); /* Minimize padding. 
*/ -static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key, - const struct iphdr *iph, __be64 tun_id, - __be16 tun_flags) +struct ovs_tunnel_info { + struct ovs_key_ipv4_tunnel tunnel; + struct geneve_opt *options; + u8 options_len; +}; + +/* Store options at the end of the array if they are less than the + * maximum size. This allows us to get the benefits of variable length + * matching for small options. + */ +#define GENEVE_OPTS(flow_key, opt_len) \ + ((struct geneve_opt *)((flow_key)->tun_opts + \ + FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \ + opt_len)) + +static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info, + const struct iphdr *iph, + __be64 tun_id, __be16 tun_flags, + struct geneve_opt *opts, + u8 opts_len) { - tun_key->tun_id = tun_id; - tun_key->ipv4_src = iph->saddr; - tun_key->ipv4_dst = iph->daddr; - tun_key->ipv4_tos = iph->tos; - tun_key->ipv4_ttl = iph->ttl; - tun_key->tun_flags = tun_flags; + tun_info->tunnel.tun_id = tun_id; + tun_info->tunnel.ipv4_src = iph->saddr; + tun_info->tunnel.ipv4_dst = iph->daddr; + tun_info->tunnel.ipv4_tos = iph->tos; + tun_info->tunnel.ipv4_ttl = iph->ttl; + tun_info->tunnel.tun_flags = tun_flags; /* clear struct padding. */ - memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0, - sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE); + memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0, + sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE); + + tun_info->options = opts; + tun_info->options_len = opts_len; } struct sw_flow_key { + u8 tun_opts[255]; + u8 tun_opts_len; struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */ struct { u32 priority; /* Packet QoS priority. */ u32 skb_mark; /* SKB mark. */ u16 in_port; /* Input switch port (or DP_MAX_PORTS). */ } __packed phy; /* Safe when right after 'tun_key'. */ + u32 ovs_flow_hash; /* Datapath computed hash value. */ + u32 recirc_id; /* Recirculation ID. */ struct { u8 src[ETH_ALEN]; /* Ethernet source address. */ u8 dst[ETH_ALEN]; /* Ethernet destination address. */ @@ -187,6 +211,12 @@ void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); -int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); +int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); +int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb, + struct sw_flow_key *key); +/* Extract key from packet coming from userspace. */ +int ovs_flow_key_extract_userspace(const struct nlattr *attr, + struct sk_buff *skb, + struct sw_flow_key *key); #endif /* flow.h */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index d757848da89ca734ac95cef806c2dd314f881bf6..368f23307911cf14d4efc3082f379b58d4e4ae3d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -88,18 +89,20 @@ static void update_range__(struct sw_flow_match *match, } \ } while (0) -#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ - do { \ - update_range__(match, offsetof(struct sw_flow_key, field), \ - len, is_mask); \ - if (is_mask) { \ - if ((match)->mask) \ - memcpy(&(match)->mask->key.field, value_p, len);\ - } else { \ - memcpy(&(match)->key->field, value_p, len); \ - } \ +#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ + do { \ + update_range__(match, offset, len, is_mask); \ + if (is_mask) \ + memcpy((u8 *)&(match)->mask->key + offset, value_p, \ + len); \ + else \ + memcpy((u8 *)(match)->key + offset, value_p, len); \ } while (0) +#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ + SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ + value_p, len, is_mask) + static u16 range_n_bytes(const struct sw_flow_key_range *range) { return range->end - range->start; @@ -251,6 +254,8 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), + [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), + [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), [OVS_KEY_ATTR_TUNNEL] = -1, }; @@ -333,6 +338,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, int rem; bool ttl = false; __be16 tun_flags = 0; + unsigned long opt_key_offset; nla_for_each_nested(a, attr, rem) { int type = nla_type(a); @@ -344,6 +350,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, [OVS_TUNNEL_KEY_ATTR_TTL] = 1, [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + [OVS_TUNNEL_KEY_ATTR_OAM] = 0, + [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1, }; if (type > OVS_TUNNEL_KEY_ATTR_MAX) { @@ -352,7 +360,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, return -EINVAL; } - if (ovs_tunnel_key_lens[type] != nla_len(a)) { + if (ovs_tunnel_key_lens[type] != nla_len(a) && + ovs_tunnel_key_lens[type] != -1) { OVS_NLERR("IPv4 tunnel attribute type has unexpected " " length (type=%d, length=%d, expected=%d).\n", type, nla_len(a), ovs_tunnel_key_lens[type]); @@ -388,7 +397,63 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, case OVS_TUNNEL_KEY_ATTR_CSUM: tun_flags |= TUNNEL_CSUM; break; + case OVS_TUNNEL_KEY_ATTR_OAM: + tun_flags |= TUNNEL_OAM; + break; + case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: + tun_flags |= TUNNEL_OPTIONS_PRESENT; + if (nla_len(a) > sizeof(match->key->tun_opts)) { + OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n", + nla_len(a), + sizeof(match->key->tun_opts)); + return -EINVAL; + } + + if (nla_len(a) % 4 != 0) { + OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n", + nla_len(a)); + return -EINVAL; + } + + /* We need to record the length of the options passed + * down, otherwise packets with the same format but + * additional options will be silently matched. + */ + if (!is_mask) { + SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a), + false); + } else { + /* This is somewhat unusual because it looks at + * both the key and mask while parsing the + * attributes (and by extension assumes the key + * is parsed first). 
Normally, we would verify + * that each is the correct length and that the + * attributes line up in the validate function. + * However, that is difficult because this is + * variable length and we won't have the + * information later. + */ + if (match->key->tun_opts_len != nla_len(a)) { + OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).", + match->key->tun_opts_len, + nla_len(a)); + return -EINVAL; + } + + SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, + true); + } + + opt_key_offset = (unsigned long)GENEVE_OPTS( + (struct sw_flow_key *)0, + nla_len(a)); + SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, + nla_data(a), nla_len(a), + is_mask); + break; default: + OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n", + type); return -EINVAL; } } @@ -415,45 +480,80 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, return 0; } -static int ipv4_tun_to_nlattr(struct sk_buff *skb, - const struct ovs_key_ipv4_tunnel *tun_key, - const struct ovs_key_ipv4_tunnel *output) +static int __ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *output, + const struct geneve_opt *tun_opts, + int swkey_tun_opts_len) { - struct nlattr *nla; - - nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); - if (!nla) - return -EMSGSIZE; - if (output->tun_flags & TUNNEL_KEY && nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id)) return -EMSGSIZE; if (output->ipv4_src && - nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src)) return -EMSGSIZE; if (output->ipv4_dst && - nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) + nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst)) return -EMSGSIZE; if (output->ipv4_tos && - nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) + nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos)) return -EMSGSIZE; if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl)) return -EMSGSIZE; if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) && - nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT)) return -EMSGSIZE; if ((output->tun_flags & TUNNEL_CSUM) && - nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) + return -EMSGSIZE; + if ((output->tun_flags & TUNNEL_OAM) && + nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM)) + return -EMSGSIZE; + if (tun_opts && + nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, + swkey_tun_opts_len, tun_opts)) return -EMSGSIZE; - nla_nest_end(skb, nla); return 0; } +static int ipv4_tun_to_nlattr(struct sk_buff *skb, + const struct ovs_key_ipv4_tunnel *output, + const struct geneve_opt *tun_opts, + int swkey_tun_opts_len) +{ + struct nlattr *nla; + int err; + + nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL); + if (!nla) + return -EMSGSIZE; + + err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len); + if (err) + return err; + + nla_nest_end(skb, nla); + return 0; +} + static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, const struct nlattr **a, bool is_mask) { + if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) { + u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]); + + SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH); + } + + if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) { + u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]); + + SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask); + *attrs &= ~(1 << 
OVS_KEY_ATTR_RECIRC_ID); + } + if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { SW_FLOW_KEY_PUT(match, phy.priority, nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask); @@ -836,7 +936,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, /** * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. - * @flow: Receives extracted in_port, priority, tun_key and skb_mark. + * @key: Receives extracted in_port, priority, tun_key and skb_mark. * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. * @@ -846,32 +946,24 @@ int ovs_nla_get_match(struct sw_flow_match *match, * extracted from the packet itself. */ -int ovs_nla_get_flow_metadata(struct sw_flow *flow, - const struct nlattr *attr) +int ovs_nla_get_flow_metadata(const struct nlattr *attr, + struct sw_flow_key *key) { - struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; + struct sw_flow_match match; u64 attrs = 0; int err; - struct sw_flow_match match; - - flow->key.phy.in_port = DP_MAX_PORTS; - flow->key.phy.priority = 0; - flow->key.phy.skb_mark = 0; - memset(tun_key, 0, sizeof(flow->key.tun_key)); err = parse_flow_nlattrs(attr, a, &attrs); if (err) return -EINVAL; memset(&match, 0, sizeof(match)); - match.key = &flow->key; + match.key = key; - err = metadata_from_nlattrs(&match, &attrs, a, false); - if (err) - return err; + key->phy.in_port = DP_MAX_PORTS; - return 0; + return metadata_from_nlattrs(&match, &attrs, a, false); } int ovs_nla_put_flow(const struct sw_flow_key *swkey, @@ -881,13 +973,26 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey, struct nlattr *nla, *encap; bool is_mask = (swkey != output); - if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) + if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id)) + goto nla_put_failure; + + if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash)) goto nla_put_failure; - if ((swkey->tun_key.ipv4_dst || is_mask) && - ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) + if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) goto nla_put_failure; + if ((swkey->tun_key.ipv4_dst || is_mask)) { + const struct geneve_opt *opts = NULL; + + if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT) + opts = GENEVE_OPTS(output, swkey->tun_opts_len); + + if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts, + swkey->tun_opts_len)) + goto nla_put_failure; + } + if (swkey->phy.in_port == DP_MAX_PORTS) { if (is_mask && (output->phy.in_port == 0xffff)) if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) @@ -1127,13 +1232,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset); } -static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len) +static struct nlattr *__add_action(struct sw_flow_actions **sfa, + int attrtype, void *data, int len) { struct nlattr *a; a = reserve_sfa_size(sfa, nla_attr_size(len)); if (IS_ERR(a)) - return PTR_ERR(a); + return a; a->nla_type = attrtype; a->nla_len = nla_attr_size(len); @@ -1142,6 +1248,18 @@ static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, in memcpy(nla_data(a), data, len); memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); + return a; +} + +static int add_action(struct sw_flow_actions **sfa, int attrtype, + void *data, int len) +{ + struct nlattr *a; + + a = __add_action(sfa, attrtype, data, len); + if (IS_ERR(a)) + return PTR_ERR(a); + return 0; } @@ 
-1247,6 +1365,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, { struct sw_flow_match match; struct sw_flow_key key; + struct ovs_tunnel_info *tun_info; + struct nlattr *a; int err, start; ovs_match_init(&match, &key, NULL); @@ -1254,12 +1374,56 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, if (err) return err; + if (key.tun_opts_len) { + struct geneve_opt *option = GENEVE_OPTS(&key, + key.tun_opts_len); + int opts_len = key.tun_opts_len; + bool crit_opt = false; + + while (opts_len > 0) { + int len; + + if (opts_len < sizeof(*option)) + return -EINVAL; + + len = sizeof(*option) + option->length * 4; + if (len > opts_len) + return -EINVAL; + + crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE); + + option = (struct geneve_opt *)((u8 *)option + len); + opts_len -= len; + }; + + key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0; + }; + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); if (start < 0) return start; - err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, - sizeof(match.key->tun_key)); + a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL, + sizeof(*tun_info) + key.tun_opts_len); + if (IS_ERR(a)) + return PTR_ERR(a); + + tun_info = nla_data(a); + tun_info->tunnel = key.tun_key; + tun_info->options_len = key.tun_opts_len; + + if (tun_info->options_len) { + /* We need to store the options in the action itself since + * everything else will go away after flow setup. We can append + * it to tun_info and then point there. + */ + memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len), + key.tun_opts_len); + tun_info->options = (struct geneve_opt *)(tun_info + 1); + } else { + tun_info->options = NULL; + } + add_nested_action_end(*sfa, start); return err; @@ -1409,11 +1573,13 @@ int ovs_nla_copy_actions(const struct nlattr *attr, /* Expected argument lengths, (u32)-1 for variable length. */ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), + [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, - [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 + [OVS_ACTION_ATTR_SAMPLE] = (u32)-1, + [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash) }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); @@ -1440,6 +1606,18 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return -EINVAL; break; + case OVS_ACTION_ATTR_HASH: { + const struct ovs_action_hash *act_hash = nla_data(a); + + switch (act_hash->hash_alg) { + case OVS_HASH_ALG_L4: + break; + default: + return -EINVAL; + } + + break; + } case OVS_ACTION_ATTR_POP_VLAN: break; @@ -1452,6 +1630,9 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return -EINVAL; break; + case OVS_ACTION_ATTR_RECIRC: + break; + case OVS_ACTION_ATTR_SET: err = validate_set(a, key, sfa, &skip_copy); if (err) @@ -1525,17 +1706,22 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) int err; switch (key_type) { - case OVS_KEY_ATTR_IPV4_TUNNEL: + case OVS_KEY_ATTR_TUNNEL_INFO: { + struct ovs_tunnel_info *tun_info = nla_data(ovs_key); + start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); if (!start) return -EMSGSIZE; - err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key), - nla_data(ovs_key)); + err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel, + tun_info->options_len ? 
+ tun_info->options : NULL, + tun_info->options_len); if (err) return err; nla_nest_end(skb, start); break; + } default: if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) return -EMSGSIZE; diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 440151045d3946329bf01e4fd5a1c81f0fd4e906..206e45add8882540b56426c9e57fdca86a0e20a5 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h @@ -42,8 +42,8 @@ void ovs_match_init(struct sw_flow_match *match, int ovs_nla_put_flow(const struct sw_flow_key *, const struct sw_flow_key *, struct sk_buff *); -int ovs_nla_get_flow_metadata(struct sw_flow *flow, - const struct nlattr *attr); +int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *); + int ovs_nla_get_match(struct sw_flow_match *match, const struct nlattr *, const struct nlattr *); diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c new file mode 100644 index 0000000000000000000000000000000000000000..910b3ef2c0d5968f0756a0fbc904d349853b0ed1 --- /dev/null +++ b/net/openvswitch/vport-geneve.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2014 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "datapath.h" +#include "vport.h" + +/** + * struct geneve_port - Keeps track of open UDP ports + * @sock: The socket created for this port number. + * @name: vport name. + */ +struct geneve_port { + struct geneve_sock *gs; + char name[IFNAMSIZ]; +}; + +static LIST_HEAD(geneve_ports); + +static inline struct geneve_port *geneve_vport(const struct vport *vport) +{ + return vport_priv(vport); +} + +static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) +{ + return (struct genevehdr *)(udp_hdr(skb) + 1); +} + +/* Convert 64 bit tunnel ID to 24 bit VNI. */ +static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) +{ +#ifdef __BIG_ENDIAN + vni[0] = (__force __u8)(tun_id >> 16); + vni[1] = (__force __u8)(tun_id >> 8); + vni[2] = (__force __u8)tun_id; +#else + vni[0] = (__force __u8)((__force u64)tun_id >> 40); + vni[1] = (__force __u8)((__force u64)tun_id >> 48); + vni[2] = (__force __u8)((__force u64)tun_id >> 56); +#endif +} + +/* Convert 24 bit VNI to 64 bit tunnel ID. */ +static __be64 vni_to_tunnel_id(__u8 *vni) +{ +#ifdef __BIG_ENDIAN + return (vni[0] << 16) | (vni[1] << 8) | vni[2]; +#else + return (__force __be64)(((__force u64)vni[0] << 40) | + ((__force u64)vni[1] << 48) | + ((__force u64)vni[2] << 56)); +#endif +} + +static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb) +{ + struct vport *vport = gs->rcv_data; + struct genevehdr *geneveh = geneve_hdr(skb); + int opts_len; + struct ovs_tunnel_info tun_info; + __be64 key; + __be16 flags; + + opts_len = geneveh->opt_len * 4; + + flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT | + (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) | + (geneveh->oam ? TUNNEL_OAM : 0) | + (geneveh->critical ? 
TUNNEL_CRIT_OPT : 0); + + key = vni_to_tunnel_id(geneveh->vni); + + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags, + geneveh->options, opts_len); + + ovs_vport_receive(vport, skb, &tun_info); +} + +static int geneve_get_options(const struct vport *vport, + struct sk_buff *skb) +{ + struct geneve_port *geneve_port = geneve_vport(vport); + struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk); + + if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport))) + return -EMSGSIZE; + return 0; +} + +static void geneve_tnl_destroy(struct vport *vport) +{ + struct geneve_port *geneve_port = geneve_vport(vport); + + geneve_sock_release(geneve_port->gs); + + ovs_vport_deferred_free(vport); +} + +static struct vport *geneve_tnl_create(const struct vport_parms *parms) +{ + struct net *net = ovs_dp_get_net(parms->dp); + struct nlattr *options = parms->options; + struct geneve_port *geneve_port; + struct geneve_sock *gs; + struct vport *vport; + struct nlattr *a; + int err; + u16 dst_port; + + if (!options) { + err = -EINVAL; + goto error; + } + + a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT); + if (a && nla_len(a) == sizeof(u16)) { + dst_port = nla_get_u16(a); + } else { + /* Require destination port from userspace. */ + err = -EINVAL; + goto error; + } + + vport = ovs_vport_alloc(sizeof(struct geneve_port), + &ovs_geneve_vport_ops, parms); + if (IS_ERR(vport)) + return vport; + + geneve_port = geneve_vport(vport); + strncpy(geneve_port->name, parms->name, IFNAMSIZ); + + gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0); + if (IS_ERR(gs)) { + ovs_vport_free(vport); + return (void *)gs; + } + geneve_port->gs = gs; + + return vport; +error: + return ERR_PTR(err); +} + +static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb) +{ + struct ovs_key_ipv4_tunnel *tun_key; + struct ovs_tunnel_info *tun_info; + struct net *net = ovs_dp_get_net(vport->dp); + struct geneve_port *geneve_port = geneve_vport(vport); + __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport; + __be16 sport; + struct rtable *rt; + struct flowi4 fl; + u8 vni[3]; + __be16 df; + int err; + + tun_info = OVS_CB(skb)->egress_tun_info; + if (unlikely(!tun_info)) { + err = -EINVAL; + goto error; + } + + tun_key = &tun_info->tunnel; + + /* Route lookup */ + memset(&fl, 0, sizeof(fl)); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); + fl.flowi4_mark = skb->mark; + fl.flowi4_proto = IPPROTO_UDP; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + goto error; + } + + df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; + sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true); + tunnel_id_to_vni(tun_key->tun_id, vni); + skb->ignore_df = 1; + + err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr, + tun_key->ipv4_dst, tun_key->ipv4_tos, + tun_key->ipv4_ttl, df, sport, dport, + tun_key->tun_flags, vni, + tun_info->options_len, (u8 *)tun_info->options, + false); + if (err < 0) + ip_rt_put(rt); +error: + return err; +} + +static const char *geneve_get_name(const struct vport *vport) +{ + struct geneve_port *geneve_port = geneve_vport(vport); + + return geneve_port->name; +} + +const struct vport_ops ovs_geneve_vport_ops = { + .type = OVS_VPORT_TYPE_GENEVE, + .create = geneve_tnl_create, + .destroy = geneve_tnl_destroy, + .get_name = geneve_get_name, + .get_options = geneve_get_options, + .send = geneve_tnl_send, +}; diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index f49148a07da29037c0799ab122b18b1d4d0599cd..108b82da2fd94169bab809df9d771232fa0c53b5 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -63,8 +63,10 @@ static __be16 filter_tnl_flags(__be16 flags) static struct sk_buff *__build_header(struct sk_buff *skb, int tunnel_hlen) { - const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key; struct tnl_ptk_info tpi; + const struct ovs_key_ipv4_tunnel *tun_key; + + tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); if (IS_ERR(skb)) @@ -92,7 +94,7 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq) static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { - struct ovs_key_ipv4_tunnel tun_key; + struct ovs_tunnel_info tun_info; struct ovs_net *ovs_net; struct vport *vport; __be64 key; @@ -103,10 +105,10 @@ static int gre_rcv(struct sk_buff *skb, return PACKET_REJECT; key = key_to_tunnel_id(tpi->key, tpi->seq); - ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key, - filter_tnl_flags(tpi->flags)); + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, + filter_tnl_flags(tpi->flags), NULL, 0); - ovs_vport_receive(vport, skb, &tun_key); + ovs_vport_receive(vport, skb, &tun_info); return PACKET_RCVD; } @@ -129,6 +131,7 @@ static int gre_err(struct sk_buff *skb, u32 info, static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) { struct net *net = ovs_dp_get_net(vport->dp); + struct ovs_key_ipv4_tunnel *tun_key; struct flowi4 fl; struct rtable *rt; int min_headroom; @@ -136,16 +139,17 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) __be16 df; int err; - if (unlikely(!OVS_CB(skb)->tun_key)) { + if (unlikely(!OVS_CB(skb)->egress_tun_info)) { err = -EINVAL; goto error; } + tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; /* Route lookup */ memset(&fl, 0, sizeof(fl)); - fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; - fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; - fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); fl.flowi4_mark = skb->mark; fl.flowi4_proto = IPPROTO_GRE; @@ -153,7 +157,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) if (IS_ERR(rt)) return PTR_ERR(rt); - tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags); + tunnel_hlen = 
ip_gre_calc_hlen(tun_key->tun_flags); min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + tunnel_hlen + sizeof(struct iphdr) @@ -185,15 +189,14 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) goto err_free_rt; } - df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; skb->ignore_df = 1; return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, - OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, - OVS_CB(skb)->tun_key->ipv4_tos, - OVS_CB(skb)->tun_key->ipv4_ttl, df, false); + tun_key->ipv4_dst, IPPROTO_GRE, + tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false); err_free_rt: ip_rt_put(rt); error: diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index d8b7e247bebff5c5f4c3b83345ca5e4f492be60b..2735e01dca73a40e4f60027152a79efa435724a7 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Nicira, Inc. + * Copyright (c) 2014 Nicira, Inc. * Copyright (c) 2013 Cisco Systems, Inc. * * This program is free software; you can redistribute it and/or @@ -58,7 +58,7 @@ static inline struct vxlan_port *vxlan_vport(const struct vport *vport) /* Called with rcu_read_lock and BH disabled. */ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) { - struct ovs_key_ipv4_tunnel tun_key; + struct ovs_tunnel_info tun_info; struct vport *vport = vs->data; struct iphdr *iph; __be64 key; @@ -66,9 +66,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) /* Save outer tunnel values */ iph = ip_hdr(skb); key = cpu_to_be64(ntohl(vx_vni) >> 8); - ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY); + ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0); - ovs_vport_receive(vport, skb, &tun_key); + ovs_vport_receive(vport, skb, &tun_info); } static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb) @@ -140,22 +140,24 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) struct net *net = ovs_dp_get_net(vport->dp); struct vxlan_port *vxlan_port = vxlan_vport(vport); __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + struct ovs_key_ipv4_tunnel *tun_key; struct rtable *rt; struct flowi4 fl; __be16 src_port; __be16 df; int err; - if (unlikely(!OVS_CB(skb)->tun_key)) { + if (unlikely(!OVS_CB(skb)->egress_tun_info)) { err = -EINVAL; goto error; } + tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; /* Route lookup */ memset(&fl, 0, sizeof(fl)); - fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; - fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; - fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); fl.flowi4_mark = skb->mark; fl.flowi4_proto = IPPROTO_UDP; @@ -165,7 +167,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) goto error; } - df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; skb->ignore_df = 1; @@ -173,11 +175,10 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) src_port = udp_flow_src_port(net, skb, 0, 0, true); err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, - fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, - OVS_CB(skb)->tun_key->ipv4_tos, - OVS_CB(skb)->tun_key->ipv4_ttl, df, + fl.saddr, tun_key->ipv4_dst, + tun_key->ipv4_tos, tun_key->ipv4_ttl, df, src_port, dst_port, - htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8), + htonl(be64_to_cpu(tun_key->tun_id) << 8), false); if (err < 0) ip_rt_put(rt); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 6d8f2ec481d9d33927795cfd3f4a59bb7258f128..53001b020ca7699fbf4a4c21199ac99825b45c51 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -48,6 +48,9 @@ static const struct vport_ops *vport_ops_list[] = { #ifdef CONFIG_OPENVSWITCH_VXLAN &ovs_vxlan_vport_ops, #endif +#ifdef CONFIG_OPENVSWITCH_GENEVE + &ovs_geneve_vport_ops, +#endif }; /* Protected by RCU read lock for reading, ovs_mutex for writing. */ @@ -148,8 +151,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, return ERR_PTR(-ENOMEM); } - spin_lock_init(&vport->stats_lock); - return vport; } @@ -268,14 +269,10 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) * netdev-stats can be directly read over netlink-ioctl. */ - spin_lock_bh(&vport->stats_lock); - - stats->rx_errors = vport->err_stats.rx_errors; - stats->tx_errors = vport->err_stats.tx_errors; - stats->tx_dropped = vport->err_stats.tx_dropped; - stats->rx_dropped = vport->err_stats.rx_dropped; - - spin_unlock_bh(&vport->stats_lock); + stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors); + stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors); + stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped); + stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped); for_each_possible_cpu(i) { const struct pcpu_sw_netstats *percpu_stats; @@ -438,9 +435,11 @@ u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb) * skb->data should point to the Ethernet header. */ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, - struct ovs_key_ipv4_tunnel *tun_key) + struct ovs_tunnel_info *tun_info) { struct pcpu_sw_netstats *stats; + struct sw_flow_key key; + int error; stats = this_cpu_ptr(vport->percpu_stats); u64_stats_update_begin(&stats->syncp); @@ -448,8 +447,15 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); - OVS_CB(skb)->tun_key = tun_key; - ovs_dp_process_received_packet(vport, skb); + OVS_CB(skb)->input_vport = vport; + OVS_CB(skb)->egress_tun_info = NULL; + /* Extract flow from 'skb' into 'key'. 
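Stepping back to the VNI handling above: the tunnel_id_to_vni()/vni_to_tunnel_id() helpers in vport-geneve.c and the "<< 8"/">> 8" shifts in the VXLAN code both place the 24-bit VNI in the low 24 bits of the CPU-order 64-bit tunnel ID. A host-order userspace sketch of that mapping is below; htobe64()/be64toh() from <endian.h> are assumed available (glibc), while the kernel helpers operate directly on the big-endian __be64 byte layout.

#include <endian.h>   /* htobe64()/be64toh(); glibc assumed */
#include <stdint.h>
#include <stdio.h>

/* VNI occupies the low 24 bits of the CPU-order tunnel ID. */
static uint64_t vni_to_tun_id(const uint8_t vni[3])
{
    return ((uint64_t)vni[0] << 16) | ((uint64_t)vni[1] << 8) | vni[2];
}

static void tun_id_to_vni(uint64_t tun_id, uint8_t vni[3])
{
    vni[0] = (tun_id >> 16) & 0xff;
    vni[1] = (tun_id >> 8) & 0xff;
    vni[2] = tun_id & 0xff;
}

int main(void)
{
    uint8_t vni[3] = { 0x12, 0x34, 0x56 };
    uint64_t id = vni_to_tun_id(vni);
    uint64_t wire = htobe64(id);   /* the __be64 tun_id the kernel carries */

    tun_id_to_vni(be64toh(wire), vni);
    printf("tun_id 0x%06llx -> vni %02x%02x%02x\n",
           (unsigned long long)id, vni[0], vni[1], vni[2]);
    return 0;
}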
*/ + error = ovs_flow_key_extract(tun_info, skb, &key); + if (unlikely(error)) { + kfree_skb(skb); + return; + } + ovs_dp_process_packet(skb, &key); } /** @@ -495,27 +501,24 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) static void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) { - spin_lock(&vport->stats_lock); - switch (err_type) { case VPORT_E_RX_DROPPED: - vport->err_stats.rx_dropped++; + atomic_long_inc(&vport->err_stats.rx_dropped); break; case VPORT_E_RX_ERROR: - vport->err_stats.rx_errors++; + atomic_long_inc(&vport->err_stats.rx_errors); break; case VPORT_E_TX_DROPPED: - vport->err_stats.tx_dropped++; + atomic_long_inc(&vport->err_stats.tx_dropped); break; case VPORT_E_TX_ERROR: - vport->err_stats.tx_errors++; + atomic_long_inc(&vport->err_stats.tx_errors); break; } - spin_unlock(&vport->stats_lock); } static void free_vport_rcu(struct rcu_head *rcu) diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 35f89d84b45e2cc828b4e0a2f77e683da6e067c4..8942125de3a6c051e9d8d818f747bd3cce023be4 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -35,7 +35,6 @@ struct vport_parms; /* The following definitions are for users of the vport subsytem: */ -/* The following definitions are for users of the vport subsytem: */ struct vport_net { struct vport __rcu *gre_vport; }; @@ -62,10 +61,10 @@ int ovs_vport_send(struct vport *, struct sk_buff *); /* The following definitions are for implementers of vport devices: */ struct vport_err_stats { - u64 rx_dropped; - u64 rx_errors; - u64 tx_dropped; - u64 tx_errors; + atomic_long_t rx_dropped; + atomic_long_t rx_errors; + atomic_long_t tx_dropped; + atomic_long_t tx_errors; }; /** * struct vport_portids - array of netlink portids of a vport. @@ -93,7 +92,6 @@ struct vport_portids { * @dp_hash_node: Element in @datapath->ports hash table in datapath.c. * @ops: Class structure. * @percpu_stats: Points to per-CPU statistics used and maintained by vport - * @stats_lock: Protects @err_stats; * @err_stats: Points to error statistics used and maintained by vport */ struct vport { @@ -108,7 +106,6 @@ struct vport { struct pcpu_sw_netstats __percpu *percpu_stats; - spinlock_t stats_lock; struct vport_err_stats err_stats; }; @@ -210,7 +207,7 @@ static inline struct vport *vport_from_priv(void *priv) } void ovs_vport_receive(struct vport *, struct sk_buff *, - struct ovs_key_ipv4_tunnel *); + struct ovs_tunnel_info *); /* List of statically compiled vport implementations. Don't forget to also * add yours to the list at the top of vport.c. 
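The vport error counters above move from a spinlock-protected set of u64s to one atomic_long_t per counter, bumped with atomic_long_inc() on the datapath and summed with atomic_long_read() when stats are read. A C11 analogy of that layout is sketched below; the struct and function names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

struct err_stats {
    atomic_ulong rx_dropped;
    atomic_ulong rx_errors;
    atomic_ulong tx_dropped;
    atomic_ulong tx_errors;
};

static struct err_stats stats;   /* static zero-init is valid for atomics */

static void record_rx_drop(struct err_stats *s)
{
    /* atomic_long_inc() in the kernel; relaxed ordering is enough for
     * statistics that are only summed up later. */
    atomic_fetch_add_explicit(&s->rx_dropped, 1, memory_order_relaxed);
}

int main(void)
{
    record_rx_drop(&stats);
    printf("rx_dropped=%lu\n",
           atomic_load_explicit(&stats.rx_dropped, memory_order_relaxed));
    return 0;
}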
*/ @@ -218,6 +215,7 @@ extern const struct vport_ops ovs_netdev_vport_ops; extern const struct vport_ops ovs_internal_vport_ops; extern const struct vport_ops ovs_gre_vport_ops; extern const struct vport_ops ovs_vxlan_vport_ops; +extern const struct vport_ops ovs_geneve_vport_ops; static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 93896d2092f67dc60a17c6a7c6b4abbb689b6cd9..87d20f48ff06195766e8ecd20a3fcfae5ccae690 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -240,11 +240,9 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; - const struct net_device_ops *ops = dev->netdev_ops; netdev_features_t features; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; - u16 queue_map; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) @@ -255,17 +253,13 @@ static int packet_direct_xmit(struct sk_buff *skb) __skb_linearize(skb)) goto drop; - queue_map = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, queue_map); + txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_xmit_frozen_or_drv_stopped(txq)) { - ret = ops->ndo_start_xmit(skb, dev); - if (ret == NETDEV_TX_OK) - txq_trans_update(txq); - } + if (!netif_xmit_frozen_or_drv_stopped(txq)) + ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 56a6146ac94bd331502294244ee2daa637f2054b..a58680016472b88010c30a5f1708c1f8a046280f 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -36,7 +36,7 @@ struct phonet_routes { struct mutex lock; - struct net_device *table[64]; + struct net_device __rcu *table[64]; }; struct phonet_net { @@ -275,7 +275,7 @@ static void phonet_route_autodel(struct net_device *dev) bitmap_zero(deleted, 64); mutex_lock(&pnn->routes.lock); for (i = 0; i < 64; i++) - if (dev == pnn->routes.table[i]) { + if (rcu_access_pointer(pnn->routes.table[i]) == dev) { RCU_INIT_POINTER(pnn->routes.table[i], NULL); set_bit(i, deleted); } @@ -388,7 +388,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr) daddr = daddr >> 2; mutex_lock(&routes->lock); - if (dev == routes->table[daddr]) + if (rcu_access_pointer(routes->table[daddr]) == dev) RCU_INIT_POINTER(routes->table[daddr], NULL); else dev = NULL; diff --git a/net/rds/send.c b/net/rds/send.c index 23718160d71ec9fdb919f95f6c7d73fd61a2ffad..0a64541020b0b52edb0cf197a55ff12cc36ccb8e 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status) sock_put(rds_rs_to_sk(rs)); } rs = rm->m_rs; - sock_hold(rds_rs_to_sk(rs)); + if (rs) + sock_hold(rds_rs_to_sk(rs)); } + if (!rs) + goto unlock_and_drop; spin_lock(&rs->rs_lock); if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { @@ -638,9 +641,6 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status) * queue. This means that in the TCP case, the message may not have been * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked * checks the RDS_MSG_HAS_ACK_SEQ bit. - * - * XXX It's not clear to me how this is safely serialized with socket - * destruction. Maybe it should bail if it sees SOCK_DEAD. 
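The phonet routing table above becomes an array of __rcu pointers, compared with rcu_access_pointer() in the paths that only test the value and never dereference it. Roughly, rcu_assign_pointer() behaves like a release store, rcu_dereference() like an ordered (consume) load before dereferencing, and rcu_access_pointer() like a plain read used only for comparison. A C11 sketch of that distinction follows; the types and function names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct route { int ifindex; };

static _Atomic(struct route *) table[64];

/* rcu_assign_pointer() analogue: publish with release ordering. */
static void route_add(int i, struct route *r)
{
    atomic_store_explicit(&table[i], r, memory_order_release);
}

/* rcu_access_pointer() analogue: the value is only compared, never
 * dereferenced, so no ordering beyond the read itself is needed. */
static int route_is(int i, const struct route *r)
{
    return atomic_load_explicit(&table[i], memory_order_relaxed) == r;
}

/* rcu_dereference() analogue: the pointer will be dereferenced, so the
 * load must be ordered before the accesses made through it. */
static int route_ifindex(int i)
{
    struct route *r = atomic_load_explicit(&table[i], memory_order_acquire);
    return r ? r->ifindex : -1;
}

int main(void)
{
    static struct route r = { .ifindex = 3 };

    route_add(5, &r);
    printf("match=%d ifindex=%d\n", route_is(5, &r), route_ifindex(5));
    return 0;
}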
*/ void rds_send_drop_acked(struct rds_connection *conn, u64 ack, is_acked_func is_acked) @@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) */ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { spin_unlock_irqrestore(&conn->c_lock, flags); + spin_lock_irqsave(&rm->m_rs_lock, flags); + rm->m_rs = NULL; + spin_unlock_irqrestore(&rm->m_rs_lock, flags); continue; } list_del_init(&rm->m_conn_item); diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index a65ee78db0c54e1062186b9b132fe46fac380c91..f9f564a6c960e47b3c243d11a3e11317c5a56353 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -106,11 +106,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn) rds_tcp_set_callbacks(sock, conn); ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), O_NONBLOCK); - sock = NULL; rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); if (ret == -EINPROGRESS) ret = 0; + if (ret == 0) + sock = NULL; + else + rds_tcp_restore_callbacks(sock, conn->c_transport_data); out: if (sock) diff --git a/net/rds/threads.c b/net/rds/threads.c index 65eaefcab241f105fdcaeac11be3d81328f8b0cc..dc2402e871fda52d10b0a67089c7c6e13832f72b 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -78,8 +78,7 @@ void rds_connect_complete(struct rds_connection *conn) "current state is %d\n", __func__, atomic_read(&conn->c_state)); - atomic_set(&conn->c_state, RDS_CONN_ERROR); - queue_work(rds_wq, &conn->c_down_w); + rds_conn_drop(conn); return; } diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index bc5514211b0cfb4b3d4d42a12eeb03de8e90a2b4..e873d7d9f8571c4d5db2efd70dcdd0c5da792e1c 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -160,7 +160,8 @@ void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigne break; case ROSE_DIAGNOSTIC: - printk(KERN_WARNING "ROSE: received diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]); + pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3], + skb->data + 4); break; default: diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index db57458c824c87b463ceff200be22c07df936b73..74c0fcd36838dfc7326eeddb7726483576da0413 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c @@ -37,7 +37,7 @@ void rxrpc_UDP_error_report(struct sock *sk) _enter("%p{%d}", sk, local->debug_id); - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (!skb) { _leave("UDP socket errqueue empty"); return; @@ -111,18 +111,6 @@ void rxrpc_UDP_error_report(struct sock *sk) skb_queue_tail(&trans->error_queue, skb); rxrpc_queue_work(&trans->error_handler); - /* reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - skb = skb_peek(&sk->sk_error_queue); - if (skb) { - sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else { - spin_unlock_bh(&sk->sk_error_queue.lock); - } - _leave(""); } diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 63b21e580de95d21c60bfeeffac7d2b0aaefaf42..481f89f93789a147fd5979e894e62145f4d9d767 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -45,7 +45,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_skb_priv *sp; struct rxrpc_sock *rx = call->socket; struct sock *sk; - int skb_len, ret; + int ret; _enter(",,%d,%d", force, terminal); @@ -101,13 +101,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, 
struct sk_buff *skb, rx->interceptor(sk, call->user_call_ID, skb); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { - - /* Cache the SKB length before we tack it onto the - * receive queue. Once it is added it no longer - * belongs to us and may be freed by other threads of - * control pulling packets from the queue */ - skb_len = skb->len; - _net("post skb %p", skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock_bh(&sk->sk_receive_queue.lock); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 648778aef1a254b9739443e93012798a134d0d86..3d43e4979f27c8dca3083c863549592216f741fe 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -252,7 +252,8 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; if (est) { - int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, + int err = gen_new_estimator(&p->tcfc_bstats, NULL, + &p->tcfc_rate_est, &p->tcfc_lock, est); if (err) { kfree(p); @@ -619,10 +620,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, if (err < 0) goto errout; - if (gnet_stats_copy_basic(&d, &p->tcfc_bstats) < 0 || + if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 || gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, &p->tcfc_rate_est) < 0 || - gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0) + gnet_stats_copy_queue(&d, NULL, + &p->tcfc_qstats, + p->tcfc_qstats.qlen) < 0) goto errout; if (gnet_stats_finish_copy(&d) < 0) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0566e4606a4ac86710ebbc04a80c9ad1729849cd..69791ca77a05f6231b1349bce5aeb38aa018bd8c 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -178,7 +178,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, spin_lock_bh(&police->tcf_lock); if (est) { - err = gen_replace_estimator(&police->tcf_bstats, + err = gen_replace_estimator(&police->tcf_bstats, NULL, &police->tcf_rate_est, &police->tcf_lock, est); if (err) @@ -231,7 +231,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, if (ret != ACT_P_CREATED) return ret; - police->tcfp_t_c = ktime_to_ns(ktime_get()); + police->tcfp_t_c = ktime_get_ns(); police->tcf_index = parm->index ? 
parm->index : tcf_hash_new_index(hinfo); h = tcf_hash(police->tcf_index, POL_TAB_MASK); @@ -279,7 +279,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, return police->tcfp_result; } - now = ktime_to_ns(ktime_get()); + now = ktime_get_ns(); toks = min_t(s64, now - police->tcfp_t_c, police->tcfp_burst); if (police->peak_present) { diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index c28b0d327b124b8b478680293f145214caa6f835..aad6a679fb135e9c6492a1b1cd3c1b638d823453 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -117,7 +117,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; - spinlock_t *root_lock; struct tcmsg *t; u32 protocol; u32 prio; @@ -125,7 +124,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) u32 parent; struct net_device *dev; struct Qdisc *q; - struct tcf_proto **back, **chain; + struct tcf_proto __rcu **back; + struct tcf_proto __rcu **chain; struct tcf_proto *tp; const struct tcf_proto_ops *tp_ops; const struct Qdisc_class_ops *cops; @@ -197,7 +197,9 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) goto errout; /* Check the chain for existence of proto-tcf with this priority */ - for (back = chain; (tp = *back) != NULL; back = &tp->next) { + for (back = chain; + (tp = rtnl_dereference(*back)) != NULL; + back = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { if (!nprio || @@ -209,8 +211,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) } } - root_lock = qdisc_root_sleeping_lock(q); - if (tp == NULL) { /* Proto-tcf does not exist, create new one */ @@ -259,7 +259,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) } tp->ops = tp_ops; tp->protocol = protocol; - tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); + tp->prio = nprio ? : + TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); tp->q = q; tp->classify = tp_ops->classify; tp->classid = parent; @@ -280,9 +281,9 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { - spin_lock_bh(root_lock); - *back = tp->next; - spin_unlock_bh(root_lock); + struct tcf_proto *next = rtnl_dereference(tp->next); + + RCU_INIT_POINTER(*back, next); tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); tcf_destroy(tp); @@ -322,10 +323,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) n->nlmsg_flags & NLM_F_CREATE ? 
TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); if (err == 0) { if (tp_created) { - spin_lock_bh(root_lock); - tp->next = *back; - *back = tp; - spin_unlock_bh(root_lock); + RCU_INIT_POINTER(tp->next, rtnl_dereference(*back)); + rcu_assign_pointer(*back, tp); } tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); } else { @@ -420,7 +419,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) int s_t; struct net_device *dev; struct Qdisc *q; - struct tcf_proto *tp, **chain; + struct tcf_proto *tp, __rcu **chain; struct tcmsg *tcm = nlmsg_data(cb->nlh); unsigned long cl = 0; const struct Qdisc_class_ops *cops; @@ -454,7 +453,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; - for (tp = *chain, t = 0; tp; tp = tp->next, t++) { + for (tp = rtnl_dereference(*chain), t = 0; + tp; tp = rtnl_dereference(tp->next), t++) { if (t < s_t) continue; if (TC_H_MAJ(tcm->tcm_info) && @@ -496,7 +496,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts) +void tcf_exts_destroy(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND); @@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, tcf_tree_lock(tp); list_splice_init(&dst->actions, &tmp); list_splice(&src->actions, &dst->actions); + dst->type = src->type; tcf_tree_unlock(tp); tcf_action_destroy(&tmp, TCA_ACT_UNBIND); #endif diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 0ae1813e3e90d55a1bf5993364502cebd58c1b22..cd61280941e5c72d8c22e4dfe18ec517e90cba53 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -24,6 +24,7 @@ struct basic_head { u32 hgenerator; struct list_head flist; + struct rcu_head rcu; }; struct basic_filter { @@ -31,17 +32,19 @@ struct basic_filter { struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_result res; + struct tcf_proto *tp; struct list_head link; + struct rcu_head rcu; }; static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { int r; - struct basic_head *head = tp->root; + struct basic_head *head = rcu_dereference_bh(tp->root); struct basic_filter *f; - list_for_each_entry(f, &head->flist, link) { + list_for_each_entry_rcu(f, &head->flist, link) { if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; *res = f->res; @@ -56,7 +59,7 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long basic_get(struct tcf_proto *tp, u32 handle) { unsigned long l = 0UL; - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; if (head == NULL) @@ -81,41 +84,43 @@ static int basic_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->flist); - tp->root = head; + rcu_assign_pointer(tp->root, head); return 0; } -static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f) +static void basic_delete_filter(struct rcu_head *head) { - tcf_unbind_filter(tp, &f->res); - tcf_exts_destroy(tp, &f->exts); - tcf_em_tree_destroy(tp, &f->ematches); + struct basic_filter *f = container_of(head, struct basic_filter, rcu); + + tcf_exts_destroy(&f->exts); + tcf_em_tree_destroy(&f->ematches); kfree(f); } static void basic_destroy(struct tcf_proto *tp) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f, 
*n; list_for_each_entry_safe(f, n, &head->flist, link) { - list_del(&f->link); - basic_delete_filter(tp, f); + list_del_rcu(&f->link); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, basic_delete_filter); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int basic_delete(struct tcf_proto *tp, unsigned long arg) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *t, *f = (struct basic_filter *) arg; list_for_each_entry(t, &head->flist, link) if (t == f) { - tcf_tree_lock(tp); - list_del(&t->link); - tcf_tree_unlock(tp); - basic_delete_filter(tp, t); + list_del_rcu(&t->link); + tcf_unbind_filter(tp, &t->res); + call_rcu(&t->rcu, basic_delete_filter); return 0; } @@ -152,10 +157,11 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, tcf_exts_change(tp, &f->exts, &e); tcf_em_tree_change(tp, &f->ematches, &t); + f->tp = tp; return 0; errout: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } @@ -164,9 +170,10 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { int err; - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct nlattr *tb[TCA_BASIC_MAX + 1]; - struct basic_filter *f = (struct basic_filter *) *arg; + struct basic_filter *fold = (struct basic_filter *) *arg; + struct basic_filter *fnew; if (tca[TCA_OPTIONS] == NULL) return -EINVAL; @@ -176,22 +183,23 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - if (f != NULL) { - if (handle && f->handle != handle) + if (fold != NULL) { + if (handle && fold->handle != handle) return -EINVAL; - return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); } err = -ENOBUFS; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (f == NULL) + fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); + if (fnew == NULL) goto errout; - tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); + tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); err = -EINVAL; - if (handle) - f->handle = handle; - else { + if (handle) { + fnew->handle = handle; + } else if (fold) { + fnew->handle = fold->handle; + } else { unsigned int i = 0x80000000; do { if (++head->hgenerator == 0x7FFFFFFF) @@ -203,29 +211,32 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, goto errout; } - f->handle = head->hgenerator; + fnew->handle = head->hgenerator; } - err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); + err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); if (err < 0) goto errout; - tcf_tree_lock(tp); - list_add(&f->link, &head->flist); - tcf_tree_unlock(tp); - *arg = (unsigned long) f; + *arg = (unsigned long)fnew; + + if (fold) { + list_replace_rcu(&fold->link, &fnew->link); + tcf_unbind_filter(tp, &fold->res); + call_rcu(&fold->rcu, basic_delete_filter); + } else { + list_add_rcu(&fnew->link, &head->flist); + } return 0; errout: - if (*arg == 0UL && f) - kfree(f); - + kfree(fnew); return err; } static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; list_for_each_entry(f, &head->flist, link) { diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 0e30d58149dad95874b9023abb5febf4cab97bfa..eed49d1d0878d6e55d23e622e58f3bf908c9cab9 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -27,6 +27,7 @@ 
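The cls_basic conversion above follows the usual RCU update shape: allocate fnew, copy and refresh it from fold, publish it with list_replace_rcu(), and free fold only after a grace period via call_rcu(). A compressed userspace sketch of that lifecycle is below, using a single published filter instead of a list; pthread_join() stands in for the grace period, which the kernel waits for asynchronously rather than by blocking.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct filter { int handle; int classid; };

static _Atomic(struct filter *) active;

static void *reader(void *arg)
{
    struct filter *f = atomic_load_explicit(&active, memory_order_acquire);

    (void)arg;
    if (f)
        printf("handle %d -> classid %d\n", f->handle, f->classid);
    return NULL;
}

int main(void)
{
    struct filter *fold, *fnew;
    pthread_t t;

    fold = malloc(sizeof(*fold));
    *fold = (struct filter){ .handle = 1, .classid = 10 };
    atomic_store_explicit(&active, fold, memory_order_release);

    pthread_create(&t, NULL, reader, NULL);

    /* Update: copy the old filter, change it, publish the copy. */
    fnew = malloc(sizeof(*fnew));
    *fnew = *fold;
    fnew->classid = 20;
    atomic_store_explicit(&active, fnew, memory_order_release);

    /* The reader may still hold 'fold'; wait before freeing it.
     * basic_change() defers this with call_rcu() instead of blocking. */
    pthread_join(t, NULL);
    free(fold);
    free(fnew);
    return 0;
}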
MODULE_DESCRIPTION("TC BPF based classifier"); struct cls_bpf_head { struct list_head plist; u32 hgen; + struct rcu_head rcu; }; struct cls_bpf_prog { @@ -37,6 +38,8 @@ struct cls_bpf_prog { struct list_head link; u32 handle; u16 bpf_len; + struct tcf_proto *tp; + struct rcu_head rcu; }; static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { @@ -49,11 +52,11 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rcu_dereference_bh(tp->root); struct cls_bpf_prog *prog; int ret; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { int filter_res = BPF_PROG_RUN(prog->filter, skb); if (filter_res == 0) @@ -81,16 +84,15 @@ static int cls_bpf_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; - INIT_LIST_HEAD(&head->plist); - tp->root = head; + INIT_LIST_HEAD_RCU(&head->plist); + rcu_assign_pointer(tp->root, head); return 0; } static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) { - tcf_unbind_filter(tp, &prog->res); - tcf_exts_destroy(tp, &prog->exts); + tcf_exts_destroy(&prog->exts); bpf_prog_destroy(prog->filter); @@ -98,18 +100,23 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) kfree(prog); } +static void __cls_bpf_delete_prog(struct rcu_head *rcu) +{ + struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu); + + cls_bpf_delete_prog(prog->tp, prog); +} + static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; list_for_each_entry(prog, &head->plist, link) { if (prog == todel) { - tcf_tree_lock(tp); - list_del(&prog->link); - tcf_tree_unlock(tp); - - cls_bpf_delete_prog(tp, prog); + list_del_rcu(&prog->link); + tcf_unbind_filter(tp, &prog->res); + call_rcu(&prog->rcu, __cls_bpf_delete_prog); return 0; } } @@ -119,27 +126,29 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) static void cls_bpf_destroy(struct tcf_proto *tp) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *tmp; list_for_each_entry_safe(prog, tmp, &head->plist, link) { - list_del(&prog->link); - cls_bpf_delete_prog(tp, prog); + list_del_rcu(&prog->link); + tcf_unbind_filter(tp, &prog->res); + call_rcu(&prog->rcu, __cls_bpf_delete_prog); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; unsigned long ret = 0UL; if (head == NULL) return 0UL; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { if (prog->handle == handle) { ret = (unsigned long) prog; break; @@ -158,10 +167,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) { - struct sock_filter *bpf_ops, *bpf_old; + struct sock_filter *bpf_ops; struct tcf_exts exts; struct sock_fprog_kern tmp; - struct bpf_prog *fp, *fp_old; + struct bpf_prog *fp; u16 bpf_size, bpf_len; u32 classid; int ret; @@ -197,30 +206,19 @@ 
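cls_bpf above still consumes classic BPF programs as an array of struct sock_filter opcodes, copied in before being run with BPF_PROG_RUN(). For context, the short userspace program below builds a filter in that same instruction format; it is attached with SO_ATTACH_FILTER only to keep the example self-contained (tc delivers the opcode array to the classifier over netlink instead), and running it needs CAP_NET_RAW.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/filter.h>

int main(void)
{
    /* Load the IPv4 protocol byte (offset 9 on a raw IP socket) and
     * accept the packet only if it is UDP. */
    struct sock_filter code[] = {
        BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 9),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_UDP, 0, 1),
        BPF_STMT(BPF_RET | BPF_K, 0xffff),   /* accept */
        BPF_STMT(BPF_RET | BPF_K, 0),        /* drop */
    };
    struct sock_fprog prog = {
        .len = sizeof(code) / sizeof(code[0]),
        .filter = code,
    };
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);

    if (fd < 0) {
        perror("socket");   /* needs CAP_NET_RAW */
        return 1;
    }
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
        perror("SO_ATTACH_FILTER");
    return 0;
}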
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, if (ret) goto errout_free; - tcf_tree_lock(tp); - fp_old = prog->filter; - bpf_old = prog->bpf_ops; - prog->bpf_len = bpf_len; prog->bpf_ops = bpf_ops; prog->filter = fp; prog->res.classid = classid; - tcf_tree_unlock(tp); tcf_bind_filter(tp, &prog->res, base); tcf_exts_change(tp, &prog->exts, &exts); - if (fp_old) - bpf_prog_destroy(fp_old); - if (bpf_old) - kfree(bpf_old); - return 0; - errout_free: kfree(bpf_ops); errout: - tcf_exts_destroy(tp, &exts); + tcf_exts_destroy(&exts); return ret; } @@ -244,9 +242,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg; + struct cls_bpf_head *head = rtnl_dereference(tp->root); + struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg; struct nlattr *tb[TCA_BPF_MAX + 1]; + struct cls_bpf_prog *prog; int ret; if (tca[TCA_OPTIONS] == NULL) @@ -256,18 +255,19 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (ret < 0) return ret; - if (prog != NULL) { - if (handle && prog->handle != handle) - return -EINVAL; - return cls_bpf_modify_existing(net, tp, prog, base, tb, - tca[TCA_RATE], ovr); - } - prog = kzalloc(sizeof(*prog), GFP_KERNEL); - if (prog == NULL) + if (!prog) return -ENOBUFS; tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); + + if (oldprog) { + if (handle && oldprog->handle != handle) { + ret = -EINVAL; + goto errout; + } + } + if (handle == 0) prog->handle = cls_bpf_grab_new_handle(tp, head); else @@ -281,16 +281,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (ret < 0) goto errout; - tcf_tree_lock(tp); - list_add(&prog->link, &head->plist); - tcf_tree_unlock(tp); + if (oldprog) { + list_replace_rcu(&prog->link, &oldprog->link); + tcf_unbind_filter(tp, &oldprog->res); + call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); + } else { + list_add_rcu(&prog->link, &head->plist); + } *arg = (unsigned long) prog; - return 0; errout: - if (*arg == 0UL && prog) - kfree(prog); + kfree(prog); return ret; } @@ -339,10 +341,10 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long) prog, arg) < 0) { diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index cacf01bd04f0a96660050c0e71da0840c8af0f95..d61a801222c10205e339a5d9d622b4ecce804ed4 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -22,17 +22,17 @@ struct cls_cgroup_head { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; + struct tcf_proto *tp; + struct rcu_head rcu; }; static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); u32 classid; - rcu_read_lock(); classid = task_cls_state(current)->classid; - rcu_read_unlock(); /* * Due to the nature of the classifier it is required to ignore all @@ -80,13 +80,25 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = { [TCA_CGROUP_EMATCHES] 
= { .type = NLA_NESTED }, }; +static void cls_cgroup_destroy_rcu(struct rcu_head *root) +{ + struct cls_cgroup_head *head = container_of(root, + struct cls_cgroup_head, + rcu); + + tcf_exts_destroy(&head->exts); + tcf_em_tree_destroy(&head->ematches); + kfree(head); +} + static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { struct nlattr *tb[TCA_CGROUP_MAX + 1]; - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); + struct cls_cgroup_head *new; struct tcf_ematch_tree t; struct tcf_exts e; int err; @@ -94,53 +106,58 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, if (!tca[TCA_OPTIONS]) return -EINVAL; - if (head == NULL) { - if (!handle) - return -EINVAL; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; + if (!head && !handle) + return -EINVAL; - tcf_exts_init(&head->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); - head->handle = handle; + if (head && handle != head->handle) + return -ENOENT; - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); - } + new = kzalloc(sizeof(*head), GFP_KERNEL); + if (!new) + return -ENOBUFS; - if (handle != head->handle) - return -ENOENT; + tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); + if (head) + new->handle = head->handle; + else + new->handle = handle; + new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], cgroup_policy); if (err < 0) - return err; + goto errout; tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); if (err < 0) - return err; + goto errout; err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); - if (err < 0) - return err; + if (err < 0) { + tcf_exts_destroy(&e); + goto errout; + } - tcf_exts_change(tp, &head->exts, &e); - tcf_em_tree_change(tp, &head->ematches, &t); + tcf_exts_change(tp, &new->exts, &e); + tcf_em_tree_change(tp, &new->ematches, &t); + rcu_assign_pointer(tp->root, new); + if (head) + call_rcu(&head->rcu, cls_cgroup_destroy_rcu); return 0; +errout: + kfree(new); + return err; } static void cls_cgroup_destroy(struct tcf_proto *tp) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (head) { - tcf_exts_destroy(tp, &head->exts); - tcf_em_tree_destroy(tp, &head->ematches); - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + call_rcu(&head->rcu, cls_cgroup_destroy_rcu); } } @@ -151,7 +168,7 @@ static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg) static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (arg->count < arg->skip) goto skip; @@ -167,7 +184,7 @@ static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg) static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 35be16f7c192dc8d5d2ebdd38c2b27a144c41076..4ac515f2a6cecbcb56159c9c11892771e3816ddd 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -34,12 +34,14 @@ struct flow_head { struct list_head filters; + struct 
rcu_head rcu; }; struct flow_filter { struct list_head list; struct tcf_exts exts; struct tcf_ematch_tree ematches; + struct tcf_proto *tp; struct timer_list perturb_timer; u32 perturb_period; u32 handle; @@ -54,6 +56,7 @@ struct flow_filter { u32 divisor; u32 baseclass; u32 hashrnd; + struct rcu_head rcu; }; static inline u32 addr_fold(void *addr) @@ -276,14 +279,14 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct flow_head *head = tp->root; + struct flow_head *head = rcu_dereference_bh(tp->root); struct flow_filter *f; u32 keymask; u32 classid; unsigned int n, key; int r; - list_for_each_entry(f, &head->filters, list) { + list_for_each_entry_rcu(f, &head->filters, list) { u32 keys[FLOW_KEY_MAX + 1]; struct flow_keys flow_keys; @@ -346,13 +349,23 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, }; +static void flow_destroy_filter(struct rcu_head *head) +{ + struct flow_filter *f = container_of(head, struct flow_filter, rcu); + + del_timer_sync(&f->perturb_timer); + tcf_exts_destroy(&f->exts); + tcf_em_tree_destroy(&f->ematches); + kfree(f); +} + static int flow_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct flow_head *head = tp->root; - struct flow_filter *f; + struct flow_head *head = rtnl_dereference(tp->root); + struct flow_filter *fold, *fnew; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; struct tcf_exts e; @@ -401,20 +414,42 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto err1; - f = (struct flow_filter *)*arg; - if (f != NULL) { + err = -ENOBUFS; + fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); + if (!fnew) + goto err2; + + fold = (struct flow_filter *)*arg; + if (fold) { err = -EINVAL; - if (f->handle != handle && handle) + if (fold->handle != handle && handle) goto err2; - mode = f->mode; + /* Copy fold into fnew */ + fnew->handle = fold->handle; + fnew->keymask = fold->keymask; + fnew->tp = fold->tp; + + fnew->handle = fold->handle; + fnew->nkeys = fold->nkeys; + fnew->keymask = fold->keymask; + fnew->mode = fold->mode; + fnew->mask = fold->mask; + fnew->xor = fold->xor; + fnew->rshift = fold->rshift; + fnew->addend = fold->addend; + fnew->divisor = fold->divisor; + fnew->baseclass = fold->baseclass; + fnew->hashrnd = fold->hashrnd; + + mode = fold->mode; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; if (mode == FLOW_MODE_HASH) - perturb_period = f->perturb_period; + perturb_period = fold->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; @@ -444,83 +479,72 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (TC_H_MIN(baseclass) == 0) baseclass = TC_H_MAKE(baseclass, 1); - err = -ENOBUFS; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (f == NULL) - goto err2; - - f->handle = handle; - f->mask = ~0U; - tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); - - get_random_bytes(&f->hashrnd, 4); - f->perturb_timer.function = flow_perturbation; - f->perturb_timer.data = (unsigned long)f; - init_timer_deferrable(&f->perturb_timer); + fnew->handle = handle; + fnew->mask = ~0U; + fnew->tp = tp; + get_random_bytes(&fnew->hashrnd, 4); + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); } - 
tcf_exts_change(tp, &f->exts, &e); - tcf_em_tree_change(tp, &f->ematches, &t); + fnew->perturb_timer.function = flow_perturbation; + fnew->perturb_timer.data = (unsigned long)fnew; + init_timer_deferrable(&fnew->perturb_timer); - tcf_tree_lock(tp); + tcf_exts_change(tp, &fnew->exts, &e); + tcf_em_tree_change(tp, &fnew->ematches, &t); + + netif_keep_dst(qdisc_dev(tp->q)); if (tb[TCA_FLOW_KEYS]) { - f->keymask = keymask; - f->nkeys = nkeys; + fnew->keymask = keymask; + fnew->nkeys = nkeys; } - f->mode = mode; + fnew->mode = mode; if (tb[TCA_FLOW_MASK]) - f->mask = nla_get_u32(tb[TCA_FLOW_MASK]); + fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]); if (tb[TCA_FLOW_XOR]) - f->xor = nla_get_u32(tb[TCA_FLOW_XOR]); + fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]); if (tb[TCA_FLOW_RSHIFT]) - f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); + fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); if (tb[TCA_FLOW_ADDEND]) - f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); + fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); if (tb[TCA_FLOW_DIVISOR]) - f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); + fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); if (baseclass) - f->baseclass = baseclass; + fnew->baseclass = baseclass; - f->perturb_period = perturb_period; - del_timer(&f->perturb_timer); + fnew->perturb_period = perturb_period; if (perturb_period) - mod_timer(&f->perturb_timer, jiffies + perturb_period); + mod_timer(&fnew->perturb_timer, jiffies + perturb_period); if (*arg == 0) - list_add_tail(&f->list, &head->filters); + list_add_tail_rcu(&fnew->list, &head->filters); + else + list_replace_rcu(&fnew->list, &fold->list); - tcf_tree_unlock(tp); + *arg = (unsigned long)fnew; - *arg = (unsigned long)f; + if (fold) + call_rcu(&fold->rcu, flow_destroy_filter); return 0; err2: - tcf_em_tree_destroy(tp, &t); + tcf_em_tree_destroy(&t); + kfree(fnew); err1: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } -static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) -{ - del_timer_sync(&f->perturb_timer); - tcf_exts_destroy(tp, &f->exts); - tcf_em_tree_destroy(tp, &f->ematches); - kfree(f); -} - static int flow_delete(struct tcf_proto *tp, unsigned long arg) { struct flow_filter *f = (struct flow_filter *)arg; - tcf_tree_lock(tp); - list_del(&f->list); - tcf_tree_unlock(tp); - flow_destroy_filter(tp, f); + list_del_rcu(&f->list); + call_rcu(&f->rcu, flow_destroy_filter); return 0; } @@ -532,28 +556,29 @@ static int flow_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->filters); - tp->root = head; + rcu_assign_pointer(tp->root, head); return 0; } static void flow_destroy(struct tcf_proto *tp) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f, *next; list_for_each_entry_safe(f, next, &head->filters, list) { - list_del(&f->list); - flow_destroy_filter(tp, f); + list_del_rcu(&f->list); + call_rcu(&f->rcu, flow_destroy_filter); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static unsigned long flow_get(struct tcf_proto *tp, u32 handle) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry(f, &head->filters, list) + list_for_each_entry_rcu(f, &head->filters, list) if (f->handle == handle) return (unsigned long)f; return 0; @@ -626,10 +651,10 @@ static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, static void flow_walk(struct tcf_proto *tp, struct tcf_walker 
*arg) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry(f, &head->filters, list) { + list_for_each_entry_rcu(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long)f, arg) < 0) { diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 861b03ccfed0a55007ceb001a297b05906b36ed3..dbfdfd1f1a9f8cc6d2364bed4c700eef4a029ed3 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -33,17 +33,20 @@ struct fw_head { u32 mask; - struct fw_filter *ht[HTSIZE]; + struct fw_filter __rcu *ht[HTSIZE]; + struct rcu_head rcu; }; struct fw_filter { - struct fw_filter *next; + struct fw_filter __rcu *next; u32 id; struct tcf_result res; #ifdef CONFIG_NET_CLS_IND int ifindex; #endif /* CONFIG_NET_CLS_IND */ struct tcf_exts exts; + struct tcf_proto *tp; + struct rcu_head rcu; }; static u32 fw_hash(u32 handle) @@ -56,14 +59,16 @@ static u32 fw_hash(u32 handle) static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct fw_head *head = tp->root; + struct fw_head *head = rcu_dereference_bh(tp->root); struct fw_filter *f; int r; u32 id = skb->mark; if (head != NULL) { id &= head->mask; - for (f = head->ht[fw_hash(id)]; f; f = f->next) { + + for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; + f = rcu_dereference_bh(f->next)) { if (f->id == id) { *res = f->res; #ifdef CONFIG_NET_CLS_IND @@ -92,13 +97,14 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long fw_get(struct tcf_proto *tp, u32 handle) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; if (head == NULL) return 0; - for (f = head->ht[fw_hash(handle)]; f; f = f->next) { + f = rtnl_dereference(head->ht[fw_hash(handle)]); + for (; f; f = rtnl_dereference(f->next)) { if (f->id == handle) return (unsigned long)f; } @@ -114,16 +120,17 @@ static int fw_init(struct tcf_proto *tp) return 0; } -static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) +static void fw_delete_filter(struct rcu_head *head) { - tcf_unbind_filter(tp, &f->res); - tcf_exts_destroy(tp, &f->exts); + struct fw_filter *f = container_of(head, struct fw_filter, rcu); + + tcf_exts_destroy(&f->exts); kfree(f); } static void fw_destroy(struct tcf_proto *tp) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; int h; @@ -131,29 +138,35 @@ static void fw_destroy(struct tcf_proto *tp) return; for (h = 0; h < HTSIZE; h++) { - while ((f = head->ht[h]) != NULL) { - head->ht[h] = f->next; - fw_delete_filter(tp, f); + while ((f = rtnl_dereference(head->ht[h])) != NULL) { + RCU_INIT_POINTER(head->ht[h], + rtnl_dereference(f->next)); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, fw_delete_filter); } } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int fw_delete(struct tcf_proto *tp, unsigned long arg) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)arg; - struct fw_filter **fp; + struct fw_filter __rcu **fp; + struct fw_filter *pfp; if (head == NULL || f == NULL) goto out; - for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); - fw_delete_filter(tp, f); + fp = &head->ht[fw_hash(f->id)]; + + for (pfp = rtnl_dereference(*fp); pfp; + fp = 
&pfp->next, pfp = rtnl_dereference(*fp)) { + if (pfp == f) { + RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, fw_delete_filter); return 0; } } @@ -171,7 +184,7 @@ static int fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct tcf_exts e; u32 mask; int err; @@ -210,7 +223,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, return 0; errout: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } @@ -220,7 +233,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *) *arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FW_MAX + 1]; @@ -233,10 +246,45 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - if (f != NULL) { + if (f) { + struct fw_filter *pfp, *fnew; + struct fw_filter __rcu **fp; + if (f->id != handle && handle) return -EINVAL; - return fw_change_attrs(net, tp, f, tb, tca, base, ovr); + + fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); + if (!fnew) + return -ENOBUFS; + + fnew->id = f->id; + fnew->res = f->res; +#ifdef CONFIG_NET_CLS_IND + fnew->ifindex = f->ifindex; +#endif /* CONFIG_NET_CLS_IND */ + fnew->tp = f->tp; + + tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE); + + err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); + if (err < 0) { + kfree(fnew); + return err; + } + + fp = &head->ht[fw_hash(fnew->id)]; + for (pfp = rtnl_dereference(*fp); pfp; + fp = &pfp->next, pfp = rtnl_dereference(*fp)) + if (pfp == f) + break; + + RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next)); + rcu_assign_pointer(*fp, fnew); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, fw_delete_filter); + + *arg = (unsigned long)fnew; + return err; } if (!handle) @@ -252,9 +300,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; head->mask = mask; - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); @@ -263,15 +309,14 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE); f->id = handle; + f->tp = tp; err = fw_change_attrs(net, tp, f, tb, tca, base, ovr); if (err < 0) goto errout; - f->next = head->ht[fw_hash(handle)]; - tcf_tree_lock(tp); - head->ht[fw_hash(handle)] = f; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); + rcu_assign_pointer(head->ht[fw_hash(handle)], f); *arg = (unsigned long)f; return 0; @@ -283,7 +328,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); int h; if (head == NULL) @@ -295,7 +340,8 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) for (h = 0; h < HTSIZE; h++) { struct fw_filter *f; - for (f = head->ht[h]; f; f = f->next) { + for (f = rtnl_dereference(head->ht[h]); f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; @@ -312,7 +358,7 @@ static void fw_walk(struct tcf_proto 
*tp, struct tcf_walker *arg) static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index dd9fc2523c76a2b0b9fe4c5225328cc29c1649f6..109a329b719858bea8f8a0afc29a27369cb3b9d0 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -29,25 +29,26 @@ * are mutually exclusive. * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX" */ - struct route4_fastmap { - struct route4_filter *filter; - u32 id; - int iif; + struct route4_filter *filter; + u32 id; + int iif; }; struct route4_head { - struct route4_fastmap fastmap[16]; - struct route4_bucket *table[256 + 1]; + struct route4_fastmap fastmap[16]; + struct route4_bucket __rcu *table[256 + 1]; + struct rcu_head rcu; }; struct route4_bucket { /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ - struct route4_filter *ht[16 + 16 + 1]; + struct route4_filter __rcu *ht[16 + 16 + 1]; + struct rcu_head rcu; }; struct route4_filter { - struct route4_filter *next; + struct route4_filter __rcu *next; u32 id; int iif; @@ -55,6 +56,8 @@ struct route4_filter { struct tcf_exts exts; u32 handle; struct route4_bucket *bkt; + struct tcf_proto *tp; + struct rcu_head rcu; }; #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) @@ -64,14 +67,13 @@ static inline int route4_fastmap_hash(u32 id, int iif) { return id & 0xF; } +static DEFINE_SPINLOCK(fastmap_lock); static void -route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) +route4_reset_fastmap(struct route4_head *head) { - spinlock_t *root_lock = qdisc_root_sleeping_lock(q); - - spin_lock_bh(root_lock); + spin_lock_bh(&fastmap_lock); memset(head->fastmap, 0, sizeof(head->fastmap)); - spin_unlock_bh(root_lock); + spin_unlock_bh(&fastmap_lock); } static void @@ -80,9 +82,12 @@ route4_set_fastmap(struct route4_head *head, u32 id, int iif, { int h = route4_fastmap_hash(id, iif); + /* fastmap updates must look atomic to align id, iif, filter */ + spin_lock_bh(&fastmap_lock); head->fastmap[h].id = id; head->fastmap[h].iif = iif; head->fastmap[h].filter = f; + spin_unlock_bh(&fastmap_lock); } static inline int route4_hash_to(u32 id) @@ -123,7 +128,7 @@ static inline int route4_hash_wild(void) static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct route4_head *head = tp->root; + struct route4_head *head = rcu_dereference_bh(tp->root); struct dst_entry *dst; struct route4_bucket *b; struct route4_filter *f; @@ -141,32 +146,43 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, iif = inet_iif(skb); h = route4_fastmap_hash(id, iif); + + spin_lock(&fastmap_lock); if (id == head->fastmap[h].id && iif == head->fastmap[h].iif && (f = head->fastmap[h].filter) != NULL) { - if (f == ROUTE4_FAILURE) + if (f == ROUTE4_FAILURE) { + spin_unlock(&fastmap_lock); goto failure; + } *res = f->res; + spin_unlock(&fastmap_lock); return 0; } + spin_unlock(&fastmap_lock); h = route4_hash_to(id); restart: - b = head->table[h]; + b = rcu_dereference_bh(head->table[h]); if (b) { - for (f = b->ht[route4_hash_from(id)]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]); + f; + f = rcu_dereference_bh(f->next)) if (f->id == id) ROUTE4_APPLY_RESULT(); - for (f = 
b->ht[route4_hash_iif(iif)]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]); + f; + f = rcu_dereference_bh(f->next)) if (f->iif == iif) ROUTE4_APPLY_RESULT(); - for (f = b->ht[route4_hash_wild()]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]); + f; + f = rcu_dereference_bh(f->next)) ROUTE4_APPLY_RESULT(); - } if (h < 256) { h = 256; @@ -213,7 +229,7 @@ static inline u32 from_hash(u32 id) static unsigned long route4_get(struct tcf_proto *tp, u32 handle) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); struct route4_bucket *b; struct route4_filter *f; unsigned int h1, h2; @@ -229,9 +245,11 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) if (h2 > 32) return 0; - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (b) { - for (f = b->ht[h2]; f; f = f->next) + for (f = rtnl_dereference(b->ht[h2]); + f; + f = rtnl_dereference(f->next)) if (f->handle == handle) return (unsigned long)f; } @@ -248,16 +266,17 @@ static int route4_init(struct tcf_proto *tp) } static void -route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) +route4_delete_filter(struct rcu_head *head) { - tcf_unbind_filter(tp, &f->res); - tcf_exts_destroy(tp, &f->exts); + struct route4_filter *f = container_of(head, struct route4_filter, rcu); + + tcf_exts_destroy(&f->exts); kfree(f); } static void route4_destroy(struct tcf_proto *tp) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); int h1, h2; if (head == NULL) @@ -266,28 +285,36 @@ static void route4_destroy(struct tcf_proto *tp) for (h1 = 0; h1 <= 256; h1++) { struct route4_bucket *b; - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (b) { for (h2 = 0; h2 <= 32; h2++) { struct route4_filter *f; - while ((f = b->ht[h2]) != NULL) { - b->ht[h2] = f->next; - route4_delete_filter(tp, f); + while ((f = rtnl_dereference(b->ht[h2])) != NULL) { + struct route4_filter *next; + + next = rtnl_dereference(f->next); + RCU_INIT_POINTER(b->ht[h2], next); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, route4_delete_filter); } } - kfree(b); + RCU_INIT_POINTER(head->table[h1], NULL); + kfree_rcu(b, rcu); } } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int route4_delete(struct tcf_proto *tp, unsigned long arg) { - struct route4_head *head = tp->root; - struct route4_filter **fp, *f = (struct route4_filter *)arg; - unsigned int h = 0; + struct route4_head *head = rtnl_dereference(tp->root); + struct route4_filter *f = (struct route4_filter *)arg; + struct route4_filter __rcu **fp; + struct route4_filter *nf; struct route4_bucket *b; + unsigned int h = 0; int i; if (!head || !f) @@ -296,27 +323,36 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) h = f->handle; b = f->bkt; - for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); - - route4_reset_fastmap(tp->q, head, f->id); - route4_delete_filter(tp, f); - - /* Strip tree */ - - for (i = 0; i <= 32; i++) - if (b->ht[i]) + fp = &b->ht[from_hash(h >> 16)]; + for (nf = rtnl_dereference(*fp); nf; + fp = &nf->next, nf = rtnl_dereference(*fp)) { + if (nf == f) { + /* unlink it */ + RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); + + /* Remove any fastmap lookups that might ref filter + * notice we unlink'd the filter so we can't get it + * back in the fastmap. 
+ */ + route4_reset_fastmap(head); + + /* Delete it */ + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, route4_delete_filter); + + /* Strip RTNL protected tree */ + for (i = 0; i <= 32; i++) { + struct route4_filter *rt; + + rt = rtnl_dereference(b->ht[i]); + if (rt) return 0; + } /* OK, session has no flows */ - tcf_tree_lock(tp); - head->table[to_hash(h)] = NULL; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(head->table[to_hash(h)], NULL); + kfree_rcu(b, rcu); - kfree(b); return 0; } } @@ -380,26 +416,25 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, } h1 = to_hash(nhandle); - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (!b) { err = -ENOBUFS; b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); if (b == NULL) goto errout; - tcf_tree_lock(tp); - head->table[h1] = b; - tcf_tree_unlock(tp); + rcu_assign_pointer(head->table[h1], b); } else { unsigned int h2 = from_hash(nhandle >> 16); err = -EEXIST; - for (fp = b->ht[h2]; fp; fp = fp->next) + for (fp = rtnl_dereference(b->ht[h2]); + fp; + fp = rtnl_dereference(fp->next)) if (fp->handle == f->handle) goto errout; } - tcf_tree_lock(tp); if (tb[TCA_ROUTE4_TO]) f->id = to; @@ -410,7 +445,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, f->handle = nhandle; f->bkt = b; - tcf_tree_unlock(tp); + f->tp = tp; if (tb[TCA_ROUTE4_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]); @@ -421,7 +456,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, return 0; errout: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } @@ -431,14 +466,15 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct route4_head *head = tp->root; - struct route4_filter *f, *f1, **fp; + struct route4_head *head = rtnl_dereference(tp->root); + struct route4_filter __rcu **fp; + struct route4_filter *fold, *f1, *pfp, *f = NULL; struct route4_bucket *b; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_ROUTE4_MAX + 1]; unsigned int h, th; - u32 old_handle = 0; int err; + bool new = true; if (opt == NULL) return handle ? 
-EINVAL : 0; @@ -447,70 +483,73 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - f = (struct route4_filter *)*arg; - if (f) { - if (f->handle != handle && handle) + fold = (struct route4_filter *)*arg; + if (fold && handle && fold->handle != handle) return -EINVAL; - if (f->bkt) - old_handle = f->handle; - - err = route4_set_parms(net, tp, base, f, handle, head, tb, - tca[TCA_RATE], 0, ovr); - if (err < 0) - return err; - - goto reinsert; - } - err = -ENOBUFS; if (head == NULL) { head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); if (head == NULL) goto errout; - - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); - if (f == NULL) + if (!f) goto errout; tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); + if (fold) { + f->id = fold->id; + f->iif = fold->iif; + f->res = fold->res; + f->handle = fold->handle; + + f->tp = fold->tp; + f->bkt = fold->bkt; + new = false; + } + err = route4_set_parms(net, tp, base, f, handle, head, tb, - tca[TCA_RATE], 1, ovr); + tca[TCA_RATE], new, ovr); if (err < 0) goto errout; -reinsert: h = from_hash(f->handle >> 16); - for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next) + fp = &f->bkt->ht[h]; + for (pfp = rtnl_dereference(*fp); + (f1 = rtnl_dereference(*fp)) != NULL; + fp = &f1->next) if (f->handle < f1->handle) break; - f->next = f1; - tcf_tree_lock(tp); - *fp = f; + netif_keep_dst(qdisc_dev(tp->q)); + rcu_assign_pointer(f->next, f1); + rcu_assign_pointer(*fp, f); - if (old_handle && f->handle != old_handle) { - th = to_hash(old_handle); - h = from_hash(old_handle >> 16); - b = head->table[th]; + if (fold && fold->handle && f->handle != fold->handle) { + th = to_hash(fold->handle); + h = from_hash(fold->handle >> 16); + b = rtnl_dereference(head->table[th]); if (b) { - for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { - if (*fp == f) { + fp = &b->ht[h]; + for (pfp = rtnl_dereference(*fp); pfp; + fp = &pfp->next, pfp = rtnl_dereference(*fp)) { + if (pfp == f) { *fp = f->next; break; } } } } - tcf_tree_unlock(tp); - route4_reset_fastmap(tp->q, head, f->id); + route4_reset_fastmap(head); *arg = (unsigned long)f; + if (fold) { + tcf_unbind_filter(tp, &fold->res); + call_rcu(&fold->rcu, route4_delete_filter); + } return 0; errout: @@ -520,7 +559,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); unsigned int h, h1; if (head == NULL) @@ -530,13 +569,15 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) return; for (h = 0; h <= 256; h++) { - struct route4_bucket *b = head->table[h]; + struct route4_bucket *b = rtnl_dereference(head->table[h]); if (b) { for (h1 = 0; h1 <= 32; h1++) { struct route4_filter *f; - for (f = b->ht[h1]; f; f = f->next) { + for (f = rtnl_dereference(b->ht[h1]); + f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 1020e233a5d6c74092fb153133b1bfed7f4177a9..6bb55f277a5a095822eb7db0bfe523e58405ecdf 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -70,31 +70,34 @@ struct rsvp_head { u32 tmap[256/32]; u32 hgenerator; u8 tgenerator; - struct rsvp_session *ht[256]; + struct rsvp_session __rcu *ht[256]; + struct rcu_head rcu; }; struct rsvp_session { - 
struct rsvp_session *next; - __be32 dst[RSVP_DST_LEN]; - struct tc_rsvp_gpi dpi; - u8 protocol; - u8 tunnelid; + struct rsvp_session __rcu *next; + __be32 dst[RSVP_DST_LEN]; + struct tc_rsvp_gpi dpi; + u8 protocol; + u8 tunnelid; /* 16 (src,sport) hash slots, and one wildcard source slot */ - struct rsvp_filter *ht[16 + 1]; + struct rsvp_filter __rcu *ht[16 + 1]; + struct rcu_head rcu; }; struct rsvp_filter { - struct rsvp_filter *next; - __be32 src[RSVP_DST_LEN]; - struct tc_rsvp_gpi spi; - u8 tunnelhdr; + struct rsvp_filter __rcu *next; + __be32 src[RSVP_DST_LEN]; + struct tc_rsvp_gpi spi; + u8 tunnelhdr; - struct tcf_result res; - struct tcf_exts exts; + struct tcf_result res; + struct tcf_exts exts; - u32 handle; - struct rsvp_session *sess; + u32 handle; + struct rsvp_session *sess; + struct rcu_head rcu; }; static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) @@ -128,7 +131,7 @@ static inline unsigned int hash_src(__be32 *src) static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_head *head = rcu_dereference_bh(tp->root); struct rsvp_session *s; struct rsvp_filter *f; unsigned int h1, h2; @@ -169,7 +172,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, h1 = hash_dst(dst, protocol, tunnelid); h2 = hash_src(src); - for (s = sht[h1]; s; s = s->next) { + for (s = rcu_dereference_bh(head->ht[h1]); s; + s = rcu_dereference_bh(s->next)) { if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] && protocol == s->protocol && !(s->dpi.mask & @@ -181,7 +185,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, #endif tunnelid == s->tunnelid) { - for (f = s->ht[h2]; f; f = f->next) { + for (f = rcu_dereference_bh(s->ht[h2]); f; + f = rcu_dereference_bh(f->next)) { if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] && !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key)) #if RSVP_DST_LEN == 4 @@ -205,7 +210,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, } /* And wildcard bucket... */ - for (f = s->ht[16]; f; f = f->next) { + for (f = rcu_dereference_bh(s->ht[16]); f; + f = rcu_dereference_bh(f->next)) { *res = f->res; RSVP_APPLY_RESULT(); goto matched; @@ -216,9 +222,36 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; } +static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h) +{ + struct rsvp_head *head = rtnl_dereference(tp->root); + struct rsvp_session *s; + struct rsvp_filter __rcu **ins; + struct rsvp_filter *pins; + unsigned int h1 = h & 0xFF; + unsigned int h2 = (h >> 8) & 0xFF; + + for (s = rtnl_dereference(head->ht[h1]); s; + s = rtnl_dereference(s->next)) { + for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ; + ins = &pins->next, pins = rtnl_dereference(*ins)) { + if (pins->handle == h) { + RCU_INIT_POINTER(n->next, pins->next); + rcu_assign_pointer(*ins, n); + return; + } + } + } + + /* Something went wrong if we are trying to replace a non-existent + * node. Might as well halt instead of silently failing. 
+ */ + BUG_ON(1); +} + static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_head *head = rtnl_dereference(tp->root); struct rsvp_session *s; struct rsvp_filter *f; unsigned int h1 = handle & 0xFF; @@ -227,8 +260,10 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) if (h2 > 16) return 0; - for (s = sht[h1]; s; s = s->next) { - for (f = s->ht[h2]; f; f = f->next) { + for (s = rtnl_dereference(head->ht[h1]); s; + s = rtnl_dereference(s->next)) { + for (f = rtnl_dereference(s->ht[h2]); f; + f = rtnl_dereference(f->next)) { if (f->handle == handle) return (unsigned long)f; } @@ -246,7 +281,7 @@ static int rsvp_init(struct tcf_proto *tp) data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); if (data) { - tp->root = data; + rcu_assign_pointer(tp->root, data); return 0; } return -ENOBUFS; @@ -256,54 +291,55 @@ static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) { tcf_unbind_filter(tp, &f->res); - tcf_exts_destroy(tp, &f->exts); - kfree(f); + tcf_exts_destroy(&f->exts); + kfree_rcu(f, rcu); } static void rsvp_destroy(struct tcf_proto *tp) { - struct rsvp_head *data = xchg(&tp->root, NULL); - struct rsvp_session **sht; + struct rsvp_head *data = rtnl_dereference(tp->root); int h1, h2; if (data == NULL) return; - sht = data->ht; + RCU_INIT_POINTER(tp->root, NULL); for (h1 = 0; h1 < 256; h1++) { struct rsvp_session *s; - while ((s = sht[h1]) != NULL) { - sht[h1] = s->next; + while ((s = rtnl_dereference(data->ht[h1])) != NULL) { + RCU_INIT_POINTER(data->ht[h1], s->next); for (h2 = 0; h2 <= 16; h2++) { struct rsvp_filter *f; - while ((f = s->ht[h2]) != NULL) { - s->ht[h2] = f->next; + while ((f = rtnl_dereference(s->ht[h2])) != NULL) { + rcu_assign_pointer(s->ht[h2], f->next); rsvp_delete_filter(tp, f); } } - kfree(s); + kfree_rcu(s, rcu); } } - kfree(data); + kfree_rcu(data, rcu); } static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) { - struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg; + struct rsvp_head *head = rtnl_dereference(tp->root); + struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg; + struct rsvp_filter __rcu **fp; unsigned int h = f->handle; - struct rsvp_session **sp; - struct rsvp_session *s = f->sess; + struct rsvp_session __rcu **sp; + struct rsvp_session *nsp, *s = f->sess; int i; - for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); + fp = &s->ht[(h >> 8) & 0xFF]; + for (nfp = rtnl_dereference(*fp); nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) { + if (nfp == f) { + RCU_INIT_POINTER(*fp, f->next); rsvp_delete_filter(tp, f); /* Strip tree */ @@ -313,14 +349,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) return 0; /* OK, session has no flows */ - for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF]; - *sp; sp = &(*sp)->next) { - if (*sp == s) { - tcf_tree_lock(tp); - *sp = s->next; - tcf_tree_unlock(tp); - - kfree(s); + sp = &head->ht[h & 0xFF]; + for (nsp = rtnl_dereference(*sp); nsp; + sp = &nsp->next, nsp = rtnl_dereference(*sp)) { + if (nsp == s) { + RCU_INIT_POINTER(*sp, s->next); + kfree_rcu(s, rcu); return 0; } } @@ -333,7 +367,7 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) { - struct rsvp_head *data = tp->root; + struct rsvp_head *data = rtnl_dereference(tp->root); int i = 0xFFFF; while (i-- > 0) { @@ -361,7 +395,7 
@@ static int tunnel_bts(struct rsvp_head *data) static void tunnel_recycle(struct rsvp_head *data) { - struct rsvp_session **sht = data->ht; + struct rsvp_session __rcu **sht = data->ht; u32 tmap[256/32]; int h1, h2; @@ -369,11 +403,13 @@ static void tunnel_recycle(struct rsvp_head *data) for (h1 = 0; h1 < 256; h1++) { struct rsvp_session *s; - for (s = sht[h1]; s; s = s->next) { + for (s = rtnl_dereference(sht[h1]); s; + s = rtnl_dereference(s->next)) { for (h2 = 0; h2 <= 16; h2++) { struct rsvp_filter *f; - for (f = s->ht[h2]; f; f = f->next) { + for (f = rtnl_dereference(s->ht[h2]); f; + f = rtnl_dereference(f->next)) { if (f->tunnelhdr == 0) continue; data->tgenerator = f->res.classid; @@ -417,9 +453,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct rsvp_head *data = tp->root; - struct rsvp_filter *f, **fp; - struct rsvp_session *s, **sp; + struct rsvp_head *data = rtnl_dereference(tp->root); + struct rsvp_filter *f, *nfp; + struct rsvp_filter __rcu **fp; + struct rsvp_session *nsp, *s; + struct rsvp_session __rcu **sp; struct tc_rsvp_pinfo *pinfo = NULL; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_RSVP_MAX + 1]; @@ -443,15 +481,26 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, f = (struct rsvp_filter *)*arg; if (f) { /* Node exists: adjust only classid */ + struct rsvp_filter *n; if (f->handle != handle && handle) goto errout2; + + n = kmemdup(f, sizeof(*f), GFP_KERNEL); + if (!n) { + err = -ENOMEM; + goto errout2; + } + + tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE); + if (tb[TCA_RSVP_CLASSID]) { - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); - tcf_bind_filter(tp, &f->res, base); + n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); + tcf_bind_filter(tp, &n->res, base); } - tcf_exts_change(tp, &f->exts, &e); + tcf_exts_change(tp, &n->exts, &e); + rsvp_replace(tp, n, handle); return 0; } @@ -499,7 +548,9 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, goto errout; } - for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) { + for (sp = &data->ht[h1]; + (s = rtnl_dereference(*sp)) != NULL; + sp = &s->next) { if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && pinfo && pinfo->protocol == s->protocol && memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && @@ -521,12 +572,16 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, tcf_exts_change(tp, &f->exts, &e); - for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) - if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask) + fp = &s->ht[h2]; + for (nfp = rtnl_dereference(*fp); nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) { + __u32 mask = nfp->spi.mask & f->spi.mask; + + if (mask != f->spi.mask) break; - f->next = *fp; - wmb(); - *fp = f; + } + RCU_INIT_POINTER(f->next, nfp); + rcu_assign_pointer(*fp, f); *arg = (unsigned long)f; return 0; @@ -546,26 +601,27 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, s->protocol = pinfo->protocol; s->tunnelid = pinfo->tunnelid; } - for (sp = &data->ht[h1]; *sp; sp = &(*sp)->next) { - if (((*sp)->dpi.mask&s->dpi.mask) != s->dpi.mask) + sp = &data->ht[h1]; + for (nsp = rtnl_dereference(*sp); nsp; + sp = &nsp->next, nsp = rtnl_dereference(*sp)) { + if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask) break; } - s->next = *sp; - wmb(); - *sp = s; + RCU_INIT_POINTER(s->next, nsp); + rcu_assign_pointer(*sp, s); goto insert; errout: kfree(f); errout2: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } static 
void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct rsvp_head *head = tp->root; + struct rsvp_head *head = rtnl_dereference(tp->root); unsigned int h, h1; if (arg->stop) @@ -574,11 +630,13 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) for (h = 0; h < 256; h++) { struct rsvp_session *s; - for (s = head->ht[h]; s; s = s->next) { + for (s = rtnl_dereference(head->ht[h]); s; + s = rtnl_dereference(s->next)) { for (h1 = 0; h1 <= 16; h1++) { struct rsvp_filter *f; - for (f = s->ht[h1]; f; f = f->next) { + for (f = rtnl_dereference(s->ht[h1]); f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 3e9f76413b3b20e05a77bbabfa27961cee8d2e6b..30f10fb07f4a7d50e390c7df730f94366e00fb73 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -32,19 +32,21 @@ struct tcindex_filter_result { struct tcindex_filter { u16 key; struct tcindex_filter_result result; - struct tcindex_filter *next; + struct tcindex_filter __rcu *next; + struct rcu_head rcu; }; struct tcindex_data { struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ - struct tcindex_filter **h; /* imperfect hash; only used if !perfect; - NULL if unused */ + struct tcindex_filter __rcu **h; /* imperfect hash; */ + struct tcf_proto *tp; u16 mask; /* AND key with mask */ - int shift; /* shift ANDed key to the right */ - int hash; /* hash table size; 0 if undefined */ - int alloc_hash; /* allocated size */ - int fall_through; /* 0: only classify if explicit match */ + u32 shift; /* shift ANDed key to the right */ + u32 hash; /* hash table size; 0 if undefined */ + u32 alloc_hash; /* allocated size */ + u32 fall_through; /* 0: only classify if explicit match */ + struct rcu_head rcu; }; static inline int @@ -56,13 +58,18 @@ tcindex_filter_is_set(struct tcindex_filter_result *r) static struct tcindex_filter_result * tcindex_lookup(struct tcindex_data *p, u16 key) { - struct tcindex_filter *f; + if (p->perfect) { + struct tcindex_filter_result *f = p->perfect + key; + + return tcindex_filter_is_set(f) ? f : NULL; + } else if (p->h) { + struct tcindex_filter __rcu **fp; + struct tcindex_filter *f; - if (p->perfect) - return tcindex_filter_is_set(p->perfect + key) ? 
- p->perfect + key : NULL; - else if (p->h) { - for (f = p->h[key % p->hash]; f; f = f->next) + fp = &p->h[key % p->hash]; + for (f = rcu_dereference_bh_rtnl(*fp); + f; + fp = &f->next, f = rcu_dereference_bh_rtnl(*fp)) if (f->key == key) return &f->result; } @@ -74,7 +81,7 @@ tcindex_lookup(struct tcindex_data *p, u16 key) static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rcu_dereference_bh(tp->root); struct tcindex_filter_result *f; int key = (skb->tc_index & p->mask) >> p->shift; @@ -99,7 +106,7 @@ static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r; pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); @@ -129,49 +136,59 @@ static int tcindex_init(struct tcf_proto *tp) p->hash = DEFAULT_HASH_SIZE; p->fall_through = 1; - tp->root = p; + rcu_assign_pointer(tp->root, p); return 0; } - static int -__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock) +tcindex_delete(struct tcf_proto *tp, unsigned long arg) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg; + struct tcindex_filter __rcu **walk; struct tcindex_filter *f = NULL; - pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f); + pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p); if (p->perfect) { if (!r->res.class) return -ENOENT; } else { int i; - struct tcindex_filter **walk = NULL; - for (i = 0; i < p->hash; i++) - for (walk = p->h+i; *walk; walk = &(*walk)->next) - if (&(*walk)->result == r) + for (i = 0; i < p->hash; i++) { + walk = p->h + i; + for (f = rtnl_dereference(*walk); f; + walk = &f->next, f = rtnl_dereference(*walk)) { + if (&f->result == r) goto found; + } + } return -ENOENT; found: - f = *walk; - if (lock) - tcf_tree_lock(tp); - *walk = f->next; - if (lock) - tcf_tree_unlock(tp); + rcu_assign_pointer(*walk, rtnl_dereference(f->next)); } tcf_unbind_filter(tp, &r->res); - tcf_exts_destroy(tp, &r->exts); - kfree(f); + tcf_exts_destroy(&r->exts); + if (f) + kfree_rcu(f, rcu); return 0; } -static int tcindex_delete(struct tcf_proto *tp, unsigned long arg) +static int tcindex_destroy_element(struct tcf_proto *tp, + unsigned long arg, + struct tcf_walker *walker) { - return __tcindex_delete(tp, arg, 1); + return tcindex_delete(tp, arg); +} + +static void __tcindex_destroy(struct rcu_head *head) +{ + struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + + kfree(p->perfect); + kfree(p->h); + kfree(p); } static inline int @@ -194,6 +211,14 @@ static void tcindex_filter_result_init(struct tcindex_filter_result *r) tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); } +static void __tcindex_partial_destroy(struct rcu_head *head) +{ + struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + + kfree(p->perfect); + kfree(p); +} + static int tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, @@ -203,7 +228,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, int err, balloc = 0; struct tcindex_filter_result new_filter_result, *old_r = r; struct tcindex_filter_result cr; - struct tcindex_data cp; + struct 
tcindex_data *cp, *oldp; struct tcindex_filter *f = NULL; /* make gcc behave */ struct tcf_exts e; @@ -212,89 +237,130 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (err < 0) return err; - memcpy(&cp, p, sizeof(cp)); - tcindex_filter_result_init(&new_filter_result); + err = -ENOMEM; + /* tcindex_data attributes must look atomic to classifier/lookup so + * allocate new tcindex data and RCU assign it onto root. Keeping + * perfect hash and hash pointers from old data. + */ + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) + goto errout; + + cp->mask = p->mask; + cp->shift = p->shift; + cp->hash = p->hash; + cp->alloc_hash = p->alloc_hash; + cp->fall_through = p->fall_through; + cp->tp = tp; + if (p->perfect) { + int i; + + cp->perfect = kmemdup(p->perfect, + sizeof(*r) * cp->hash, GFP_KERNEL); + if (!cp->perfect) + goto errout; + for (i = 0; i < cp->hash; i++) + tcf_exts_init(&cp->perfect[i].exts, + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); + balloc = 1; + } + cp->h = p->h; + + tcindex_filter_result_init(&new_filter_result); tcindex_filter_result_init(&cr); if (old_r) cr.res = r->res; if (tb[TCA_TCINDEX_HASH]) - cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); + cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); if (tb[TCA_TCINDEX_MASK]) - cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); + cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); if (tb[TCA_TCINDEX_SHIFT]) - cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); + cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); err = -EBUSY; + /* Hash already allocated, make sure that we still meet the * requirements for the allocated hash. */ - if (cp.perfect) { - if (!valid_perfect_hash(&cp) || - cp.hash > cp.alloc_hash) - goto errout; - } else if (cp.h && cp.hash != cp.alloc_hash) - goto errout; + if (cp->perfect) { + if (!valid_perfect_hash(cp) || + cp->hash > cp->alloc_hash) + goto errout_alloc; + } else if (cp->h && cp->hash != cp->alloc_hash) { + goto errout_alloc; + } err = -EINVAL; if (tb[TCA_TCINDEX_FALL_THROUGH]) - cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); + cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); - if (!cp.hash) { + if (!cp->hash) { /* Hash not specified, use perfect hash if the upper limit * of the hashing index is below the threshold. */ - if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) - cp.hash = (cp.mask >> cp.shift) + 1; + if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) + cp->hash = (cp->mask >> cp->shift) + 1; else - cp.hash = DEFAULT_HASH_SIZE; + cp->hash = DEFAULT_HASH_SIZE; } - if (!cp.perfect && !cp.h) - cp.alloc_hash = cp.hash; + if (!cp->perfect && !cp->h) + cp->alloc_hash = cp->hash; /* Note: this could be as restrictive as if (handle & ~(mask >> shift)) * but then, we'd fail handles that may become valid after some future * mask change. While this is extremely unlikely to ever matter, * the check below is safer (and also more backwards-compatible). 
*/ - if (cp.perfect || valid_perfect_hash(&cp)) - if (handle >= cp.alloc_hash) - goto errout; + if (cp->perfect || valid_perfect_hash(cp)) + if (handle >= cp->alloc_hash) + goto errout_alloc; err = -ENOMEM; - if (!cp.perfect && !cp.h) { - if (valid_perfect_hash(&cp)) { + if (!cp->perfect && !cp->h) { + if (valid_perfect_hash(cp)) { int i; - cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); - if (!cp.perfect) - goto errout; - for (i = 0; i < cp.hash; i++) - tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT, + cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL); + if (!cp->perfect) + goto errout_alloc; + for (i = 0; i < cp->hash; i++) + tcf_exts_init(&cp->perfect[i].exts, + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); balloc = 1; } else { - cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); - if (!cp.h) - goto errout; + struct tcindex_filter __rcu **hash; + + hash = kcalloc(cp->hash, + sizeof(struct tcindex_filter *), + GFP_KERNEL); + + if (!hash) + goto errout_alloc; + + cp->h = hash; balloc = 2; } } - if (cp.perfect) - r = cp.perfect + handle; + if (cp->perfect) + r = cp->perfect + handle; else - r = tcindex_lookup(&cp, handle) ? : &new_filter_result; + r = tcindex_lookup(cp, handle) ? : &new_filter_result; if (r == &new_filter_result) { f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto errout_alloc; + f->key = handle; + tcindex_filter_result_init(&f->result); + f->next = NULL; } if (tb[TCA_TCINDEX_CLASSID]) { @@ -307,34 +373,40 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, else tcf_exts_change(tp, &cr.exts, &e); - tcf_tree_lock(tp); if (old_r && old_r != r) tcindex_filter_result_init(old_r); - memcpy(p, &cp, sizeof(cp)); + oldp = p; r->res = cr.res; + rcu_assign_pointer(tp->root, cp); if (r == &new_filter_result) { - struct tcindex_filter **fp; + struct tcindex_filter *nfp; + struct tcindex_filter __rcu **fp; - f->key = handle; - f->result = new_filter_result; - f->next = NULL; - for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next) - /* nothing */; - *fp = f; + tcf_exts_change(tp, &f->result.exts, &r->exts); + + fp = cp->h + (handle % cp->hash); + for (nfp = rtnl_dereference(*fp); + nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) + ; /* nothing */ + + rcu_assign_pointer(*fp, f); } - tcf_tree_unlock(tp); + if (oldp) + call_rcu(&oldp->rcu, __tcindex_partial_destroy); return 0; errout_alloc: if (balloc == 1) - kfree(cp.perfect); + kfree(cp->perfect); else if (balloc == 2) - kfree(cp.h); + kfree(cp->h); errout: - tcf_exts_destroy(tp, &e); + kfree(cp); + tcf_exts_destroy(&e); return err; } @@ -345,7 +417,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, { struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_TCINDEX_MAX + 1]; - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg; int err; @@ -364,10 +436,9 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, tca[TCA_RATE], ovr); } - static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter *f, *next; int i; @@ -390,8 +461,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) if (!p->h) return; for (i = 0; i < p->hash; i++) { - for (f = p->h[i]; f; f = next) { - next = f->next; + for (f = rtnl_dereference(p->h[i]); f; f = next) { + next = rtnl_dereference(f->next); if (walker->count >= walker->skip) { if 
(walker->fn(tp, (unsigned long) &f->result, walker) < 0) { @@ -404,17 +475,9 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) } } - -static int tcindex_destroy_element(struct tcf_proto *tp, - unsigned long arg, struct tcf_walker *walker) -{ - return __tcindex_delete(tp, arg, 0); -} - - static void tcindex_destroy(struct tcf_proto *tp) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcf_walker walker; pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); @@ -422,17 +485,16 @@ static void tcindex_destroy(struct tcf_proto *tp) walker.skip = 0; walker.fn = tcindex_destroy_element; tcindex_walk(tp, &walker); - kfree(p->perfect); - kfree(p->h); - kfree(p); - tp->root = NULL; + + RCU_INIT_POINTER(tp->root, NULL); + call_rcu(&p->rcu, __tcindex_destroy); } static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; @@ -455,15 +517,18 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, nla_nest_end(skb, nest); } else { if (p->perfect) { - t->tcm_handle = r-p->perfect; + t->tcm_handle = r - p->perfect; } else { struct tcindex_filter *f; + struct tcindex_filter __rcu **fp; int i; t->tcm_handle = 0; for (i = 0; !t->tcm_handle && i < p->hash; i++) { - for (f = p->h[i]; !t->tcm_handle && f; - f = f->next) { + fp = &p->h[i]; + for (f = rtnl_dereference(*fp); + !t->tcm_handle && f; + fp = &f->next, f = rtnl_dereference(*fp)) { if (&f->result == r) t->tcm_handle = f->key; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 70c0be8d0121db461c1e21793a1b68d844f37e8b..0472909bb014c258e92f796736be7a527af843d1 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -44,40 +45,49 @@ #include struct tc_u_knode { - struct tc_u_knode *next; + struct tc_u_knode __rcu *next; u32 handle; - struct tc_u_hnode *ht_up; + struct tc_u_hnode __rcu *ht_up; struct tcf_exts exts; #ifdef CONFIG_NET_CLS_IND int ifindex; #endif u8 fshift; struct tcf_result res; - struct tc_u_hnode *ht_down; + struct tc_u_hnode __rcu *ht_down; #ifdef CONFIG_CLS_U32_PERF - struct tc_u32_pcnt *pf; + struct tc_u32_pcnt __percpu *pf; #endif #ifdef CONFIG_CLS_U32_MARK - struct tc_u32_mark mark; + u32 val; + u32 mask; + u32 __percpu *pcpu_success; #endif + struct tcf_proto *tp; + struct rcu_head rcu; + /* The 'sel' field MUST be the last field in structure to allow for + * tc_u32_keys allocated at end of structure. 
+ */ struct tc_u32_sel sel; }; struct tc_u_hnode { - struct tc_u_hnode *next; + struct tc_u_hnode __rcu *next; u32 handle; u32 prio; struct tc_u_common *tp_c; int refcnt; unsigned int divisor; - struct tc_u_knode *ht[1]; + struct tc_u_knode __rcu *ht[1]; + struct rcu_head rcu; }; struct tc_u_common { - struct tc_u_hnode *hlist; + struct tc_u_hnode __rcu *hlist; struct Qdisc *q; int refcnt; u32 hgenerator; + struct rcu_head rcu; }; static inline unsigned int u32_hash_fold(__be32 key, @@ -96,7 +106,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct unsigned int off; } stack[TC_U32_MAXDEPTH]; - struct tc_u_hnode *ht = tp->root; + struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); unsigned int off = skb_network_offset(skb); struct tc_u_knode *n; int sdepth = 0; @@ -108,23 +118,23 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct int i, r; next_ht: - n = ht->ht[sel]; + n = rcu_dereference_bh(ht->ht[sel]); next_knode: if (n) { struct tc_u32_key *key = n->sel.keys; #ifdef CONFIG_CLS_U32_PERF - n->pf->rcnt += 1; + __this_cpu_inc(n->pf->rcnt); j = 0; #endif #ifdef CONFIG_CLS_U32_MARK - if ((skb->mark & n->mark.mask) != n->mark.val) { - n = n->next; + if ((skb->mark & n->mask) != n->val) { + n = rcu_dereference_bh(n->next); goto next_knode; } else { - n->mark.success++; + __this_cpu_inc(*n->pcpu_success); } #endif @@ -139,37 +149,39 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct if (!data) goto out; if ((*data ^ key->val) & key->mask) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } #ifdef CONFIG_CLS_U32_PERF - n->pf->kcnts[j] += 1; + __this_cpu_inc(n->pf->kcnts[j]); j++; #endif } - if (n->ht_down == NULL) { + + ht = rcu_dereference_bh(n->ht_down); + if (!ht) { check_terminal: if (n->sel.flags & TC_U32_TERMINAL) { *res = n->res; #ifdef CONFIG_NET_CLS_IND if (!tcf_match_indev(skb, n->ifindex)) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } #endif #ifdef CONFIG_CLS_U32_PERF - n->pf->rhit += 1; + __this_cpu_inc(n->pf->rhit); #endif r = tcf_exts_exec(skb, &n->exts, res); if (r < 0) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } return r; } - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } @@ -180,7 +192,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct stack[sdepth].off = off; sdepth++; - ht = n->ht_down; + ht = rcu_dereference_bh(n->ht_down); sel = 0; if (ht->divisor) { __be32 *data, hdata; @@ -222,7 +234,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct /* POP */ if (sdepth--) { n = stack[sdepth].knode; - ht = n->ht_up; + ht = rcu_dereference_bh(n->ht_up); off = stack[sdepth].off; goto check_terminal; } @@ -239,7 +251,9 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) { struct tc_u_hnode *ht; - for (ht = tp_c->hlist; ht; ht = ht->next) + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) if (ht->handle == handle) break; @@ -256,7 +270,9 @@ u32_lookup_key(struct tc_u_hnode *ht, u32 handle) if (sel > ht->divisor) goto out; - for (n = ht->ht[sel]; n; n = n->next) + for (n = rtnl_dereference(ht->ht[sel]); + n; + n = rtnl_dereference(n->next)) if (n->handle == handle) break; out: @@ -270,7 +286,7 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle) struct tc_u_common *tp_c = tp->data; if (TC_U32_HTID(handle) == TC_U32_ROOT) - ht = tp->root; + ht = rtnl_dereference(tp->root); else ht = 
u32_lookup_ht(tp_c, TC_U32_HTID(handle)); @@ -291,6 +307,9 @@ static u32 gen_new_htid(struct tc_u_common *tp_c) { int i = 0x800; + /* hgenerator is only used inside the rtnl lock, so it is safe to + * increment it without read _copy_ update semantics + */ do { if (++tp_c->hgenerator == 0x7FF) tp_c->hgenerator = 1; @@ -326,41 +345,78 @@ static int u32_init(struct tcf_proto *tp) } tp_c->refcnt++; - root_ht->next = tp_c->hlist; - tp_c->hlist = root_ht; + RCU_INIT_POINTER(root_ht->next, tp_c->hlist); + rcu_assign_pointer(tp_c->hlist, root_ht); root_ht->tp_c = tp_c; - tp->root = root_ht; + rcu_assign_pointer(tp->root, root_ht); tp->data = tp_c; return 0; } -static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n) +static int u32_destroy_key(struct tcf_proto *tp, + struct tc_u_knode *n, + bool free_pf) { - tcf_unbind_filter(tp, &n->res); - tcf_exts_destroy(tp, &n->exts); + tcf_exts_destroy(&n->exts); if (n->ht_down) n->ht_down->refcnt--; #ifdef CONFIG_CLS_U32_PERF - kfree(n->pf); + if (free_pf) + free_percpu(n->pf); +#endif +#ifdef CONFIG_CLS_U32_MARK + if (free_pf) + free_percpu(n->pcpu_success); #endif kfree(n); return 0; } +/* u32_delete_key_rcu should be called when free'ing a copied + * version of a tc_u_knode obtained from u32_init_knode(). When + * copies are obtained from u32_init_knode() the statistics are + * shared between the old and new copies to allow readers to + * continue to update the statistics during the copy. To support + * this the u32_delete_key_rcu variant does not free the percpu + * statistics. + */ +static void u32_delete_key_rcu(struct rcu_head *rcu) +{ + struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); + + u32_destroy_key(key->tp, key, false); +} + +/* u32_delete_key_freepf_rcu is the rcu callback variant + * that frees the entire structure including the statistics + * percpu variables. Only use this if the key is not a copy + * returned by u32_init_knode(). 
See u32_delete_key_rcu() + * for the variant that should be used with keys returned from + * u32_init_knode() + */ +static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) +{ + struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); + + u32_destroy_key(key->tp, key, true); +} + static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) { - struct tc_u_knode **kp; - struct tc_u_hnode *ht = key->ht_up; + struct tc_u_knode __rcu **kp; + struct tc_u_knode *pkp; + struct tc_u_hnode *ht = rtnl_dereference(key->ht_up); if (ht) { - for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) { - if (*kp == key) { - tcf_tree_lock(tp); - *kp = key->next; - tcf_tree_unlock(tp); - - u32_destroy_key(tp, key); + kp = &ht->ht[TC_U32_HASH(key->handle)]; + for (pkp = rtnl_dereference(*kp); pkp; + kp = &pkp->next, pkp = rtnl_dereference(*kp)) { + if (pkp == key) { + RCU_INIT_POINTER(*kp, key->next); + + tcf_unbind_filter(tp, &key->res); + call_rcu(&key->rcu, u32_delete_key_freepf_rcu); return 0; } } @@ -375,10 +431,11 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) unsigned int h; for (h = 0; h <= ht->divisor; h++) { - while ((n = ht->ht[h]) != NULL) { - ht->ht[h] = n->next; - - u32_destroy_key(tp, n); + while ((n = rtnl_dereference(ht->ht[h])) != NULL) { + RCU_INIT_POINTER(ht->ht[h], + rtnl_dereference(n->next)); + tcf_unbind_filter(tp, &n->res); + call_rcu(&n->rcu, u32_delete_key_freepf_rcu); } } } @@ -386,28 +443,31 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) { struct tc_u_common *tp_c = tp->data; - struct tc_u_hnode **hn; + struct tc_u_hnode __rcu **hn; + struct tc_u_hnode *phn; WARN_ON(ht->refcnt); u32_clear_hnode(tp, ht); - for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) { - if (*hn == ht) { - *hn = ht->next; - kfree(ht); + hn = &tp_c->hlist; + for (phn = rtnl_dereference(*hn); + phn; + hn = &phn->next, phn = rtnl_dereference(*hn)) { + if (phn == ht) { + RCU_INIT_POINTER(*hn, ht->next); + kfree_rcu(ht, rcu); return 0; } } - WARN_ON(1); return -ENOENT; } static void u32_destroy(struct tcf_proto *tp) { struct tc_u_common *tp_c = tp->data; - struct tc_u_hnode *root_ht = tp->root; + struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); WARN_ON(root_ht == NULL); @@ -419,17 +479,16 @@ static void u32_destroy(struct tcf_proto *tp) tp->q->u32_node = NULL; - for (ht = tp_c->hlist; ht; ht = ht->next) { + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { ht->refcnt--; u32_clear_hnode(tp, ht); } - while ((ht = tp_c->hlist) != NULL) { - tp_c->hlist = ht->next; - - WARN_ON(ht->refcnt != 0); - - kfree(ht); + while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) { + RCU_INIT_POINTER(tp_c->hlist, ht->next); + kfree_rcu(ht, rcu); } kfree(tp_c); @@ -441,6 +500,7 @@ static void u32_destroy(struct tcf_proto *tp) static int u32_delete(struct tcf_proto *tp, unsigned long arg) { struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; + struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); if (ht == NULL) return 0; @@ -448,7 +508,7 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) if (TC_U32_KEY(ht->handle)) return u32_delete_key(tp, (struct tc_u_knode *)ht); - if (tp->root == ht) + if (root_ht == ht) return -EINVAL; if (ht->refcnt == 1) { @@ -471,7 +531,9 @@ static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) if (!bitmap) return handle | 0xFFF; - for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) + for 
(n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]); + n; + n = rtnl_dereference(n->next)) set_bit(TC_U32_NODE(n->handle), bitmap); i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800); @@ -521,10 +583,8 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, ht_down->refcnt++; } - tcf_tree_lock(tp); - ht_old = n->ht_down; - n->ht_down = ht_down; - tcf_tree_unlock(tp); + ht_old = rtnl_dereference(n->ht_down); + rcu_assign_pointer(n->ht_down, ht_down); if (ht_old) ht_old->refcnt--; @@ -547,10 +607,86 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, return 0; errout: - tcf_exts_destroy(tp, &e); + tcf_exts_destroy(&e); return err; } +static void u32_replace_knode(struct tcf_proto *tp, + struct tc_u_common *tp_c, + struct tc_u_knode *n) +{ + struct tc_u_knode __rcu **ins; + struct tc_u_knode *pins; + struct tc_u_hnode *ht; + + if (TC_U32_HTID(n->handle) == TC_U32_ROOT) + ht = rtnl_dereference(tp->root); + else + ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle)); + + ins = &ht->ht[TC_U32_HASH(n->handle)]; + + /* The node must always exist for it to be replaced; if this is not the + * case then something went very wrong elsewhere. + */ + for (pins = rtnl_dereference(*ins); ; + ins = &pins->next, pins = rtnl_dereference(*ins)) + if (pins->handle == n->handle) + break; + + RCU_INIT_POINTER(n->next, pins->next); + rcu_assign_pointer(*ins, n); +} + +static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp, + struct tc_u_knode *n) +{ + struct tc_u_knode *new; + struct tc_u32_sel *s = &n->sel; + + new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), + GFP_KERNEL); + + if (!new) + return NULL; + + RCU_INIT_POINTER(new->next, n->next); + new->handle = n->handle; + RCU_INIT_POINTER(new->ht_up, n->ht_up); + +#ifdef CONFIG_NET_CLS_IND + new->ifindex = n->ifindex; +#endif + new->fshift = n->fshift; + new->res = n->res; + RCU_INIT_POINTER(new->ht_down, n->ht_down); + + /* bump reference count as long as we hold pointer to structure */ + if (new->ht_down) + new->ht_down->refcnt++; + +#ifdef CONFIG_CLS_U32_PERF + /* Statistics may be incremented by readers during update + * so we must keep them intact. When the node is later destroyed + * a special destroy call must be made to not free the pf memory. + */ + new->pf = n->pf; +#endif + +#ifdef CONFIG_CLS_U32_MARK + new->val = n->val; + new->mask = n->mask; + /* Similarly success statistics must be moved as pointers */ + new->pcpu_success = n->pcpu_success; +#endif + new->tp = tp; + memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); + + tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE); + + return new; +} + static int u32_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, @@ -564,6 +700,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, struct nlattr *tb[TCA_U32_MAX + 1]; u32 htid; int err; +#ifdef CONFIG_CLS_U32_PERF + size_t size; +#endif if (opt == NULL) return handle ? 
-EINVAL : 0; @@ -574,11 +713,28 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, n = (struct tc_u_knode *)*arg; if (n) { + struct tc_u_knode *new; + if (TC_U32_KEY(n->handle) == 0) return -EINVAL; - return u32_set_parms(net, tp, base, n->ht_up, n, tb, - tca[TCA_RATE], ovr); + new = u32_init_knode(tp, n); + if (!new) + return -ENOMEM; + + err = u32_set_parms(net, tp, base, + rtnl_dereference(n->ht_up), new, tb, + tca[TCA_RATE], ovr); + + if (err) { + u32_destroy_key(tp, new, false); + return err; + } + + u32_replace_knode(tp, tp_c, new); + tcf_unbind_filter(tp, &n->res); + call_rcu(&n->rcu, u32_delete_key_rcu); + return 0; } if (tb[TCA_U32_DIVISOR]) { @@ -601,8 +757,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ht->divisor = divisor; ht->handle = handle; ht->prio = tp->prio; - ht->next = tp_c->hlist; - tp_c->hlist = ht; + RCU_INIT_POINTER(ht->next, tp_c->hlist); + rcu_assign_pointer(tp_c->hlist, ht); *arg = (unsigned long)ht; return 0; } @@ -610,7 +766,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (tb[TCA_U32_HASH]) { htid = nla_get_u32(tb[TCA_U32_HASH]); if (TC_U32_HTID(htid) == TC_U32_ROOT) { - ht = tp->root; + ht = rtnl_dereference(tp->root); htid = ht->handle; } else { ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid)); @@ -618,7 +774,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } } else { - ht = tp->root; + ht = rtnl_dereference(tp->root); htid = ht->handle; } @@ -642,46 +798,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; #ifdef CONFIG_CLS_U32_PERF - n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); - if (n->pf == NULL) { + size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64); + n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); + if (!n->pf) { kfree(n); return -ENOBUFS; } #endif memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); - n->ht_up = ht; + RCU_INIT_POINTER(n->ht_up, ht); n->handle = handle; n->fshift = s->hmask ? 
ffs(ntohl(s->hmask)) - 1 : 0; tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); + n->tp = tp; #ifdef CONFIG_CLS_U32_MARK + n->pcpu_success = alloc_percpu(u32); + if (!n->pcpu_success) { + err = -ENOMEM; + goto errout; + } + if (tb[TCA_U32_MARK]) { struct tc_u32_mark *mark; mark = nla_data(tb[TCA_U32_MARK]); - memcpy(&n->mark, mark, sizeof(struct tc_u32_mark)); - n->mark.success = 0; + n->val = mark->val; + n->mask = mark->mask; } #endif err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr); if (err == 0) { - struct tc_u_knode **ins; - for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) - if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle)) + struct tc_u_knode __rcu **ins; + struct tc_u_knode *pins; + + ins = &ht->ht[TC_U32_HASH(handle)]; + for (pins = rtnl_dereference(*ins); pins; + ins = &pins->next, pins = rtnl_dereference(*ins)) + if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle)) break; - n->next = *ins; - tcf_tree_lock(tp); - *ins = n; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(n->next, pins); + rcu_assign_pointer(*ins, n); *arg = (unsigned long)n; return 0; } + +#ifdef CONFIG_CLS_U32_MARK + free_percpu(n->pcpu_success); +errout: +#endif + #ifdef CONFIG_CLS_U32_PERF - kfree(n->pf); + free_percpu(n->pf); #endif kfree(n); return err; @@ -697,7 +869,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (arg->stop) return; - for (ht = tp_c->hlist; ht; ht = ht->next) { + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { if (ht->prio != tp->prio) continue; if (arg->count >= arg->skip) { @@ -708,7 +882,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) } arg->count++; for (h = 0; h <= ht->divisor; h++) { - for (n = ht->ht[h]; n; n = n->next) { + for (n = rtnl_dereference(ht->ht[h]); + n; + n = rtnl_dereference(n->next)) { if (arg->count < arg->skip) { arg->count++; continue; @@ -727,6 +903,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct tc_u_knode *n = (struct tc_u_knode *)fh; + struct tc_u_hnode *ht_up, *ht_down; struct nlattr *nest; if (n == NULL) @@ -745,11 +922,18 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) goto nla_put_failure; } else { +#ifdef CONFIG_CLS_U32_PERF + struct tc_u32_pcnt *gpf; + int cpu; +#endif + if (nla_put(skb, TCA_U32_SEL, sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), &n->sel)) goto nla_put_failure; - if (n->ht_up) { + + ht_up = rtnl_dereference(n->ht_up); + if (ht_up) { u32 htid = n->handle & 0xFFFFF000; if (nla_put_u32(skb, TCA_U32_HASH, htid)) goto nla_put_failure; @@ -757,14 +941,28 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (n->res.classid && nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) goto nla_put_failure; - if (n->ht_down && - nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle)) + + ht_down = rtnl_dereference(n->ht_down); + if (ht_down && + nla_put_u32(skb, TCA_U32_LINK, ht_down->handle)) goto nla_put_failure; #ifdef CONFIG_CLS_U32_MARK - if ((n->mark.val || n->mark.mask) && - nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) - goto nla_put_failure; + if ((n->val || n->mask)) { + struct tc_u32_mark mark = {.val = n->val, + .mask = n->mask, + .success = 0}; + int cpum; + + for_each_possible_cpu(cpum) { + __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); + + mark.success += cnt; + } + + if (nla_put(skb, TCA_U32_MARK, sizeof(mark), 
&mark)) + goto nla_put_failure; + } #endif if (tcf_exts_dump(skb, &n->exts) < 0) @@ -779,10 +977,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, } #endif #ifdef CONFIG_CLS_U32_PERF + gpf = kzalloc(sizeof(struct tc_u32_pcnt) + + n->sel.nkeys * sizeof(u64), + GFP_KERNEL); + if (!gpf) + goto nla_put_failure; + + for_each_possible_cpu(cpu) { + int i; + struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); + + gpf->rcnt += pf->rcnt; + gpf->rhit += pf->rhit; + for (i = 0; i < n->sel.nkeys; i++) + gpf->kcnts[i] += pf->kcnts[i]; + } + if (nla_put(skb, TCA_U32_PCNT, sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), - n->pf)) + gpf)) { + kfree(gpf); goto nla_put_failure; + } + kfree(gpf); #endif } diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c index 7c292d474f47861f3d9e293a1e7c55da7f38c5ca..ddd883ca55b2c1f18bd24500d6af457db5b0cd4a 100644 --- a/net/sched/em_canid.c +++ b/net/sched/em_canid.c @@ -120,7 +120,7 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m, return match; } -static int em_canid_change(struct tcf_proto *tp, void *data, int len, +static int em_canid_change(struct net *net, void *data, int len, struct tcf_ematch *m) { struct can_filter *conf = data; /* Array with rules */ @@ -183,7 +183,7 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len, return 0; } -static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m) +static void em_canid_destroy(struct tcf_ematch *m) { struct canid_match *cm = em_canid_priv(m); diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index 527aeb7a3ff0b3b558873d0c7d93fc2c26e100b8..5b4a4efe468c927a33fe24e187f5f42ff09c770f 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -19,12 +19,11 @@ #include #include -static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len, +static int em_ipset_change(struct net *net, void *data, int data_len, struct tcf_ematch *em) { struct xt_set_info *set = data; ip_set_id_t index; - struct net *net = dev_net(qdisc_dev(tp->q)); if (data_len != sizeof(*set)) return -EINVAL; @@ -42,11 +41,11 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len, return -ENOMEM; } -static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em) +static void em_ipset_destroy(struct tcf_ematch *em) { const struct xt_set_info *set = (const void *) em->data; if (set) { - ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index); + ip_set_nfnl_put(em->net, set->index); kfree((void *) em->data); } } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 9b8c0b0e60d7dbc34260ae8fc010844eeda4e8ad..c8f8c399b99a356835e5713d6b846fe814d0157b 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -856,7 +856,7 @@ static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { [TCA_EM_META_HDR] = { .len = sizeof(struct tcf_meta_hdr) }, }; -static int em_meta_change(struct tcf_proto *tp, void *data, int len, +static int em_meta_change(struct net *net, void *data, int len, struct tcf_ematch *m) { int err; @@ -908,7 +908,7 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len, return err; } -static void em_meta_destroy(struct tcf_proto *tp, struct tcf_ematch *m) +static void em_meta_destroy(struct tcf_ematch *m) { if (m) meta_delete((struct meta_match *) m->data); diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c index a3bed07a008b2f746376ce0970cd4f15196adbdb..df3110d695857e672f1a50f83399a7f63959dd00 100644 --- a/net/sched/em_nbyte.c +++ b/net/sched/em_nbyte.c @@ -23,7 
+23,7 @@ struct nbyte_data { char pattern[0]; }; -static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len, +static int em_nbyte_change(struct net *net, void *data, int data_len, struct tcf_ematch *em) { struct tcf_em_nbyte *nbyte = data; diff --git a/net/sched/em_text.c b/net/sched/em_text.c index 15d353d2e4be0791859f533c8b816a584713fbcd..f03c3de16c274f3152f4ef701339d20158609b51 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c @@ -45,7 +45,7 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m, return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX; } -static int em_text_change(struct tcf_proto *tp, void *data, int len, +static int em_text_change(struct net *net, void *data, int len, struct tcf_ematch *m) { struct text_match *tm; @@ -100,7 +100,7 @@ static int em_text_change(struct tcf_proto *tp, void *data, int len, return 0; } -static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) +static void em_text_destroy(struct tcf_ematch *m) { if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) textsearch_destroy(EM_TEXT_PRIV(m)->config); diff --git a/net/sched/ematch.c b/net/sched/ematch.c index ad57f4444b9c65cdc7abe6eb588868c4e2b65cd2..6742200b13071b6e63c200767a77a653d2e10c06 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -178,6 +178,7 @@ static int tcf_em_validate(struct tcf_proto *tp, struct tcf_ematch_hdr *em_hdr = nla_data(nla); int data_len = nla_len(nla) - sizeof(*em_hdr); void *data = (void *) em_hdr + sizeof(*em_hdr); + struct net *net = dev_net(qdisc_dev(tp->q)); if (!TCF_EM_REL_VALID(em_hdr->flags)) goto errout; @@ -240,7 +241,7 @@ static int tcf_em_validate(struct tcf_proto *tp, goto errout; if (em->ops->change) { - err = em->ops->change(tp, data, data_len, em); + err = em->ops->change(net, data, data_len, em); if (err < 0) goto errout; } else if (data_len > 0) { @@ -271,6 +272,7 @@ static int tcf_em_validate(struct tcf_proto *tp, em->matchid = em_hdr->matchid; em->flags = em_hdr->flags; em->datalen = data_len; + em->net = net; err = 0; errout: @@ -378,7 +380,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla, return err; errout_abort: - tcf_em_tree_destroy(tp, tree); + tcf_em_tree_destroy(tree); return err; } EXPORT_SYMBOL(tcf_em_tree_validate); @@ -393,7 +395,7 @@ EXPORT_SYMBOL(tcf_em_tree_validate); * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that * the ematch tree is not in use before calling this function. 
*/ -void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) +void tcf_em_tree_destroy(struct tcf_ematch_tree *tree) { int i; @@ -405,7 +407,7 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) if (em->ops) { if (em->ops->destroy) - em->ops->destroy(tp, em); + em->ops->destroy(em); else if (!tcf_em_is_simple(em)) kfree((void *) em->data); module_put(em->ops->owner); @@ -526,9 +528,10 @@ int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree, match_idx = stack[--stackp]; cur_match = tcf_em_get_match(tree, match_idx); + if (tcf_em_is_inverted(cur_match)) + res = !res; + if (tcf_em_early_end(cur_match, res)) { - if (tcf_em_is_inverted(cur_match)) - res = !res; goto pop_stack; } else { match_idx++; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 58bed7599db7d9f5538f01ff5057e7aa79d0a915..2cf61b3e633c25bb507860e970935998b05bf10d 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -578,31 +578,34 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, timer); + rcu_read_lock(); qdisc_unthrottled(wd->qdisc); __netif_schedule(qdisc_root(wd->qdisc)); + rcu_read_unlock(); return HRTIMER_NORESTART; } void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) { - hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); wd->timer.function = qdisc_watchdog; wd->qdisc = qdisc; } EXPORT_SYMBOL(qdisc_watchdog_init); -void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires) +void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle) { if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc_root_sleeping(wd->qdisc)->state)) return; - qdisc_throttled(wd->qdisc); + if (throttle) + qdisc_throttled(wd->qdisc); hrtimer_start(&wd->timer, ns_to_ktime(expires), - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); } EXPORT_SYMBOL(qdisc_watchdog_schedule_ns); @@ -763,7 +766,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) cops->put(sch, cl); } sch->q.qlen -= n; - sch->qstats.drops += drops; + __qdisc_qstats_drop(sch, drops); } } EXPORT_SYMBOL(qdisc_tree_decrease_qlen); @@ -942,6 +945,17 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, sch->handle = handle; if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { + if (qdisc_is_percpu_stats(sch)) { + sch->cpu_bstats = + alloc_percpu(struct gnet_stats_basic_cpu); + if (!sch->cpu_bstats) + goto err_out4; + + sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); + if (!sch->cpu_qstats) + goto err_out4; + } + if (tca[TCA_STAB]) { stab = qdisc_get_stab(tca[TCA_STAB]); if (IS_ERR(stab)) { @@ -964,8 +978,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, else root_lock = qdisc_lock(sch); - err = gen_new_estimator(&sch->bstats, &sch->rate_est, - root_lock, tca[TCA_RATE]); + err = gen_new_estimator(&sch->bstats, + sch->cpu_bstats, + &sch->rate_est, + root_lock, + tca[TCA_RATE]); if (err) goto err_out4; } @@ -984,6 +1001,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, return NULL; err_out4: + free_percpu(sch->cpu_bstats); + free_percpu(sch->cpu_qstats); /* * Any broken qdiscs that would require a ops->reset() here? * The qdisc was never in action so it shouldn't be necessary. 
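/*
 * Editor's illustrative sketch -- not part of the patch. The qdisc_create()
 * hunk above optionally allocates per-CPU byte/packet and queue counters
 * (sch->cpu_bstats / sch->cpu_qstats) so lockless datapaths can bump stats
 * without bouncing a shared cacheline; dump paths then fold the per-CPU
 * copies into one total, the same folding the cls_u32 dump code earlier in
 * this series does for its per-CPU hit counters. The fragment below restates
 * that shape with a made-up "struct demo_cnt": every name here is
 * hypothetical, free_percpu() at teardown and 64-bit tearing protection on
 * 32-bit hosts are omitted, and it is only meant to build in kernel context.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_cnt {
	u64 packets;
	u64 bytes;
};

static struct demo_cnt __percpu *demo_stats;

static int demo_stats_alloc(void)
{
	/* One counter pair per possible CPU, zero-initialized. */
	demo_stats = alloc_percpu(struct demo_cnt);
	return demo_stats ? 0 : -ENOMEM;
}

/* Hot path: touch only this CPU's copy, no locks, no shared cacheline. */
static void demo_stats_update(unsigned int len)
{
	this_cpu_inc(demo_stats->packets);
	this_cpu_add(demo_stats->bytes, len);
}

/* Dump path: fold every CPU's copy into a single consolidated total. */
static void demo_stats_fold(struct demo_cnt *sum)
{
	int cpu;

	sum->packets = 0;
	sum->bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct demo_cnt *c = per_cpu_ptr(demo_stats, cpu);

		sum->packets += c->packets;
		sum->bytes += c->bytes;
	}
}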
@@ -1022,9 +1041,11 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) because change can't be undone. */ if (sch->flags & TCQ_F_MQROOT) goto out; - gen_replace_estimator(&sch->bstats, &sch->rate_est, - qdisc_root_sleeping_lock(sch), - tca[TCA_RATE]); + gen_replace_estimator(&sch->bstats, + sch->cpu_bstats, + &sch->rate_est, + qdisc_root_sleeping_lock(sch), + tca[TCA_RATE]); } out: return 0; @@ -1299,11 +1320,14 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, u32 portid, u32 seq, u16 flags, int event) { + struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL; + struct gnet_stats_queue __percpu *cpu_qstats = NULL; struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct gnet_dump d; struct qdisc_size_table *stab; + __u32 qlen; cond_resched(); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); @@ -1321,7 +1345,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, goto nla_put_failure; if (q->ops->dump && q->ops->dump(q, skb) < 0) goto nla_put_failure; - q->qstats.qlen = q->q.qlen; + qlen = q->q.qlen; stab = rtnl_dereference(q->stab); if (stab && qdisc_dump_stab(skb, stab) < 0) @@ -1334,9 +1358,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) goto nla_put_failure; - if (gnet_stats_copy_basic(&d, &q->bstats) < 0 || + if (qdisc_is_percpu_stats(q)) { + cpu_bstats = q->cpu_bstats; + cpu_qstats = q->cpu_qstats; + } + + if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 || gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 || - gnet_stats_copy_queue(&d, &q->qstats) < 0) + gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0) goto nla_put_failure; if (gnet_stats_finish_copy(&d) < 0) @@ -1781,7 +1810,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, __be16 protocol = skb->protocol; int err; - for (; tp; tp = tp->next) { + for (; tp; tp = rcu_dereference_bh(tp->next)) { if (tp->protocol != protocol && tp->protocol != htons(ETH_P_ALL)) continue; @@ -1833,15 +1862,15 @@ void tcf_destroy(struct tcf_proto *tp) { tp->ops->destroy(tp); module_put(tp->ops->owner); - kfree(tp); + kfree_rcu(tp, rcu); } -void tcf_destroy_chain(struct tcf_proto **fl) +void tcf_destroy_chain(struct tcf_proto __rcu **fl) { struct tcf_proto *tp; - while ((tp = *fl) != NULL) { - *fl = tp->next; + while ((tp = rtnl_dereference(*fl)) != NULL) { + RCU_INIT_POINTER(*fl, tp->next); tcf_destroy(tp); } } diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 8449b337f9e3c9991b36144b98965a08d463cdf9..e3e2cc5fd0689e6ac5b6226ab7ca0987601ad733 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -41,7 +41,7 @@ struct atm_flow_data { struct Qdisc *q; /* FIFO, TBF, etc. 
*/ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); /* chaining */ @@ -273,7 +273,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, error = -ENOBUFS; goto err_out; } - flow->filter_list = NULL; + RCU_INIT_POINTER(flow->filter_list, NULL); flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!flow->q) flow->q = &noop_qdisc; @@ -311,7 +311,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); if (list_empty(&flow->list)) return -EINVAL; - if (flow->filter_list || flow == &p->link) + if (rcu_access_pointer(flow->filter_list) || flow == &p->link) return -EBUSY; /* * Reference count must be 2: one for "keepalive" (set at class @@ -345,7 +345,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; @@ -369,11 +370,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow = NULL; if (TC_H_MAJ(skb->priority) != sch->handle || !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { + struct tcf_proto *fl; + list_for_each_entry(flow, &p->flows, list) { - if (flow->filter_list) { - result = tc_classify_compat(skb, - flow->filter_list, - &res); + fl = rcu_dereference_bh(flow->filter_list); + if (fl) { + result = tc_classify_compat(skb, fl, &res); if (result < 0) continue; flow = (struct atm_flow_data *)res.class; @@ -415,7 +417,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); if (flow) flow->qstats.drops++; } @@ -544,7 +546,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); - p->link.filter_list = NULL; + RCU_INIT_POINTER(p->link.filter_list, NULL); p->link.vcc = NULL; p->link.sock = NULL; p->link.classid = sch->handle; @@ -635,10 +637,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg, { struct atm_flow_data *flow = (struct atm_flow_data *)arg; - flow->qstats.qlen = flow->q->q.qlen; - - if (gnet_stats_copy_basic(d, &flow->bstats) < 0 || - gnet_stats_copy_queue(d, &flow->qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0) return -1; return 0; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 762a04bb8f6dcbe4627b579ff4081a4cf92adb11..beeb75f80fdb970139b030fbb9ac268983b617e9 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -133,7 +133,7 @@ struct cbq_class { struct gnet_stats_rate_est64 rate_est; struct tc_cbq_xstats xstats; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; int refcnt; int filters; @@ -221,6 +221,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct cbq_class **defmap; struct cbq_class *cl = NULL; u32 prio = skb->priority; + struct tcf_proto *fl; struct tcf_result res; /* @@ -235,11 +236,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) int 
result = 0; defmap = head->defaults; + fl = rcu_dereference_bh(head->filter_list); /* * Step 2+n. Apply classifier. */ - if (!head->filter_list || - (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) + result = tc_classify_compat(skb, fl, &res); + if (!fl || result < 0) goto fallback; cl = (void *)res.class; @@ -375,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) #endif if (cl == NULL) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -393,7 +395,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } if (net_xmit_drop_count(ret)) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); cbq_mark_toplevel(q, cl); cl->qstats.drops++; } @@ -615,7 +617,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) time = ktime_set(0, 0); time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); - hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); + hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); } qdisc_unthrottled(sch); @@ -648,11 +650,11 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) return 0; } if (net_xmit_drop_count(ret)) - sch->qstats.drops++; + qdisc_qstats_drop(sch); return 0; } - sch->qstats.drops++; + qdisc_qstats_drop(sch); return -1; } #endif @@ -993,7 +995,7 @@ cbq_dequeue(struct Qdisc *sch) */ if (sch->q.qlen) { - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (q->wd_expires) qdisc_watchdog_schedule(&q->watchdog, now + q->wd_expires); @@ -1384,7 +1386,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) q->link.minidle = -0x7FFFFFFF; qdisc_watchdog_init(&q->watchdog, sch); - hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); q->delay_timer.function = cbq_undelay; q->toplevel = TC_CBQ_MAXLEVEL; q->now = psched_get_time(); @@ -1592,16 +1594,15 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl = (struct cbq_class *)arg; - cl->qstats.qlen = cl->q->q.qlen; cl->xstats.avgidle = cl->avgidle; cl->xstats.undertime = 0; if (cl->undertime != PSCHED_PASTPERFECT) cl->xstats.undertime = cl->undertime - q->now; - if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, &cl->qstats) < 0) + gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) return -1; return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); @@ -1757,7 +1758,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t } if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, + err = gen_replace_estimator(&cl->bstats, NULL, + &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { @@ -1850,7 +1852,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t goto failure; if (tca[TCA_RATE]) { - err = gen_new_estimator(&cl->bstats, &cl->rate_est, + err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { @@ -1954,7 +1956,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) return 0; } -static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg) +static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, + unsigned long arg) { struct cbq_sched_data *q = qdisc_priv(sch); 
struct cbq_class *cl = (struct cbq_class *)arg; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index fb666d1e4de3197cb81b0b2287b2dcef2df294a9..c009eb9045cef48adbe974e9d7d505e242213974 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -57,7 +57,7 @@ struct choke_sched_data { /* Variables */ struct red_vars vars; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ @@ -127,7 +127,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) if (idx == q->tail) choke_zap_tail_holes(q); - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_drop(skb, sch); qdisc_tree_decrease_qlen(sch, 1); --sch->q.qlen; @@ -203,9 +203,11 @@ static bool choke_classify(struct sk_buff *skb, { struct choke_sched_data *q = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl; int result; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -259,7 +261,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return false; oskb = choke_peek_random(q, pidx); - if (q->filter_list) + if (rcu_access_pointer(q->filter_list)) return choke_get_classid(nskb) == choke_get_classid(oskb); return choke_match_flow(oskb, nskb); @@ -267,11 +269,11 @@ static bool choke_match_random(const struct choke_sched_data *q, static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) { + int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; struct choke_sched_data *q = qdisc_priv(sch); const struct red_parms *p = &q->parms; - int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - if (q->filter_list) { + if (rcu_access_pointer(q->filter_list)) { /* If using external classifiers, get result and record it. 
*/ if (!choke_classify(skb, sch, &ret)) goto other_drop; /* Packet was eaten by filter */ @@ -300,7 +302,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (q->vars.qavg > p->qth_max) { q->vars.qcount = -1; - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (use_harddrop(q) || !use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; @@ -313,7 +315,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->vars.qcount = 0; q->vars.qR = red_random(p); - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (!use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; @@ -330,7 +332,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->tab[q->tail] = skb; q->tail = (q->tail + 1) & q->tab_mask; ++sch->q.qlen; - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } @@ -343,7 +345,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) other_drop: if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -363,7 +365,7 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch) q->tab[q->head] = NULL; choke_zap_head_holes(q); --sch->q.qlen; - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); return skb; @@ -458,7 +460,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) ntab[tail++] = skb; continue; } - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); --sch->q.qlen; qdisc_drop(skb, sch); } @@ -564,7 +566,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, return 0; } -static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 2f9ab17db85a59524556055bc5e53c5f2cab1a67..de28f8e968e8176ac7630a1e6fcccb45ad295f5d 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -149,7 +149,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt) while (sch->q.qlen > sch->limit) { struct sk_buff *skb = __skb_dequeue(&sch->q); - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_drop(skb, sch); } qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 7bbbfe1121920a126537d43a0443ab4b98e340fa..338706092c27d1255a5c16bb2cc4c5564f79e558 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -35,7 +35,7 @@ struct drr_class { struct drr_sched { struct list_head active; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc_class_hash clhash; }; @@ -88,7 +88,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl != NULL) { if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, + err = gen_replace_estimator(&cl->bstats, NULL, + &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) @@ -116,7 +117,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cl->qdisc = &noop_qdisc; if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, + err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { @@ -184,7 +185,8 @@ static void 
drr_put_class(struct Qdisc *sch, unsigned long arg) drr_destroy_class(sch, cl); } -static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch, + unsigned long cl) { struct drr_sched *q = qdisc_priv(sch); @@ -273,17 +275,16 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct drr_class *cl = (struct drr_class *)arg; + __u32 qlen = cl->qdisc->q.qlen; struct tc_drr_stats xstats; memset(&xstats, 0, sizeof(xstats)); - if (cl->qdisc->q.qlen) { + if (qlen) xstats.deficit = cl->deficit; - cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; - } - if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) + gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); @@ -319,6 +320,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { @@ -328,7 +330,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -356,7 +359,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl = drr_classify(skb, sch, &err); if (cl == NULL) { if (err & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return err; } @@ -365,7 +368,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return err; } diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 49d6ef338b558ee338163826b1a8a8966093637b..227114f27f9408b6010c9c537236ce9f7cb484d9 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -37,7 +37,7 @@ struct dsmark_qdisc_data { struct Qdisc *q; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; u8 *mask; /* "owns" the array */ u8 *value; u16 indices; @@ -186,8 +186,8 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch, - unsigned long cl) +static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct dsmark_qdisc_data *p = qdisc_priv(sch); return &p->filter_list; @@ -229,7 +229,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) skb->tc_index = TC_H_MIN(skb->priority); else { struct tcf_result res; - int result = tc_classify(skb, p->filter_list, &res); + struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); + int result = tc_classify(skb, fl, &res); pr_debug("result %d class 0x%04x\n", result, res.classid); @@ -257,7 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) err = qdisc_enqueue(skb, p->q); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) - sch->qstats.drops++; + qdisc_qstats_drop(sch); return err; } diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 
e15a9eb29087794ccc71408f467e9ae37f9c086b..2e2398cfc694aaf7ed12c2afda377e8ded340cb2 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* queue full, remove one skb to fulfill the limit */ __qdisc_queue_drop_head(sch, &sch->q); - sch->qstats.drops++; + qdisc_qstats_drop(sch); qdisc_enqueue_tail(skb, sch); return NET_XMIT_CN; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index ba32c2b005d0821788f656e8ae05d4d47a862351..cbd7e1fd23b41bb7ba0bb348c3a1a287652cca93 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -290,7 +290,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow) flow->head = skb->next; skb->next = NULL; flow->qlen--; - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; } return skb; @@ -371,13 +371,12 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) f->qlen++; if (skb_is_retransmit(skb)) q->stat_tcp_retrans++; - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); if (fq_flow_is_detached(f)) { fq_flow_add_tail(&q->new_flows, f); if (time_after(jiffies, f->age + q->flow_refill_delay)) f->credit = max_t(u32, f->credit, q->quantum); q->inactive_flows--; - qdisc_unthrottled(sch); } /* Note: this overwrites f->age */ @@ -385,7 +384,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(f == &q->internal)) { q->stat_internal_packets++; - qdisc_unthrottled(sch); } sch->q.qlen++; @@ -416,7 +414,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) static struct sk_buff *fq_dequeue(struct Qdisc *sch) { struct fq_sched_data *q = qdisc_priv(sch); - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_ns(); struct fq_flow_head *head; struct sk_buff *skb; struct fq_flow *f; @@ -433,7 +431,8 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch) if (!head->first) { if (q->time_next_delayed_flow != ~0ULL) qdisc_watchdog_schedule_ns(&q->watchdog, - q->time_next_delayed_flow); + q->time_next_delayed_flow, + false); return NULL; } } @@ -495,7 +494,6 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch) } out: qdisc_bstats_update(sch, skb); - qdisc_unthrottled(sch); return skb; } @@ -787,7 +785,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct fq_sched_data *q = qdisc_priv(sch); - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_ns(); struct tc_fq_qd_stats st = { .gc_flows = q->stat_gc_flows, .highprio_packets = q->stat_internal_packets, diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 063b726bf1f8636e3529a2e61d1b43fced918d9c..b9ca32ebc1de0922499379dda1735d75abdd26bc 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -52,7 +52,7 @@ struct fq_codel_flow { }; /* please try to keep this structure <= 64 bytes */ struct fq_codel_sched_data { - struct tcf_proto *filter_list; /* optional external classifier */ + struct tcf_proto __rcu *filter_list; /* optional external classifier */ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ u32 *backlogs; /* backlog table [flows_cnt] */ u32 flows_cnt; /* number of flows */ @@ -77,13 +77,15 @@ static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src ^ keys.ip_proto, (__force u32)keys.ports, q->perturbation); - return ((u64)hash * q->flows_cnt) >> 32; + + return 
reciprocal_scale(hash, q->flows_cnt); } static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tcf_proto *filter; struct tcf_result res; int result; @@ -92,11 +94,12 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->flows_cnt) return TC_H_MIN(skb->priority); - if (!q->filter_list) + filter = rcu_dereference(q->filter_list); + if (!filter) return fq_codel_hash(q, skb) + 1; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, filter, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -161,8 +164,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch) q->backlogs[idx] -= len; kfree_skb(skb); sch->q.qlen--; - sch->qstats.drops++; - sch->qstats.backlog -= len; + qdisc_qstats_drop(sch); + qdisc_qstats_backlog_dec(sch, skb); flow->dropped++; return idx; } @@ -177,7 +180,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) idx = fq_codel_classify(skb, sch, &ret); if (idx == 0) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -187,7 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow = &q->flows[idx]; flow_queue_add(flow, skb); q->backlogs[idx] += qdisc_pkt_len(skb); - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); if (list_empty(&flow->flowchain)) { list_add_tail(&flow->flowchain, &q->new_flows); @@ -495,7 +498,8 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct fq_codel_sched_data *q = qdisc_priv(sch); @@ -546,7 +550,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, qs.backlog = q->backlogs[idx]; qs.drops = flow->dropped; } - if (gnet_stats_copy_queue(d, &qs) < 0) + if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) return -1; if (idx < q->flows_cnt) return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index fc04fe93c2da2fa8b43b4be97ccd28dba8ca9e3c..38d58e6cef07a06663312965059b08c31e2189fd 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -47,7 +47,6 @@ EXPORT_SYMBOL(default_qdisc_ops); static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { - skb_dst_force(skb); q->gso_skb = skb; q->qstats.requeues++; q->q.qlen++; /* it's still part of the queue */ @@ -56,24 +55,52 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) return 0; } -static inline struct sk_buff *dequeue_skb(struct Qdisc *q) +static void try_bulk_dequeue_skb(struct Qdisc *q, + struct sk_buff *skb, + const struct netdev_queue *txq) +{ + int bytelimit = qdisc_avail_bulklimit(txq) - skb->len; + + while (bytelimit > 0) { + struct sk_buff *nskb = q->dequeue(q); + + if (!nskb) + break; + + bytelimit -= nskb->len; /* covers GSO len */ + skb->next = nskb; + skb = nskb; + } + skb->next = NULL; +} + +/* Note that dequeue_skb can possibly return a SKB list (via skb->next). + * A requeued skb (via q->gso_skb) can also be a SKB list. 
+ */ +static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate) { struct sk_buff *skb = q->gso_skb; const struct netdev_queue *txq = q->dev_queue; + *validate = true; if (unlikely(skb)) { /* check the reason of requeuing without tx lock first */ - txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { q->gso_skb = NULL; q->q.qlen--; } else skb = NULL; + /* skb in gso_skb were already validated */ + *validate = false; } else { - if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq)) + if (!(q->flags & TCQ_F_ONETXQUEUE) || + !netif_xmit_frozen_or_stopped(txq)) { skb = q->dequeue(q); + if (skb && qdisc_may_bulk(q)) + try_bulk_dequeue_skb(q, skb, txq); + } } - return skb; } @@ -90,7 +117,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, * detect it by checking xmit owner and drop the packet when * deadloop is detected. Return OK to try the next skb. */ - kfree_skb(skb); + kfree_skb_list(skb); net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n", dev_queue->dev->name); ret = qdisc_qlen(q); @@ -107,9 +134,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, } /* - * Transmit one skb, and handle the return status as required. Holding the - * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this - * function. + * Transmit possibly several skbs, and handle the return status as + * required. Holding the __QDISC___STATE_RUNNING bit guarantees that + * only one CPU can execute this function. * * Returns to the caller: * 0 - queue is empty or throttled. @@ -117,19 +144,24 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, */ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, - spinlock_t *root_lock) + spinlock_t *root_lock, bool validate) { int ret = NETDEV_TX_BUSY; /* And release qdisc */ spin_unlock(root_lock); - HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_xmit_frozen_or_stopped(txq)) - ret = dev_hard_start_xmit(skb, dev, txq); + /* Note that we validate skb (GSO, checksum, ...) 
outside of locks */ + if (validate) + skb = validate_xmit_skb_list(skb, dev); - HARD_TX_UNLOCK(dev, txq); + if (skb) { + HARD_TX_LOCK(dev, txq, smp_processor_id()); + if (!netif_xmit_frozen_or_stopped(txq)) + skb = dev_hard_start_xmit(skb, dev, txq, &ret); + HARD_TX_UNLOCK(dev, txq); + } spin_lock(root_lock); if (dev_xmit_complete(ret)) { @@ -178,17 +210,18 @@ static inline int qdisc_restart(struct Qdisc *q) struct net_device *dev; spinlock_t *root_lock; struct sk_buff *skb; + bool validate; /* Dequeue packet */ - skb = dequeue_skb(q); + skb = dequeue_skb(q, &validate); if (unlikely(!skb)) return 0; - WARN_ON_ONCE(skb_dst_is_noref(skb)); + root_lock = qdisc_lock(q); dev = qdisc_dev(q); - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(dev, skb); - return sch_direct_xmit(skb, q, dev, txq, root_lock); + return sch_direct_xmit(skb, q, dev, txq, root_lock, validate); } void __qdisc_run(struct Qdisc *q) @@ -518,7 +551,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) struct pfifo_fast_priv *priv = qdisc_priv(qdisc); for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) - skb_queue_head_init(band2list(priv, prio)); + __skb_queue_head_init(band2list(priv, prio)); /* Can by-pass the queue discipline */ qdisc->flags |= TCQ_F_CAN_BYPASS; @@ -616,7 +649,7 @@ void qdisc_reset(struct Qdisc *qdisc) ops->reset(qdisc); if (qdisc->gso_skb) { - kfree_skb(qdisc->gso_skb); + kfree_skb_list(qdisc->gso_skb); qdisc->gso_skb = NULL; qdisc->q.qlen = 0; } @@ -627,6 +660,9 @@ static void qdisc_rcu_free(struct rcu_head *head) { struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head); + if (qdisc_is_percpu_stats(qdisc)) + free_percpu(qdisc->cpu_bstats); + kfree((char *) qdisc - qdisc->padded); } @@ -652,7 +688,7 @@ void qdisc_destroy(struct Qdisc *qdisc) module_put(ops->owner); dev_put(qdisc_dev(qdisc)); - kfree_skb(qdisc->gso_skb); + kfree_skb_list(qdisc->gso_skb); /* * gen_estimator est_timer() might access qdisc->q.lock, * wait a RCU grace period before freeing qdisc. 
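/*
 * Editor's illustrative sketch -- not part of the patch. The filter_list
 * conversions running through this series (sch_atm, sch_cbq, sch_choke and
 * the sch_hfsc/sch_htb hunks below) all follow one RCU pattern: classify
 * paths read the chain with rcu_dereference_bh() from BH context, control
 * paths publish updates with rcu_assign_pointer()/RCU_INIT_POINTER() under
 * RTNL via rtnl_dereference(), and replaced objects are reclaimed after a
 * grace period with kfree_rcu()/call_rcu(). The fragment below restates that
 * pattern on a made-up "struct demo_filter" so the reader, writer and
 * reclaim sides are visible in one place; all names are hypothetical and the
 * snippet is not meant to build outside a kernel module.
 */
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_filter {
	u32 classid;
	struct demo_filter __rcu *next;
	struct rcu_head rcu;
};

static struct demo_filter __rcu *demo_list;

/* Reader side: e.g. an enqueue/classify path running in BH context. */
static u32 demo_classify(u32 key)
{
	struct demo_filter *f;
	u32 res = 0;

	rcu_read_lock_bh();
	for (f = rcu_dereference_bh(demo_list); f;
	     f = rcu_dereference_bh(f->next)) {
		if (f->classid == key) {
			res = f->classid;
			break;
		}
	}
	rcu_read_unlock_bh();
	return res;
}

/* Writer side: runs under RTNL, so rtnl_dereference() is safe here. */
static void demo_replace_head(struct demo_filter *new)
{
	struct demo_filter *old = rtnl_dereference(demo_list);

	/* Link the replacement before publishing it to readers. */
	RCU_INIT_POINTER(new->next, old ? rtnl_dereference(old->next) : NULL);
	rcu_assign_pointer(demo_list, new);
	if (old)
		kfree_rcu(old, rcu);	/* freed after a grace period */
}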
@@ -778,7 +814,7 @@ static void dev_deactivate_queue(struct net_device *dev, struct Qdisc *qdisc_default = _qdisc_default; struct Qdisc *qdisc; - qdisc = dev_queue->qdisc; + qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); @@ -871,7 +907,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, { struct Qdisc *qdisc = _qdisc; - dev_queue->qdisc = qdisc; + rcu_assign_pointer(dev_queue->qdisc, qdisc); dev_queue->qdisc_sleeping = qdisc; } diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 12cbc09157fcce67c314076e3cf0ed690f6f8818..a4ca4517cdc82843e21e5245989a59e89aa53702 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -209,7 +209,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; case RED_PROB_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; @@ -219,7 +219,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; case RED_HARD_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (gred_use_harddrop(t) || !gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index ec8aeaac1dd7ad7a077fddac3535c18064351b77..e6c7416d033219a66932bc8b2208a2046ffdd8eb 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -116,7 +116,7 @@ struct hfsc_class { struct gnet_stats_queue qstats; struct gnet_stats_rate_est64 rate_est; unsigned int level; /* class level in hierarchy */ - struct tcf_proto *filter_list; /* filter list */ + struct tcf_proto __rcu *filter_list; /* filter list */ unsigned int filter_cnt; /* filter count */ struct hfsc_sched *sched; /* scheduler data */ @@ -1014,9 +1014,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cur_time = psched_get_time(); if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_sleeping_lock(sch), - tca[TCA_RATE]); + spinlock_t *lock = qdisc_root_sleeping_lock(sch); + + err = gen_replace_estimator(&cl->bstats, NULL, + &cl->rate_est, + lock, + tca[TCA_RATE]); if (err) return err; } @@ -1063,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return -ENOBUFS; if (tca[TCA_RATE]) { - err = gen_new_estimator(&cl->bstats, &cl->rate_est, + err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { @@ -1161,7 +1164,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; head = &q->root; - tcf = q->root.filter_list; + tcf = rcu_dereference_bh(q->root.filter_list); while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -1185,7 +1188,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return cl; /* hit leaf class */ /* apply inner filter chain */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); head = cl; } @@ -1285,7 +1288,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) cl->filter_cnt--; } -static struct tcf_proto ** +static struct tcf_proto __rcu ** hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) { struct hfsc_sched *q = qdisc_priv(sch); @@ -1367,16 +1370,15 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct hfsc_class *cl = (struct hfsc_class *)arg; struct tc_hfsc_stats xstats; - cl->qstats.qlen = cl->qdisc->q.qlen; cl->qstats.backlog = 
cl->qdisc->qstats.backlog; xstats.level = cl->level; xstats.period = cl->cl_vtperiod; xstats.work = cl->cl_total; xstats.rtwork = cl->cl_cumul; - if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, &cl->qstats) < 0) + gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); @@ -1588,7 +1590,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl = hfsc_classify(skb, sch, &err); if (cl == NULL) { if (err & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return err; } @@ -1597,7 +1599,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return err; } @@ -1640,7 +1642,7 @@ hfsc_dequeue(struct Qdisc *sch) */ cl = vttree_get_minvt(&q->root, cur_time); if (cl == NULL) { - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); hfsc_schedule_watchdog(sch); return NULL; } @@ -1695,7 +1697,7 @@ hfsc_drop(struct Qdisc *sch) list_move_tail(&cl->dlist, &q->droplist); } cl->qstats.drops++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); sch->q.qlen--; return len; } diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index d85b6812a7d4c6bd0794decd872406e6b810e779..15d3aabfe2506c1483a2cdba0fe8425d152d2736 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -376,8 +376,8 @@ static unsigned int hhf_drop(struct Qdisc *sch) struct sk_buff *skb = dequeue_head(bucket); sch->q.qlen--; - sch->qstats.drops++; - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_drop(sch); + qdisc_qstats_backlog_dec(sch, skb); kfree_skb(skb); } @@ -395,7 +395,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) bucket = &q->buckets[idx]; bucket_add(bucket, skb); - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); if (list_empty(&bucket->bucketchain)) { unsigned int weight; @@ -457,7 +457,7 @@ static struct sk_buff *hhf_dequeue(struct Qdisc *sch) if (bucket->head) { skb = dequeue_head(bucket); sch->q.qlen--; - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); } if (!skb) { diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 9f949abcacef1680dcdc15579c7fe56611f21852..f1acb0f60dc35b724289f98498b5cf2162583ce9 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -103,7 +103,7 @@ struct htb_class { u32 prio; /* these two are used only by leaves... 
*/ int quantum; /* but stored for parent-to-leaf return */ - struct tcf_proto *filter_list; /* class attached filters */ + struct tcf_proto __rcu *filter_list; /* class attached filters */ int filter_cnt; int refcnt; /* usage count of this class */ @@ -153,7 +153,7 @@ struct htb_sched { int rate2quantum; /* quant = rate / rate2quantum */ /* filters for qdisc itself */ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; #define HTB_WARN_TOOMANYEVENTS 0x1 unsigned int warned; /* only one warning */ @@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, if (cl->level == 0) return cl; /* Start with inner filter chain if a non-leaf class is selected */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); } else { - tcf = q->filter_list; + tcf = rcu_dereference_bh(q->filter_list); } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; @@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, return cl; /* we hit leaf; return it */ /* we have got inner class; apply inner filter chain */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); } /* classification failed; try to use default class */ cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); @@ -586,13 +586,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; #endif } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); cl->qstats.drops++; } return ret; @@ -895,7 +895,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) if (!sch->q.qlen) goto fin; - q->now = ktime_to_ns(ktime_get()); + q->now = ktime_get_ns(); start_at = jiffies; next_event = q->now + 5LLU * NSEC_PER_SEC; @@ -925,14 +925,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) goto ok; } } - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (likely(next_event > q->now)) { if (!test_bit(__QDISC_STATE_DEACTIVATED, &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { ktime_t time = ns_to_ktime(next_event); qdisc_throttled(q->watchdog.qdisc); hrtimer_start(&q->watchdog.timer, time, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); } } else { schedule_work(&q->work); @@ -1044,7 +1044,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) qdisc_watchdog_init(&q->watchdog, sch); INIT_WORK(&q->work, htb_work_func); - skb_queue_head_init(&q->direct_queue); + __skb_queue_head_init(&q->direct_queue); if (tb[TCA_HTB_DIRECT_QLEN]) q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); @@ -1138,15 +1138,16 @@ static int htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct htb_class *cl = (struct htb_class *)arg; + __u32 qlen = 0; if (!cl->level && cl->un.leaf.q) - cl->qstats.qlen = cl->un.leaf.q->q.qlen; + qlen = cl->un.leaf.q->q.qlen; cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); - if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, &cl->qstats) < 0) + gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) return -1; return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); @@ -1225,7 +1226,7 @@ static void htb_parent_to_leaf(struct 
htb_sched *q, struct htb_class *cl, parent->un.leaf.q = new_q ? new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; - parent->t_c = ktime_to_ns(ktime_get()); + parent->t_c = ktime_get_ns(); parent->cmode = HTB_CAN_SEND; } @@ -1402,7 +1403,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, goto failure; if (htb_rate_est || tca[TCA_RATE]) { - err = gen_new_estimator(&cl->bstats, &cl->rate_est, + err = gen_new_estimator(&cl->bstats, NULL, + &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE] ? : &est.nla); if (err) { @@ -1455,7 +1457,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->tokens = PSCHED_TICKS2NS(hopt->buffer); cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ - cl->t_c = ktime_to_ns(ktime_get()); + cl->t_c = ktime_get_ns(); cl->cmode = HTB_CAN_SEND; /* attach to the hash list and parent's family */ @@ -1464,8 +1466,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, parent->children++; } else { if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, - qdisc_root_sleeping_lock(sch), + spinlock_t *lock = qdisc_root_sleeping_lock(sch); + + err = gen_replace_estimator(&cl->bstats, NULL, + &cl->rate_est, + lock, tca[TCA_RATE]); if (err) return err; @@ -1519,11 +1524,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, return err; } -static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) +static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch, + unsigned long arg) { struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = (struct htb_class *)arg; - struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; + struct tcf_proto __rcu **fl = cl ? 
&cl->filter_list : &q->filter_list; return fl; } diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 62871c14e1f93fef31e5795ad2b8ae2f064399a4..eb5b8445fef989c1f331fa1485fa39c649181a82 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -17,7 +17,7 @@ struct ingress_qdisc_data { - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; }; /* ------------------------- Class/flow operations ------------------------- */ @@ -46,7 +46,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } -static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct ingress_qdisc_data *p = qdisc_priv(sch); @@ -59,15 +60,16 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct ingress_qdisc_data *p = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); int result; - result = tc_classify(skb, p->filter_list, &res); + result = tc_classify(skb, fl, &res); qdisc_bstats_update(sch, skb); switch (result) { case TC_ACT_SHOT: result = TC_ACT_SHOT; - sch->qstats.drops++; + qdisc_qstats_drop(sch); break; case TC_ACT_STOLEN: case TC_ACT_QUEUED: diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index a8b2864a696be934114a3b23ccf75e0cb8c4d1d6..f3cbaecd283af4ce5a78ad90d135d008d2b542d7 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -112,7 +112,6 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; sch->bstats.packets += qdisc->bstats.packets; - sch->qstats.qlen += qdisc->qstats.qlen; sch->qstats.backlog += qdisc->qstats.backlog; sch->qstats.drops += qdisc->qstats.drops; sch->qstats.requeues += qdisc->qstats.requeues; @@ -200,9 +199,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct netdev_queue *dev_queue = mq_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - sch->qstats.qlen = sch->q.qlen; - if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || - gnet_stats_copy_queue(d, &sch->qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0) return -1; return 0; } diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 6749e2f540d0336eb69f61cb82b59623382e5777..3811a745452cf402cd498ed47058fdf655d18cbd 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -231,12 +231,11 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) memset(&sch->qstats, 0, sizeof(sch->qstats)); for (i = 0; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); spin_lock_bh(qdisc_lock(qdisc)); sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; sch->bstats.packets += qdisc->bstats.packets; - sch->qstats.qlen += qdisc->qstats.qlen; sch->qstats.backlog += qdisc->qstats.backlog; sch->qstats.drops += qdisc->qstats.drops; sch->qstats.requeues += qdisc->qstats.requeues; @@ -327,6 +326,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, if (cl <= netdev_get_num_tc(dev)) { int i; + __u32 qlen = 0; struct Qdisc *qdisc; struct gnet_stats_queue qstats = {0}; struct gnet_stats_basic_packed bstats = {0}; @@ -340,11 +340,13 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, spin_unlock_bh(d->lock); for (i = tc.offset; i < 
tc.offset + tc.count; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + struct netdev_queue *q = netdev_get_tx_queue(dev, i); + + qdisc = rtnl_dereference(q->qdisc); spin_lock_bh(qdisc_lock(qdisc)); + qlen += qdisc->q.qlen; bstats.bytes += qdisc->bstats.bytes; bstats.packets += qdisc->bstats.packets; - qstats.qlen += qdisc->qstats.qlen; qstats.backlog += qdisc->qstats.backlog; qstats.drops += qdisc->qstats.drops; qstats.requeues += qdisc->qstats.requeues; @@ -353,16 +355,16 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, } /* Reclaim root sleeping lock before completing stats */ spin_lock_bh(d->lock); - if (gnet_stats_copy_basic(d, &bstats) < 0 || - gnet_stats_copy_queue(d, &qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0) return -1; } else { struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); sch = dev_queue->qdisc_sleeping; - sch->qstats.qlen = sch->q.qlen; - if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || - gnet_stats_copy_queue(d, &sch->qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, + &sch->qstats, sch->q.qlen) < 0) return -1; } return 0; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index afb050a735fa5878a29e1da85d7c6b603851b623..42dd218871e09a3d6cce471a41afecde8fdfdb05 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -31,7 +31,7 @@ struct multiq_sched_data { u16 bands; u16 max_bands; u16 curband; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc **queues; }; @@ -42,10 +42,11 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct multiq_sched_data *q = qdisc_priv(sch); u32 band; struct tcf_result res; + struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - err = tc_classify(skb, q->filter_list, &res); + err = tc_classify(skb, fl, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@ -74,7 +75,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (qdisc == NULL) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -86,7 +87,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } if (net_xmit_drop_count(ret)) - sch->qstats.drops++; + qdisc_qstats_drop(sch); return ret; } @@ -359,9 +360,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; - cl_q->qstats.qlen = cl_q->q.qlen; - if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || - gnet_stats_copy_queue(d, &cl_q->qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) return -1; return 0; @@ -388,7 +388,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct multiq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 111d70fddaea97242ac4a85115e969efcd11dfcd..b34331967e020b6f1151b25be8f744e494a80ad6 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -429,12 +429,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* Drop packet? 
*/ if (loss_event(q)) { if (q->ecn && INET_ECN_set_ce(skb)) - sch->qstats.drops++; /* mark packet */ + qdisc_qstats_drop(sch); /* mark packet */ else --count; } if (count == 0) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } @@ -478,7 +478,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) return qdisc_reshape_fail(skb, sch); - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); cb = netem_skb_cb(skb); if (q->gap == 0 || /* not doing reordering */ @@ -549,15 +549,14 @@ static unsigned int netem_drop(struct Qdisc *sch) sch->q.qlen--; skb->next = NULL; skb->prev = NULL; - len = qdisc_pkt_len(skb); - sch->qstats.backlog -= len; + qdisc_qstats_backlog_dec(sch, skb); kfree_skb(skb); } } if (!len && q->qdisc && q->qdisc->ops->drop) len = q->qdisc->ops->drop(q->qdisc); if (len) - sch->qstats.drops++; + qdisc_qstats_drop(sch); return len; } @@ -575,7 +574,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) skb = __skb_dequeue(&sch->q); if (skb) { deliver: - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); return skb; @@ -610,7 +609,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); qdisc_tree_decrease_qlen(sch, 1); } } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index fefeeb73f15f18a84406ed686dc653ff3327ef45..33d7a98a7a9799b3cb5b3ce62c9013c49483184c 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -232,7 +232,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt) while (sch->q.qlen > sch->limit) { struct sk_buff *skb = __skb_dequeue(&sch->q); - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_drop(skb, sch); } qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 79359b69ad8d6fcf35b6937673d4ae332bbf5158..8e5cd34aaa74dbaddc4b38be4916bb79526e90fb 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -24,7 +24,7 @@ struct prio_sched_data { int bands; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; u8 prio2band[TC_PRIO_MAX+1]; struct Qdisc *queues[TCQ_PRIO_BANDS]; }; @@ -36,11 +36,13 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct prio_sched_data *q = qdisc_priv(sch); u32 band = skb->priority; struct tcf_result res; + struct tcf_proto *fl; int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { - err = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + err = tc_classify(skb, fl, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@ -50,7 +52,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return NULL; } #endif - if (!q->filter_list || err < 0) { + if (!fl || err < 0) { if (TC_H_MAJ(band)) band = 0; return q->queues[q->prio2band[band & TC_PRIO_MAX]]; @@ -75,7 +77,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (qdisc == NULL) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -87,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } if (net_xmit_drop_count(ret)) - sch->qstats.drops++; + 
qdisc_qstats_drop(sch); return ret; } @@ -322,9 +324,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; - cl_q->qstats.qlen = cl_q->q.qlen; - if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || - gnet_stats_copy_queue(d, &cl_q->qstats) < 0) + if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || + gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) return -1; return 0; @@ -351,7 +352,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct prio_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8056fb4e618a9bab5ce231ee86ac87db11ffdda7..3ec7e88a43cab2df5e012d3c5216bc31b75eafe3 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -181,7 +181,7 @@ struct qfq_group { }; struct qfq_sched { - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc_class_hash clhash; u64 oldV, V; /* Precise virtual times. */ @@ -459,7 +459,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl != NULL) { /* modify existing class */ if (tca[TCA_RATE]) { - err = gen_replace_estimator(&cl->bstats, &cl->rate_est, + err = gen_replace_estimator(&cl->bstats, NULL, + &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) @@ -484,7 +485,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cl->qdisc = &noop_qdisc; if (tca[TCA_RATE]) { - err = gen_new_estimator(&cl->bstats, &cl->rate_est, + err = gen_new_estimator(&cl->bstats, NULL, + &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) @@ -576,7 +578,8 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg) qfq_destroy_class(sch, cl); } -static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch, + unsigned long cl) { struct qfq_sched *q = qdisc_priv(sch); @@ -661,14 +664,14 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct tc_qfq_stats xstats; memset(&xstats, 0, sizeof(xstats)); - cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; xstats.weight = cl->agg->class_weight; xstats.lmax = cl->agg->lmax; - if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0) + gnet_stats_copy_queue(d, NULL, + &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); @@ -704,6 +707,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { @@ -714,7 +718,8 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -1224,7 +1229,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl = qfq_classify(skb, sch, &err); if (cl == NULL) { if (err & 
__NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return err; } @@ -1244,7 +1249,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { cl->qstats.drops++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return err; } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 633e32defdcc61291554e9457d5e13c2f6b87a1e..6c0534cc77582881fecb7b9276659f6d921e6daa 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -74,7 +74,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; case RED_PROB_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; @@ -84,7 +84,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; case RED_HARD_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (red_use_harddrop(q) || !red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; @@ -100,7 +100,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return ret; @@ -142,7 +142,7 @@ static unsigned int red_drop(struct Qdisc *sch) if (child->ops->drop && (len = child->ops->drop(child)) > 0) { q->stats.other++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); sch->q.qlen--; return len; } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 9b0f7093d970da5fa37130a5df926d5c428191ec..5819dd82630d2a126d2a75a3cd2f5d6a9a3534a8 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -55,7 +55,7 @@ struct sfb_bins { struct sfb_sched_data { struct Qdisc *qdisc; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; unsigned long rehash_interval; unsigned long warmup_time; /* double buffering warmup time in jiffies */ u32 max; @@ -253,13 +253,13 @@ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) return false; } -static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q, +static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, int *qerr, u32 *salt) { struct tcf_result res; int result; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; + struct tcf_proto *fl; int i; u32 p_min = ~0; u32 minqlen = ~0; @@ -289,7 +290,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) struct flow_keys keys; if (unlikely(sch->q.qlen >= q->limit)) { - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); q->stats.queuedrop++; goto drop; } @@ -306,9 +307,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } - if (q->filter_list) { + fl = rcu_dereference_bh(q->filter_list); + if (fl) { /* If using external classifiers, get result and record it. 
*/ - if (!sfb_classify(skb, q, &ret, &salt)) + if (!sfb_classify(skb, fl, &ret, &salt)) goto other_drop; keys.src = salt; keys.dst = 0; @@ -346,7 +348,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) sfb_skb_cb(skb)->hashes[slot] = 0; if (unlikely(minqlen >= q->max)) { - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); q->stats.bucketdrop++; goto drop; } @@ -374,7 +376,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } if (sfb_rate_limit(skb, q)) { - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); q->stats.penaltydrop++; goto drop; } @@ -409,7 +411,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) increment_qlen(skb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return ret; @@ -418,7 +420,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_CN; other_drop: if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -660,7 +662,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct sfb_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 1af2f73906d07aa5bf6f428ffe37b81523178ee4..b877140beda5573b8d2dbe4a701bf24355ba7229 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -125,7 +125,7 @@ struct sfq_sched_data { u8 cur_depth; /* depth of longest slot */ u8 flags; unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; sfq_index *ht; /* Hash table ('divisor' slots) */ struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ @@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, { struct sfq_sched_data *q = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority) == sch->handle && @@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->divisor) return TC_H_MIN(skb->priority); - if (!q->filter_list) { + fl = rcu_dereference_bh(q->filter_list); + if (!fl) { skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); return sfq_hash(q, skb) + 1; } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -310,11 +312,6 @@ static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb) slot->skblist_prev = skb; } -#define slot_queue_walk(slot, skb) \ - for (skb = slot->skblist_next; \ - skb != (struct sk_buff *)slot; \ - skb = skb->next) - static unsigned int sfq_drop(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); @@ -334,8 +331,8 @@ static unsigned int sfq_drop(struct Qdisc *sch) sfq_dec(q, x); kfree_skb(skb); sch->q.qlen--; - sch->qstats.drops++; - sch->qstats.backlog -= len; + qdisc_qstats_drop(sch); + qdisc_qstats_backlog_dec(sch, skb); return len; } @@ -382,7 +379,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) hash = sfq_classify(skb, sch, &ret); if (hash == 0) { if (ret & __NET_XMIT_BYPASS) - sch->qstats.drops++; + qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } @@ -412,7 +409,7 @@ sfq_enqueue(struct sk_buff *skb, struct 
Qdisc *sch) break; case RED_PROB_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (sfq_prob_mark(q)) { /* We know we have at least one packet in queue */ if (sfq_headdrop(q) && @@ -429,7 +426,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) goto congestion_drop; case RED_HARD_MARK: - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); if (sfq_hard_mark(q)) { /* We know we have at least one packet in queue */ if (sfq_headdrop(q) && @@ -464,7 +461,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } enqueue: - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); slot->backlog += qdisc_pkt_len(skb); slot_queue_add(slot, skb); sfq_inc(q, x); @@ -523,7 +520,7 @@ sfq_dequeue(struct Qdisc *sch) sfq_dec(q, a); qdisc_bstats_update(sch, skb); sch->q.qlen--; - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); slot->backlog -= qdisc_pkt_len(skb); /* Is the slot empty? */ if (slot->qlen == 0) { @@ -589,7 +586,8 @@ static void sfq_rehash(struct Qdisc *sch) if (x == SFQ_EMPTY_SLOT) { x = q->dep[0].next; /* get a free slot */ if (x >= SFQ_MAX_FLOWS) { -drop: sch->qstats.backlog -= qdisc_pkt_len(skb); +drop: + qdisc_qstats_backlog_dec(sch, skb); kfree_skb(skb); dropped++; continue; @@ -841,7 +839,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct sfq_sched_data *q = qdisc_priv(sch); @@ -872,7 +871,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, qs.qlen = slot->qlen; qs.backlog = slot->backlog; } - if (gnet_stats_copy_queue(d, &qs) < 0) + if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) return -1; return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 18ff634337092d5f8e7cba8fe817542f1455738f..a4afde14e8656cca6c0688bc2d092334edaf69df 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -175,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(segs, q->qdisc); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) - sch->qstats.drops++; + qdisc_qstats_drop(sch); } else { nb++; } @@ -201,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, q->qdisc); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) - sch->qstats.drops++; + qdisc_qstats_drop(sch); return ret; } @@ -216,7 +216,7 @@ static unsigned int tbf_drop(struct Qdisc *sch) if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { sch->q.qlen--; - sch->qstats.drops++; + qdisc_qstats_drop(sch); } return len; } @@ -239,7 +239,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) s64 ptoks = 0; unsigned int len = qdisc_pkt_len(skb); - now = ktime_to_ns(ktime_get()); + now = ktime_get_ns(); toks = min_t(s64, now - q->t_c, q->buffer); if (tbf_peak_present(q)) { @@ -268,7 +268,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) } qdisc_watchdog_schedule_ns(&q->watchdog, - now + max_t(long, -toks, -ptoks)); + now + max_t(long, -toks, -ptoks), + true); /* Maybe we have a shorter packet in the queue, which can be sent now. It sounds cool, @@ -281,7 +282,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) (cf. 
CSZ, HPFQ, HFSC) */ - sch->qstats.overlimits++; + qdisc_qstats_overlimit(sch); } return NULL; } @@ -292,7 +293,7 @@ static void tbf_reset(struct Qdisc *sch) qdisc_reset(q->qdisc); sch->q.qlen = 0; - q->t_c = ktime_to_ns(ktime_get()); + q->t_c = ktime_get_ns(); q->tokens = q->buffer; q->ptokens = q->mtu; qdisc_watchdog_cancel(&q->watchdog); @@ -431,7 +432,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - q->t_c = ktime_to_ns(ktime_get()); + q->t_c = ktime_get_ns(); qdisc_watchdog_init(&q->watchdog, sch); q->qdisc = &noop_qdisc; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index bd33793b527e176b07f7bacb77143ed38e32d31c..6ada42396a242123fd7738309b79720d0b46b38e 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -96,11 +96,14 @@ teql_dequeue(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); struct netdev_queue *dat_queue; struct sk_buff *skb; + struct Qdisc *q; skb = __skb_dequeue(&dat->q); dat_queue = netdev_get_tx_queue(dat->m->dev, 0); + q = rcu_dereference_bh(dat_queue->qdisc); + if (skb == NULL) { - struct net_device *m = qdisc_dev(dat_queue->qdisc); + struct net_device *m = qdisc_dev(q); if (m) { dat->m->slaves = sch; netif_wake_queue(m); @@ -108,7 +111,7 @@ teql_dequeue(struct Qdisc *sch) } else { qdisc_bstats_update(sch, skb); } - sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; + sch->q.qlen = dat->q.qlen + q->q.qlen; return skb; } @@ -157,9 +160,9 @@ teql_destroy(struct Qdisc *sch) txq = netdev_get_tx_queue(master->dev, 0); master->slaves = NULL; - root_lock = qdisc_root_sleeping_lock(txq->qdisc); + root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); spin_lock_bh(root_lock); - qdisc_reset(txq->qdisc); + qdisc_reset(rtnl_dereference(txq->qdisc)); spin_unlock_bh(root_lock); } } @@ -266,7 +269,7 @@ static inline int teql_resolve(struct sk_buff *skb, struct dst_entry *dst = skb_dst(skb); int res; - if (txq->qdisc == &noop_qdisc) + if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) return -ENODEV; if (!dev->header_ops || !dst) @@ -301,7 +304,6 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) do { struct net_device *slave = qdisc_dev(q); struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); - const struct net_device_ops *slave_ops = slave->netdev_ops; if (slave_txq->qdisc_sleeping != q) continue; @@ -317,8 +319,8 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int length = qdisc_pkt_len(skb); if (!netif_xmit_frozen_or_stopped(slave_txq) && - slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { - txq_trans_update(slave_txq); + netdev_start_xmit(skb, slave, slave_txq, false) == + NETDEV_TX_OK) { __netif_tx_unlock(slave_txq); master->slaves = NEXT_SLAVE(q); netif_wake_queue(dev); @@ -468,7 +470,7 @@ static __init void teql_master_setup(struct net_device *dev) dev->tx_queue_len = 100; dev->flags = IFF_NOARP; dev->hard_header_len = LL_MAX_HEADER; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + netif_keep_dst(dev); } static LIST_HEAD(master_dev_list); diff --git a/net/sctp/input.c b/net/sctp/input.c index c1b991294516fd1ef29de13cf24f06c1d63c8be2..b6493b3f11a97f359d0ec70cde2812082f65910f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -133,9 +133,13 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && - sctp_rcv_checksum(net, skb) < 0) + + 
skb->csum_valid = 0; /* Previous value not applicable */ + if (skb_csum_unnecessary(skb)) + __skb_decr_checksum_unnecessary(skb); + else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0) goto discard_it; + skb->csum_valid = 1; skb_pull(skb, sizeof(struct sctphdr)); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 6240834f4b95255a054c2017497d9b07b06b71f7..9d2c6c9facb6a4f4dabee29d8430aaf55b323636 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -366,7 +366,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && ret != RTN_LOCAL && !sp->inet.freebind && - !sysctl_ip_nonlocal_bind) + !net->ipv4.sysctl_ip_nonlocal_bind) return 0; if (ipv6_only_sock(sctp_opt2sk(sp))) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d3f1ea460c500caf6e39c1a9b2f31f2de28f37a4..c8f606324134e5bff74c7ddead68eac44eea63dc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, - SCTP_STATE(SCTP_STATE_ESTABLISHED)); - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + if (sctp_state(asoc, SHUTDOWN_PENDING) && + (sctp_sstate(asoc->base.sk, CLOSING) || + sock_flag(asoc->base.sk, SOCK_DEAD))) { + /* if were currently in SHUTDOWN_PENDING, but the socket + * has been closed by user, don't transition to ESTABLISHED. + * Instead trigger SHUTDOWN bundled with COOKIE_ACK. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, + SCTP_ST_CHUNK(0), NULL, + commands); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + } return SCTP_DISPOSITION_CONSUME; nomem_ev: diff --git a/net/socket.c b/net/socket.c index 4cdbc107606f037c6404f9947beb80174b961e80..ffd9cb46902b3640c5a52947391ad16c5d0c69b9 100644 --- a/net/socket.c +++ b/net/socket.c @@ -610,7 +610,7 @@ void sock_release(struct socket *sock) } EXPORT_SYMBOL(sock_release); -void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) { u8 flags = *tx_flags; @@ -626,12 +626,9 @@ void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK) flags |= SKBTX_ACK_TSTAMP; - if (sock_flag(sk, SOCK_WIFI_STATUS)) - flags |= SKBTX_WIFI_STATUS; - *tx_flags = flags; } -EXPORT_SYMBOL(sock_tx_timestamp); +EXPORT_SYMBOL(__sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) diff --git a/net/tipc/Makefile b/net/tipc/Makefile index a080c66d819a032233a963512d849f757cc979e2..b8a13caad59a518dddd2a8a50ad3a9a04f363cae 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_TIPC) := tipc.o tipc-y += addr.o bcast.o bearer.o config.o \ core.o link.o discover.o msg.o \ name_distr.o subscr.o name_table.o net.o \ - netlink.o node.o node_subscr.o port.o ref.o \ + netlink.o node.o node_subscr.o \ socket.o log.o eth_media.o server.o tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 
dd13bfa09333246fb6ba54171fd11ad0cc4fe63f..b8670bf262e249dea10d463fb3b7d2e926ff747e 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -37,7 +37,6 @@ #include "core.h" #include "link.h" -#include "port.h" #include "socket.h" #include "msg.h" #include "bcast.h" @@ -226,6 +225,17 @@ static void bclink_retransmit_pkt(u32 after, u32 to) tipc_link_retransmit(bcl, buf, mod(to - after)); } +/** + * tipc_bclink_wakeup_users - wake up pending users + * + * Called with no locks taken + */ +void tipc_bclink_wakeup_users(void) +{ + while (skb_queue_len(&bclink->link.waiting_sks)) + tipc_sk_rcv(skb_dequeue(&bclink->link.waiting_sks)); +} + /** * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets * @n_ptr: node that sent acknowledgement info @@ -300,8 +310,9 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) tipc_link_push_queue(bcl); bclink_set_last_sent(); } - if (unlikely(released && !list_empty(&bcl->waiting_ports))) - tipc_link_wakeup_ports(bcl, 0); + if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) + n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS; + exit: tipc_bclink_unlock(); } @@ -840,9 +851,10 @@ int tipc_bclink_init(void) sprintf(bcbearer->media.name, "tipc-broadcast"); spin_lock_init(&bclink->lock); - INIT_LIST_HEAD(&bcl->waiting_ports); + __skb_queue_head_init(&bcl->waiting_sks); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); + __skb_queue_head_init(&bclink->node.waiting_sks); bcl->owner = &bclink->node; bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 4875d9536aee7a98ea523b895b3be6475529d7bd..e7b0f85a82bc82a19573b0f3f4158814ba42b7bd 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -99,5 +99,5 @@ int tipc_bclink_set_queue_limits(u32 limit); void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); uint tipc_bclink_get_mtu(void); int tipc_bclink_xmit(struct sk_buff *buf); - +void tipc_bclink_wakeup_users(void); #endif diff --git a/net/tipc/config.c b/net/tipc/config.c index 2b42403ad33a690221456ff25fb4be50a2235255..876f4c6a2631b35eba2b9927430b30c34d125c87 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -35,7 +35,7 @@ */ #include "core.h" -#include "port.h" +#include "socket.h" #include "name_table.h" #include "config.h" #include "server.h" @@ -266,7 +266,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area rep_tlv_buf = tipc_media_get_names(); break; case TIPC_CMD_SHOW_PORTS: - rep_tlv_buf = tipc_port_get_ports(); + rep_tlv_buf = tipc_sk_socks_show(); break; case TIPC_CMD_SHOW_STATS: rep_tlv_buf = tipc_show_stats(); diff --git a/net/tipc/core.c b/net/tipc/core.c index 676d18015dd82efa0346f6bed2bf0d7f5489f1f6..a5737b8407ddbf63f9ecfb024bde3b079f979098 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -35,11 +35,10 @@ */ #include "core.h" -#include "ref.h" #include "name_table.h" #include "subscr.h" #include "config.h" -#include "port.h" +#include "socket.h" #include @@ -85,7 +84,7 @@ static void tipc_core_stop(void) tipc_netlink_stop(); tipc_subscr_stop(); tipc_nametbl_stop(); - tipc_ref_table_stop(); + tipc_sk_ref_table_stop(); tipc_socket_stop(); tipc_unregister_sysctl(); } @@ -99,7 +98,7 @@ static int tipc_core_start(void) get_random_bytes(&tipc_random, sizeof(tipc_random)); - err = tipc_ref_table_init(tipc_max_ports, tipc_random); + err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random); if (err) goto out_reftbl; @@ -139,7 +138,7 @@ static int 
tipc_core_start(void) out_netlink: tipc_nametbl_stop(); out_nametbl: - tipc_ref_table_stop(); + tipc_sk_ref_table_stop(); out_reftbl: return err; } diff --git a/net/tipc/core.h b/net/tipc/core.h index bb26ed1ee966c84c66fc7322877d80f763047e5a..f773b148722f7e51195d525a55e2ae18577c3c3e 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -81,6 +81,7 @@ extern u32 tipc_own_addr __read_mostly; extern int tipc_max_ports __read_mostly; extern int tipc_net_id __read_mostly; extern int sysctl_tipc_rmem[3] __read_mostly; +extern int sysctl_tipc_named_timeout __read_mostly; /* * Other global variables @@ -187,8 +188,11 @@ static inline void k_term_timer(struct timer_list *timer) struct tipc_skb_cb { void *handle; - bool deferred; struct sk_buff *tail; + bool deferred; + bool wakeup_pending; + u16 chain_sz; + u16 chain_imp; }; #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) diff --git a/net/tipc/link.c b/net/tipc/link.c index fb1485dc6736ec84719c262334ead93d659bb7c4..65410e18b8a6e90f52554276db9bdd927d2a63ba 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -36,7 +36,6 @@ #include "core.h" #include "link.h" -#include "port.h" #include "socket.h" #include "name_distr.h" #include "discover.h" @@ -275,7 +274,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, link_init_max_pkt(l_ptr); l_ptr->next_out_no = 1; - INIT_LIST_HEAD(&l_ptr->waiting_ports); + __skb_queue_head_init(&l_ptr->waiting_sks); link_reset_statistics(l_ptr); @@ -322,66 +321,47 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) } /** - * link_schedule_port - schedule port for deferred sending - * @l_ptr: pointer to link - * @origport: reference to sending port - * @sz: amount of data to be sent - * - * Schedules port for renewed sending of messages after link congestion - * has abated. 
+ * link_schedule_user - schedule user for wakeup after congestion + * @link: congested link + * @oport: sending port + * @chain_sz: size of buffer chain that was attempted sent + * @imp: importance of message attempted sent + * Create pseudo msg to send back to user when congestion abates */ -static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) +static bool link_schedule_user(struct tipc_link *link, u32 oport, + uint chain_sz, uint imp) { - struct tipc_port *p_ptr; - struct tipc_sock *tsk; + struct sk_buff *buf; - spin_lock_bh(&tipc_port_list_lock); - p_ptr = tipc_port_lock(origport); - if (p_ptr) { - if (!list_empty(&p_ptr->wait_list)) - goto exit; - tsk = tipc_port_to_sock(p_ptr); - tsk->link_cong = 1; - p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); - list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); - l_ptr->stats.link_congs++; -exit: - tipc_port_unlock(p_ptr); - } - spin_unlock_bh(&tipc_port_list_lock); - return -ELINKCONG; + buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr, + tipc_own_addr, oport, 0, 0); + if (!buf) + return false; + TIPC_SKB_CB(buf)->chain_sz = chain_sz; + TIPC_SKB_CB(buf)->chain_imp = imp; + __skb_queue_tail(&link->waiting_sks, buf); + link->stats.link_congs++; + return true; } -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) +/** + * link_prepare_wakeup - prepare users for wakeup after congestion + * @link: congested link + * Move a number of waiting users, as permitted by available space in + * the send queue, from link wait queue to node wait queue for wakeup + */ +static void link_prepare_wakeup(struct tipc_link *link) { - struct tipc_port *p_ptr; - struct tipc_sock *tsk; - struct tipc_port *temp_p_ptr; - int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size; - - if (all) - win = 100000; - if (win <= 0) - return; - if (!spin_trylock_bh(&tipc_port_list_lock)) - return; - if (link_congested(l_ptr)) - goto exit; - list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, - wait_list) { - if (win <= 0) + struct sk_buff_head *wq = &link->waiting_sks; + struct sk_buff *buf; + uint pend_qsz = link->out_queue_size; + + for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) { + if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp]) break; - tsk = tipc_port_to_sock(p_ptr); - list_del_init(&p_ptr->wait_list); - spin_lock_bh(p_ptr->lock); - tsk->link_cong = 0; - tipc_sock_wakeup(tsk); - win -= p_ptr->waiting_pkts; - spin_unlock_bh(p_ptr->lock); + pend_qsz += TIPC_SKB_CB(buf)->chain_sz; + __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq)); } - -exit: - spin_unlock_bh(&tipc_port_list_lock); } /** @@ -423,6 +403,7 @@ void tipc_link_reset(struct tipc_link *l_ptr) u32 prev_state = l_ptr->state; u32 checkpoint = l_ptr->next_in_no; int was_active_link = tipc_link_is_active(l_ptr); + struct tipc_node *owner = l_ptr->owner; msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); @@ -450,9 +431,10 @@ void tipc_link_reset(struct tipc_link *l_ptr) kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; kfree_skb_list(l_ptr->oldest_deferred_in); - if (!list_empty(&l_ptr->waiting_ports)) - tipc_link_wakeup_ports(l_ptr, 1); - + if (!skb_queue_empty(&l_ptr->waiting_sks)) { + skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); + owner->action_flags |= TIPC_WAKEUP_USERS; + } l_ptr->retransm_queue_head = 0; l_ptr->retransm_queue_size = 0; l_ptr->last_out = NULL; @@ -688,19 +670,23 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int 
event) static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); - uint psz = msg_size(msg); uint imp = tipc_msg_tot_importance(msg); u32 oport = msg_tot_origport(msg); - if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) { - if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) { - link_schedule_port(link, oport, psz); - return -ELINKCONG; - } - } else { + if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); tipc_link_reset(link); + goto drop; } + if (unlikely(msg_errcode(msg))) + goto drop; + if (unlikely(msg_reroute_cnt(msg))) + goto drop; + if (TIPC_SKB_CB(buf)->wakeup_pending) + return -ELINKCONG; + if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp)) + return -ELINKCONG; +drop: kfree_skb_list(buf); return -EHOSTUNREACH; } @@ -1202,8 +1188,10 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (unlikely(l_ptr->next_out)) tipc_link_push_queue(l_ptr); - if (unlikely(!list_empty(&l_ptr->waiting_ports))) - tipc_link_wakeup_ports(l_ptr, 0); + if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { + link_prepare_wakeup(l_ptr); + l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS; + } /* Process the incoming packet */ if (unlikely(!link_working_working(l_ptr))) { diff --git a/net/tipc/link.h b/net/tipc/link.h index 782983ccd323a8f4df659765f76888ec3faebc7a..b567a3427fda46e05a692b606f424da1a60eaa93 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -1,7 +1,7 @@ /* * net/tipc/link.h: Include file for TIPC link code * - * Copyright (c) 1995-2006, 2013, Ericsson AB + * Copyright (c) 1995-2006, 2013-2014, Ericsson AB * Copyright (c) 2004-2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -133,7 +133,7 @@ struct tipc_stats { * @retransm_queue_size: number of messages to retransmit * @retransm_queue_head: sequence number of first message to retransmit * @next_out: ptr to first unsent outbound message in queue - * @waiting_ports: linked list of ports waiting for link congestion to abate + * @waiting_sks: linked list of sockets waiting for link congestion to abate * @long_msg_seq_no: next identifier to use for outbound fragmented messages * @reasm_buf: head of partially reassembled inbound message fragments * @stats: collects statistics regarding link activity @@ -194,7 +194,7 @@ struct tipc_link { u32 retransm_queue_size; u32 retransm_queue_head; struct sk_buff *next_out; - struct list_head waiting_ports; + struct sk_buff_head waiting_sks; /* Fragmentation/reassembly */ u32 long_msg_seq_no; @@ -235,7 +235,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, void tipc_link_push_queue(struct tipc_link *l_ptr); u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, struct sk_buff *buf); -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all); void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *start, u32 retransmits); diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 9680be6d388a2b77a80e930f1092d6f6c3df13f3..74745a47d72ae1c0ce84c6ff5b096682410a0789 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -56,8 +56,35 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, msg_set_size(m, hsize); msg_set_prevnode(m, tipc_own_addr); msg_set_type(m, type); - msg_set_orignode(m, tipc_own_addr); - msg_set_destnode(m, destnode); + if (hsize > SHORT_H_SIZE) { + msg_set_orignode(m, tipc_own_addr); + msg_set_destnode(m, 
destnode); + } +} + +struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, + uint data_sz, u32 dnode, u32 onode, + u32 dport, u32 oport, int errcode) +{ + struct tipc_msg *msg; + struct sk_buff *buf; + + buf = tipc_buf_acquire(hdr_sz + data_sz); + if (unlikely(!buf)) + return NULL; + + msg = buf_msg(buf); + tipc_msg_init(msg, user, type, hdr_sz, dnode); + msg_set_size(msg, hdr_sz + data_sz); + msg_set_prevnode(msg, onode); + msg_set_origport(msg, oport); + msg_set_destport(msg, dport); + msg_set_errcode(msg, errcode); + if (hdr_sz > SHORT_H_SIZE) { + msg_set_orignode(msg, onode); + msg_set_destnode(msg, dnode); + } + return buf; } /* tipc_buf_append(): Append a buffer to the fragment list of another buffer @@ -155,7 +182,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, struct sk_buff *buf, *prev; char *pktpos; int rc; - + uint chain_sz = 0; msg_set_size(mhdr, msz); /* No fragmentation needed? */ @@ -166,6 +193,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, return -ENOMEM; skb_copy_to_linear_data(buf, mhdr, mhsz); pktpos = buf->data + mhsz; + TIPC_SKB_CB(buf)->chain_sz = 1; if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) return dsz; rc = -EFAULT; @@ -182,6 +210,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, *chain = buf = tipc_buf_acquire(pktmax); if (!buf) return -ENOMEM; + chain_sz = 1; pktpos = buf->data; skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; @@ -215,6 +244,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, rc = -ENOMEM; goto error; } + chain_sz++; prev->next = buf; msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); @@ -224,7 +254,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, pktrem = pktsz - INT_H_SIZE; } while (1); - + TIPC_SKB_CB(*chain)->chain_sz = chain_sz; msg_set_type(buf_msg(buf), LAST_FRAGMENT); return dsz; error: diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 462fa194a6afe5e2f6f759d4a04846d6e26ef32e..0ea7b695ac4d891a7556ae2f08110d11306369f9 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -442,6 +442,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) #define NAME_DISTRIBUTOR 11 #define MSG_FRAGMENTER 12 #define LINK_CONFIG 13 +#define SOCK_WAKEUP 14 /* pseudo user */ /* * Connection management protocol message types @@ -732,6 +733,10 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode); void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, u32 destnode); +struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, + uint data_sz, u32 dnode, u32 onode, + u32 dport, u32 oport, int errcode); + int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index dcc15bcd569279a96b74cc052ee140e6f8e1bf3b..376d2bb51d8da19bd00ee48f4b374ca098f36413 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -1,7 +1,7 @@ /* * net/tipc/name_distr.c: TIPC name distribution code * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. 
* @@ -71,6 +71,21 @@ static struct publ_list *publ_lists[] = { }; +int sysctl_tipc_named_timeout __read_mostly = 2000; + +/** + * struct tipc_dist_queue - queue holding deferred name table updates + */ +static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); + +struct distr_queue_item { + struct distr_item i; + u32 dtype; + u32 node; + unsigned long expires; + struct list_head next; +}; + /** * publ_to_item - add publication info to a publication message */ @@ -262,55 +277,106 @@ static void named_purge_publ(struct publication *publ) kfree(p); } +/** + * tipc_update_nametbl - try to process a nametable update and notify + * subscribers + * + * tipc_nametbl_lock must be held. + * Returns the publication item if successful, otherwise NULL. + */ +static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) +{ + struct publication *publ = NULL; + + if (dtype == PUBLICATION) { + publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower), + ntohl(i->upper), + TIPC_CLUSTER_SCOPE, node, + ntohl(i->ref), ntohl(i->key)); + if (publ) { + tipc_nodesub_subscribe(&publ->subscr, node, publ, + (net_ev_handler) + named_purge_publ); + return true; + } + } else if (dtype == WITHDRAWAL) { + publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower), + node, ntohl(i->ref), + ntohl(i->key)); + if (publ) { + tipc_nodesub_unsubscribe(&publ->subscr); + kfree(publ); + return true; + } + } else { + pr_warn("Unrecognized name table message received\n"); + } + return false; +} + +/** + * tipc_named_add_backlog - add a failed name table update to the backlog + * + */ +static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) +{ + struct distr_queue_item *e; + unsigned long now = get_jiffies_64(); + + e = kzalloc(sizeof(*e), GFP_ATOMIC); + if (!e) + return; + e->dtype = type; + e->node = node; + e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); + memcpy(e, i, sizeof(*i)); + list_add_tail(&e->next, &tipc_dist_queue); +} + +/** + * tipc_named_process_backlog - try to process any pending name table updates + * from the network. 
+ */ +void tipc_named_process_backlog(void) +{ + struct distr_queue_item *e, *tmp; + char addr[16]; + unsigned long now = get_jiffies_64(); + + list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { + if (time_after(e->expires, now)) { + if (!tipc_update_nametbl(&e->i, e->node, e->dtype)) + continue; + } else { + tipc_addr_string_fill(addr, e->node); + pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n", + e->dtype, ntohl(e->i.type), + ntohl(e->i.lower), + ntohl(e->i.upper), + addr, ntohl(e->i.key)); + } + list_del(&e->next); + kfree(e); + } +} + /** * tipc_named_rcv - process name table update message sent by another node */ void tipc_named_rcv(struct sk_buff *buf) { - struct publication *publ; struct tipc_msg *msg = buf_msg(buf); struct distr_item *item = (struct distr_item *)msg_data(msg); u32 count = msg_data_sz(msg) / ITEM_SIZE; + u32 node = msg_orignode(msg); write_lock_bh(&tipc_nametbl_lock); while (count--) { - if (msg_type(msg) == PUBLICATION) { - publ = tipc_nametbl_insert_publ(ntohl(item->type), - ntohl(item->lower), - ntohl(item->upper), - TIPC_CLUSTER_SCOPE, - msg_orignode(msg), - ntohl(item->ref), - ntohl(item->key)); - if (publ) { - tipc_nodesub_subscribe(&publ->subscr, - msg_orignode(msg), - publ, - (net_ev_handler) - named_purge_publ); - } - } else if (msg_type(msg) == WITHDRAWAL) { - publ = tipc_nametbl_remove_publ(ntohl(item->type), - ntohl(item->lower), - msg_orignode(msg), - ntohl(item->ref), - ntohl(item->key)); - - if (publ) { - tipc_nodesub_unsubscribe(&publ->subscr); - kfree(publ); - } else { - pr_err("Unable to remove publication by node 0x%x\n" - " (type=%u, lower=%u, ref=%u, key=%u)\n", - msg_orignode(msg), ntohl(item->type), - ntohl(item->lower), ntohl(item->ref), - ntohl(item->key)); - } - } else { - pr_warn("Unrecognized name table message received\n"); - } + if (!tipc_update_nametbl(item, node, msg_type(msg))) + tipc_named_add_backlog(item, msg_type(msg), node); item++; } + tipc_named_process_backlog(); write_unlock_bh(&tipc_nametbl_lock); kfree_skb(buf); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index 8afe32b7fc9a0b6a2b9f78657fd13756eae0b465..b9e75feb3434e76fc96a5e71a07e94fbcc709888 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -73,5 +73,6 @@ void named_cluster_distribute(struct sk_buff *buf); void tipc_named_node_up(u32 dnode); void tipc_named_rcv(struct sk_buff *buf); void tipc_named_reinit(void); +void tipc_named_process_backlog(void); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 9d7d37d95187c77d9d7490ce7aec4de147a9f2fd..3a6a0a7c0759f01ad06b7c8d9b08a6c8d98e4ca9 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -39,7 +39,6 @@ #include "name_table.h" #include "name_distr.h" #include "subscr.h" -#include "port.h" #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ @@ -262,8 +261,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, /* Lower end overlaps existing entry => need an exact match */ if ((sseq->lower != lower) || (sseq->upper != upper)) { - pr_warn("Cannot publish {%u,%u,%u}, overlap error\n", - type, lower, upper); return NULL; } @@ -285,8 +282,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, /* Fail if upper end overlaps into an existing entry */ if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { - pr_warn("Cannot publish {%u,%u,%u}, overlap error\n", - type, lower, upper); return NULL; } @@ -678,6 +673,8 @@ struct publication 
*tipc_nametbl_publish(u32 type, u32 lower, u32 upper, if (likely(publ)) { table.local_publ_count++; buf = tipc_named_publish(publ); + /* Any pending external events? */ + tipc_named_process_backlog(); } write_unlock_bh(&tipc_nametbl_lock); @@ -699,6 +696,8 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) if (likely(publ)) { table.local_publ_count--; buf = tipc_named_withdraw(publ); + /* Any pending external events? */ + tipc_named_process_backlog(); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); diff --git a/net/tipc/net.c b/net/tipc/net.c index 7fcc94998feae6eb1427dd25ac965fece340227a..93b9944a6a8bb1e94c332b8bd5e5f641d90899d8 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -38,7 +38,6 @@ #include "net.h" #include "name_distr.h" #include "subscr.h" -#include "port.h" #include "socket.h" #include "node.h" #include "config.h" @@ -111,7 +110,7 @@ int tipc_net_start(u32 addr) tipc_own_addr = addr; tipc_named_reinit(); - tipc_port_reinit(); + tipc_sk_reinit(); res = tipc_bclink_init(); if (res) return res; diff --git a/net/tipc/node.c b/net/tipc/node.c index f7069299943f847f7853a3bb9e5cf781e6caf9b8..90cee4a6fce49450a0f83cca53fcf663ebea8148 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -38,6 +38,7 @@ #include "config.h" #include "node.h" #include "name_distr.h" +#include "socket.h" #define NODE_HTABLE_SIZE 512 @@ -50,6 +51,13 @@ static u32 tipc_num_nodes; static u32 tipc_num_links; static DEFINE_SPINLOCK(node_list_lock); +struct tipc_sock_conn { + u32 port; + u32 peer_port; + u32 peer_node; + struct list_head list; +}; + /* * A trivial power-of-two bitmask technique is used for speed, since this * operation is done for every incoming TIPC packet. The number of hash table @@ -100,6 +108,8 @@ struct tipc_node *tipc_node_create(u32 addr) INIT_HLIST_NODE(&n_ptr->hash); INIT_LIST_HEAD(&n_ptr->list); INIT_LIST_HEAD(&n_ptr->nsub); + INIT_LIST_HEAD(&n_ptr->conn_sks); + __skb_queue_head_init(&n_ptr->waiting_sks); hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); @@ -136,6 +146,71 @@ void tipc_node_stop(void) spin_unlock_bh(&node_list_lock); } +int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) +{ + struct tipc_node *node; + struct tipc_sock_conn *conn; + + if (in_own_node(dnode)) + return 0; + + node = tipc_node_find(dnode); + if (!node) { + pr_warn("Connecting sock to node 0x%x failed\n", dnode); + return -EHOSTUNREACH; + } + conn = kmalloc(sizeof(*conn), GFP_ATOMIC); + if (!conn) + return -EHOSTUNREACH; + conn->peer_node = dnode; + conn->port = port; + conn->peer_port = peer_port; + + tipc_node_lock(node); + list_add_tail(&conn->list, &node->conn_sks); + tipc_node_unlock(node); + return 0; +} + +void tipc_node_remove_conn(u32 dnode, u32 port) +{ + struct tipc_node *node; + struct tipc_sock_conn *conn, *safe; + + if (in_own_node(dnode)) + return; + + node = tipc_node_find(dnode); + if (!node) + return; + + tipc_node_lock(node); + list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { + if (port != conn->port) + continue; + list_del(&conn->list); + kfree(conn); + } + tipc_node_unlock(node); +} + +void tipc_node_abort_sock_conns(struct list_head *conns) +{ + struct tipc_sock_conn *conn, *safe; + struct sk_buff *buf; + + list_for_each_entry_safe(conn, safe, conns, list) { + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, tipc_own_addr, + conn->peer_node, conn->port, + conn->peer_port, TIPC_ERR_NO_NODE); + if (likely(buf)) + tipc_sk_rcv(buf); + list_del(&conn->list); + 
kfree(conn); + } +} + /** * tipc_node_link_up - handle addition of link * @@ -474,25 +549,45 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) void tipc_node_unlock(struct tipc_node *node) { LIST_HEAD(nsub_list); + LIST_HEAD(conn_sks); + struct sk_buff_head waiting_sks; u32 addr = 0; + unsigned int flags = node->action_flags; if (likely(!node->action_flags)) { spin_unlock_bh(&node->lock); return; } + __skb_queue_head_init(&waiting_sks); + if (node->action_flags & TIPC_WAKEUP_USERS) { + skb_queue_splice_init(&node->waiting_sks, &waiting_sks); + node->action_flags &= ~TIPC_WAKEUP_USERS; + } if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { list_replace_init(&node->nsub, &nsub_list); + list_replace_init(&node->conn_sks, &conn_sks); node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; } if (node->action_flags & TIPC_NOTIFY_NODE_UP) { node->action_flags &= ~TIPC_NOTIFY_NODE_UP; addr = node->addr; } + node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; spin_unlock_bh(&node->lock); + while (!skb_queue_empty(&waiting_sks)) + tipc_sk_rcv(__skb_dequeue(&waiting_sks)); + + if (!list_empty(&conn_sks)) + tipc_node_abort_sock_conns(&conn_sks); + if (!list_empty(&nsub_list)) tipc_nodesub_notify(&nsub_list); + + if (flags & TIPC_WAKEUP_BCAST_USERS) + tipc_bclink_wakeup_users(); + if (addr) tipc_named_node_up(addr); } diff --git a/net/tipc/node.h b/net/tipc/node.h index b61716a8218e583b6a01f711cfb3a7de950d3569..67513c3c852c41f2d2a89ffc4d1eb22b7e77de91 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -58,7 +58,9 @@ enum { TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), TIPC_NOTIFY_NODE_DOWN = (1 << 3), - TIPC_NOTIFY_NODE_UP = (1 << 4) + TIPC_NOTIFY_NODE_UP = (1 << 4), + TIPC_WAKEUP_USERS = (1 << 5), + TIPC_WAKEUP_BCAST_USERS = (1 << 6) }; /** @@ -115,6 +117,8 @@ struct tipc_node { int working_links; u32 signature; struct list_head nsub; + struct sk_buff_head waiting_sks; + struct list_head conn_sks; struct rcu_head rcu; }; @@ -133,6 +137,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len); void tipc_node_unlock(struct tipc_node *node); +int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); +void tipc_node_remove_conn(u32 dnode, u32 port); static inline void tipc_node_lock(struct tipc_node *node) { diff --git a/net/tipc/port.c b/net/tipc/port.c deleted file mode 100644 index 7e096a5e770156631e701c91fc8c5ac4a34c5ce9..0000000000000000000000000000000000000000 --- a/net/tipc/port.c +++ /dev/null @@ -1,514 +0,0 @@ -/* - * net/tipc/port.c: TIPC port code - * - * Copyright (c) 1992-2007, 2014, Ericsson AB - * Copyright (c) 2004-2008, 2010-2013, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "core.h" -#include "config.h" -#include "port.h" -#include "name_table.h" -#include "socket.h" - -/* Connection management: */ -#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */ - -#define MAX_REJECT_SIZE 1024 - -DEFINE_SPINLOCK(tipc_port_list_lock); - -static LIST_HEAD(ports); -static void port_handle_node_down(unsigned long ref); -static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err); -static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err); -static void port_timeout(unsigned long ref); - -/** - * tipc_port_peer_msg - verify message was sent by connected port's peer - * - * Handles cases where the node's network address has changed from - * the default of <0.0.0> to its configured setting. - */ -int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) -{ - u32 peernode; - u32 orignode; - - if (msg_origport(msg) != tipc_port_peerport(p_ptr)) - return 0; - - orignode = msg_orignode(msg); - peernode = tipc_port_peernode(p_ptr); - return (orignode == peernode) || - (!orignode && (peernode == tipc_own_addr)) || - (!peernode && (orignode == tipc_own_addr)); -} - -/* tipc_port_init - intiate TIPC port and lock it - * - * Returns obtained reference if initialization is successful, zero otherwise - */ -u32 tipc_port_init(struct tipc_port *p_ptr, - const unsigned int importance) -{ - struct tipc_msg *msg; - u32 ref; - - ref = tipc_ref_acquire(p_ptr, &p_ptr->lock); - if (!ref) { - pr_warn("Port registration failed, ref. 
table exhausted\n"); - return 0; - } - - p_ptr->max_pkt = MAX_PKT_DEFAULT; - p_ptr->ref = ref; - INIT_LIST_HEAD(&p_ptr->wait_list); - INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); - k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); - INIT_LIST_HEAD(&p_ptr->publications); - INIT_LIST_HEAD(&p_ptr->port_list); - - /* - * Must hold port list lock while initializing message header template - * to ensure a change to node's own network address doesn't result - * in template containing out-dated network address information - */ - spin_lock_bh(&tipc_port_list_lock); - msg = &p_ptr->phdr; - tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); - msg_set_origport(msg, ref); - list_add_tail(&p_ptr->port_list, &ports); - spin_unlock_bh(&tipc_port_list_lock); - return ref; -} - -void tipc_port_destroy(struct tipc_port *p_ptr) -{ - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - u32 peer; - - tipc_withdraw(p_ptr, 0, NULL); - - spin_lock_bh(p_ptr->lock); - tipc_ref_discard(p_ptr->ref); - spin_unlock_bh(p_ptr->lock); - - k_cancel_timer(&p_ptr->timer); - if (p_ptr->connected) { - buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT); - tipc_nodesub_unsubscribe(&p_ptr->subscription); - msg = buf_msg(buf); - peer = msg_destnode(msg); - tipc_link_xmit(buf, peer, msg_link_selector(msg)); - } - spin_lock_bh(&tipc_port_list_lock); - list_del(&p_ptr->port_list); - list_del(&p_ptr->wait_list); - spin_unlock_bh(&tipc_port_list_lock); - k_term_timer(&p_ptr->timer); -} - -/* - * port_build_proto_msg(): create connection protocol message for port - * - * On entry the port must be locked and connected. - */ -static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr, - u32 type, u32 ack) -{ - struct sk_buff *buf; - struct tipc_msg *msg; - - buf = tipc_buf_acquire(INT_H_SIZE); - if (buf) { - msg = buf_msg(buf); - tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE, - tipc_port_peernode(p_ptr)); - msg_set_destport(msg, tipc_port_peerport(p_ptr)); - msg_set_origport(msg, p_ptr->ref); - msg_set_msgcnt(msg, ack); - buf->next = NULL; - } - return buf; -} - -static void port_timeout(unsigned long ref) -{ - struct tipc_port *p_ptr = tipc_port_lock(ref); - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - - if (!p_ptr) - return; - - if (!p_ptr->connected) { - tipc_port_unlock(p_ptr); - return; - } - - /* Last probe answered ? 
*/ - if (p_ptr->probing_state == TIPC_CONN_PROBING) { - buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT); - } else { - buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0); - p_ptr->probing_state = TIPC_CONN_PROBING; - k_start_timer(&p_ptr->timer, p_ptr->probing_interval); - } - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - - -static void port_handle_node_down(unsigned long ref) -{ - struct tipc_port *p_ptr = tipc_port_lock(ref); - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - - if (!p_ptr) - return; - buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE); - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - - -static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err) -{ - struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err); - - if (buf) { - struct tipc_msg *msg = buf_msg(buf); - msg_swap_words(msg, 4, 5); - msg_swap_words(msg, 6, 7); - buf->next = NULL; - } - return buf; -} - - -static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err) -{ - struct sk_buff *buf; - struct tipc_msg *msg; - u32 imp; - - if (!p_ptr->connected) - return NULL; - - buf = tipc_buf_acquire(BASIC_H_SIZE); - if (buf) { - msg = buf_msg(buf); - memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE); - msg_set_hdr_sz(msg, BASIC_H_SIZE); - msg_set_size(msg, BASIC_H_SIZE); - imp = msg_importance(msg); - if (imp < TIPC_CRITICAL_IMPORTANCE) - msg_set_importance(msg, ++imp); - msg_set_errcode(msg, err); - buf->next = NULL; - } - return buf; -} - -static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id) -{ - struct publication *publ; - int ret; - - if (full_id) - ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:", - tipc_zone(tipc_own_addr), - tipc_cluster(tipc_own_addr), - tipc_node(tipc_own_addr), p_ptr->ref); - else - ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref); - - if (p_ptr->connected) { - u32 dport = tipc_port_peerport(p_ptr); - u32 destnode = tipc_port_peernode(p_ptr); - - ret += tipc_snprintf(buf + ret, len - ret, - " connected to <%u.%u.%u:%u>", - tipc_zone(destnode), - tipc_cluster(destnode), - tipc_node(destnode), dport); - if (p_ptr->conn_type != 0) - ret += tipc_snprintf(buf + ret, len - ret, - " via {%u,%u}", p_ptr->conn_type, - p_ptr->conn_instance); - } else if (p_ptr->published) { - ret += tipc_snprintf(buf + ret, len - ret, " bound to"); - list_for_each_entry(publ, &p_ptr->publications, pport_list) { - if (publ->lower == publ->upper) - ret += tipc_snprintf(buf + ret, len - ret, - " {%u,%u}", publ->type, - publ->lower); - else - ret += tipc_snprintf(buf + ret, len - ret, - " {%u,%u,%u}", publ->type, - publ->lower, publ->upper); - } - } - ret += tipc_snprintf(buf + ret, len - ret, "\n"); - return ret; -} - -struct sk_buff *tipc_port_get_ports(void) -{ - struct sk_buff *buf; - struct tlv_desc *rep_tlv; - char *pb; - int pb_len; - struct tipc_port *p_ptr; - int str_len = 0; - - buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); - if (!buf) - return NULL; - rep_tlv = (struct tlv_desc *)buf->data; - pb = TLV_DATA(rep_tlv); - pb_len = ULTRA_STRING_MAX_LEN; - - spin_lock_bh(&tipc_port_list_lock); - list_for_each_entry(p_ptr, &ports, port_list) { - spin_lock_bh(p_ptr->lock); - str_len += port_print(p_ptr, pb, pb_len, 0); - spin_unlock_bh(p_ptr->lock); - } - spin_unlock_bh(&tipc_port_list_lock); - str_len += 1; /* for "\0" */ - skb_put(buf, TLV_SPACE(str_len)); - TLV_SET(rep_tlv, 
TIPC_TLV_ULTRA_STRING, NULL, str_len); - - return buf; -} - -void tipc_port_reinit(void) -{ - struct tipc_port *p_ptr; - struct tipc_msg *msg; - - spin_lock_bh(&tipc_port_list_lock); - list_for_each_entry(p_ptr, &ports, port_list) { - msg = &p_ptr->phdr; - msg_set_prevnode(msg, tipc_own_addr); - msg_set_orignode(msg, tipc_own_addr); - } - spin_unlock_bh(&tipc_port_list_lock); -} - -void tipc_acknowledge(u32 ref, u32 ack) -{ - struct tipc_port *p_ptr; - struct sk_buff *buf = NULL; - struct tipc_msg *msg; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return; - if (p_ptr->connected) - buf = port_build_proto_msg(p_ptr, CONN_ACK, ack); - - tipc_port_unlock(p_ptr); - if (!buf) - return; - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - -int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *seq) -{ - struct publication *publ; - u32 key; - - if (p_ptr->connected) - return -EINVAL; - key = p_ptr->ref + p_ptr->pub_count + 1; - if (key == p_ptr->ref) - return -EADDRINUSE; - - publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, - scope, p_ptr->ref, key); - if (publ) { - list_add(&publ->pport_list, &p_ptr->publications); - p_ptr->pub_count++; - p_ptr->published = 1; - return 0; - } - return -EINVAL; -} - -int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *seq) -{ - struct publication *publ; - struct publication *tpubl; - int res = -EINVAL; - - if (!seq) { - list_for_each_entry_safe(publ, tpubl, - &p_ptr->publications, pport_list) { - tipc_nametbl_withdraw(publ->type, publ->lower, - publ->ref, publ->key); - } - res = 0; - } else { - list_for_each_entry_safe(publ, tpubl, - &p_ptr->publications, pport_list) { - if (publ->scope != scope) - continue; - if (publ->type != seq->type) - continue; - if (publ->lower != seq->lower) - continue; - if (publ->upper != seq->upper) - break; - tipc_nametbl_withdraw(publ->type, publ->lower, - publ->ref, publ->key); - res = 0; - break; - } - } - if (list_empty(&p_ptr->publications)) - p_ptr->published = 0; - return res; -} - -int tipc_port_connect(u32 ref, struct tipc_portid const *peer) -{ - struct tipc_port *p_ptr; - int res; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - res = __tipc_port_connect(ref, p_ptr, peer); - tipc_port_unlock(p_ptr); - return res; -} - -/* - * __tipc_port_connect - connect to a remote peer - * - * Port must be locked. - */ -int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr, - struct tipc_portid const *peer) -{ - struct tipc_msg *msg; - int res = -EINVAL; - - if (p_ptr->published || p_ptr->connected) - goto exit; - if (!peer->ref) - goto exit; - - msg = &p_ptr->phdr; - msg_set_destnode(msg, peer->node); - msg_set_destport(msg, peer->ref); - msg_set_type(msg, TIPC_CONN_MSG); - msg_set_lookup_scope(msg, 0); - msg_set_hdr_sz(msg, SHORT_H_SIZE); - - p_ptr->probing_interval = PROBING_INTERVAL; - p_ptr->probing_state = TIPC_CONN_OK; - p_ptr->connected = 1; - k_start_timer(&p_ptr->timer, p_ptr->probing_interval); - - tipc_nodesub_subscribe(&p_ptr->subscription, peer->node, - (void *)(unsigned long)ref, - (net_ev_handler)port_handle_node_down); - res = 0; -exit: - p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref); - return res; -} - -/* - * __tipc_disconnect - disconnect port from peer - * - * Port must be locked. - */ -int __tipc_port_disconnect(struct tipc_port *tp_ptr) -{ - if (tp_ptr->connected) { - tp_ptr->connected = 0; - /* let timer expire on it's own to avoid deadlock! 
*/ - tipc_nodesub_unsubscribe(&tp_ptr->subscription); - return 0; - } - - return -ENOTCONN; -} - -/* - * tipc_port_disconnect(): Disconnect port form peer. - * This is a node local operation. - */ -int tipc_port_disconnect(u32 ref) -{ - struct tipc_port *p_ptr; - int res; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - res = __tipc_port_disconnect(p_ptr); - tipc_port_unlock(p_ptr); - return res; -} - -/* - * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect - */ -int tipc_port_shutdown(u32 ref) -{ - struct tipc_msg *msg; - struct tipc_port *p_ptr; - struct sk_buff *buf = NULL; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - - buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN); - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); - return tipc_port_disconnect(ref); -} diff --git a/net/tipc/port.h b/net/tipc/port.h deleted file mode 100644 index 3087da39ee47b2acf4f1d5b20babfb15a308d0be..0000000000000000000000000000000000000000 --- a/net/tipc/port.h +++ /dev/null @@ -1,190 +0,0 @@ -/* - * net/tipc/port.h: Include file for TIPC port code - * - * Copyright (c) 1994-2007, 2014, Ericsson AB - * Copyright (c) 2004-2007, 2010-2013, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _TIPC_PORT_H -#define _TIPC_PORT_H - -#include "ref.h" -#include "net.h" -#include "msg.h" -#include "node_subscr.h" - -#define TIPC_CONNACK_INTV 256 -#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) -#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ - SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) - -/** - * struct tipc_port - TIPC port structure - * @lock: pointer to spinlock for controlling access to port - * @connected: non-zero if port is currently connected to a peer port - * @conn_type: TIPC type used when connection was established - * @conn_instance: TIPC instance used when connection was established - * @published: non-zero if port has one or more associated names - * @max_pkt: maximum packet size "hint" used when building messages sent by port - * @ref: unique reference to port in TIPC object registry - * @phdr: preformatted message header used when sending messages - * @port_list: adjacent ports in TIPC's global list of ports - * @wait_list: adjacent ports in list of ports waiting on link congestion - * @waiting_pkts: - * @publications: list of publications for port - * @pub_count: total # of publications port has made during its lifetime - * @probing_state: - * @probing_interval: - * @timer_ref: - * @subscription: "node down" subscription used to terminate failed connections - */ -struct tipc_port { - spinlock_t *lock; - int connected; - u32 conn_type; - u32 conn_instance; - int published; - u32 max_pkt; - u32 ref; - struct tipc_msg phdr; - struct list_head port_list; - struct list_head wait_list; - u32 waiting_pkts; - struct list_head publications; - u32 pub_count; - u32 probing_state; - u32 probing_interval; - struct timer_list timer; - struct tipc_node_subscr subscription; -}; - -extern spinlock_t tipc_port_list_lock; -struct tipc_port_list; - -/* - * TIPC port manipulation routines - */ -u32 tipc_port_init(struct tipc_port *p_ptr, - const unsigned int importance); - -void tipc_acknowledge(u32 port_ref, u32 ack); - -void tipc_port_destroy(struct tipc_port *p_ptr); - -int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *name_seq); - -int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *name_seq); - -int tipc_port_connect(u32 portref, struct tipc_portid const *port); - -int tipc_port_disconnect(u32 portref); - -int tipc_port_shutdown(u32 ref); - -/* - * The following routines require that the port be locked on entry - */ -int __tipc_port_disconnect(struct tipc_port *tp_ptr); -int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr, - struct tipc_portid const *peer); -int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg); - -struct sk_buff *tipc_port_get_ports(void); -void tipc_port_reinit(void); - -/** - * tipc_port_lock - lock port instance referred to and return its pointer - */ -static inline struct tipc_port *tipc_port_lock(u32 ref) -{ - return (struct tipc_port *)tipc_ref_lock(ref); -} - -/** - * tipc_port_unlock - unlock a port instance - * - * Can use pointer instead of tipc_ref_unlock() since port is already locked. 
- */ -static inline void tipc_port_unlock(struct tipc_port *p_ptr) -{ - spin_unlock_bh(p_ptr->lock); -} - -static inline u32 tipc_port_peernode(struct tipc_port *p_ptr) -{ - return msg_destnode(&p_ptr->phdr); -} - -static inline u32 tipc_port_peerport(struct tipc_port *p_ptr) -{ - return msg_destport(&p_ptr->phdr); -} - -static inline bool tipc_port_unreliable(struct tipc_port *port) -{ - return msg_src_droppable(&port->phdr) != 0; -} - -static inline void tipc_port_set_unreliable(struct tipc_port *port, - bool unreliable) -{ - msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0); -} - -static inline bool tipc_port_unreturnable(struct tipc_port *port) -{ - return msg_dest_droppable(&port->phdr) != 0; -} - -static inline void tipc_port_set_unreturnable(struct tipc_port *port, - bool unreturnable) -{ - msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0); -} - - -static inline int tipc_port_importance(struct tipc_port *port) -{ - return msg_importance(&port->phdr); -} - -static inline int tipc_port_set_importance(struct tipc_port *port, int imp) -{ - if (imp > TIPC_CRITICAL_IMPORTANCE) - return -EINVAL; - msg_set_importance(&port->phdr, (u32)imp); - return 0; -} - -#endif diff --git a/net/tipc/ref.c b/net/tipc/ref.c deleted file mode 100644 index 3d4ecd754eeef578e708e8219d8dc636093ccdfe..0000000000000000000000000000000000000000 --- a/net/tipc/ref.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * net/tipc/ref.c: TIPC object registry code - * - * Copyright (c) 1991-2006, Ericsson AB - * Copyright (c) 2004-2007, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "core.h" -#include "ref.h" - -/** - * struct reference - TIPC object reference entry - * @object: pointer to object associated with reference entry - * @lock: spinlock controlling access to object - * @ref: reference value for object (combines instance & array index info) - */ -struct reference { - void *object; - spinlock_t lock; - u32 ref; -}; - -/** - * struct tipc_ref_table - table of TIPC object reference entries - * @entries: pointer to array of reference entries - * @capacity: array index of first unusable entry - * @init_point: array index of first uninitialized entry - * @first_free: array index of first unused object reference entry - * @last_free: array index of last unused object reference entry - * @index_mask: bitmask for array index portion of reference values - * @start_mask: initial value for instance value portion of reference values - */ -struct ref_table { - struct reference *entries; - u32 capacity; - u32 init_point; - u32 first_free; - u32 last_free; - u32 index_mask; - u32 start_mask; -}; - -/* - * Object reference table consists of 2**N entries. - * - * State Object ptr Reference - * ----- ---------- --------- - * In use non-NULL XXXX|own index - * (XXXX changes each time entry is acquired) - * Free NULL YYYY|next free index - * (YYYY is one more than last used XXXX) - * Uninitialized NULL 0 - * - * Entry 0 is not used; this allows index 0 to denote the end of the free list. - * - * Note that a reference value of 0 does not necessarily indicate that an - * entry is uninitialized, since the last entry in the free list could also - * have a reference value of 0 (although this is unlikely). - */ - -static struct ref_table tipc_ref_table; - -static DEFINE_SPINLOCK(ref_table_lock); - -/** - * tipc_ref_table_init - create reference table for objects - */ -int tipc_ref_table_init(u32 requested_size, u32 start) -{ - struct reference *table; - u32 actual_size; - - /* account for unused entry, then round up size to a power of 2 */ - - requested_size++; - for (actual_size = 16; actual_size < requested_size; actual_size <<= 1) - /* do nothing */ ; - - /* allocate table & mark all entries as uninitialized */ - table = vzalloc(actual_size * sizeof(struct reference)); - if (table == NULL) - return -ENOMEM; - - tipc_ref_table.entries = table; - tipc_ref_table.capacity = requested_size; - tipc_ref_table.init_point = 1; - tipc_ref_table.first_free = 0; - tipc_ref_table.last_free = 0; - tipc_ref_table.index_mask = actual_size - 1; - tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; - - return 0; -} - -/** - * tipc_ref_table_stop - destroy reference table for objects - */ -void tipc_ref_table_stop(void) -{ - vfree(tipc_ref_table.entries); - tipc_ref_table.entries = NULL; -} - -/** - * tipc_ref_acquire - create reference to an object - * - * Register an object pointer in reference table and lock the object. - * Returns a unique reference value that is used from then on to retrieve the - * object pointer, or to determine that the object has been deregistered. - * - * Note: The object is returned in the locked state so that the caller can - * register a partially initialized object, without running the risk that - * the object will be accessed before initialization is complete. - */ -u32 tipc_ref_acquire(void *object, spinlock_t **lock) -{ - u32 index; - u32 index_mask; - u32 next_plus_upper; - u32 ref; - struct reference *entry = NULL; - - if (!object) { - pr_err("Attempt to acquire ref. 
to non-existent obj\n"); - return 0; - } - if (!tipc_ref_table.entries) { - pr_err("Ref. table not found in acquisition attempt\n"); - return 0; - } - - /* take a free entry, if available; otherwise initialize a new entry */ - spin_lock_bh(&ref_table_lock); - if (tipc_ref_table.first_free) { - index = tipc_ref_table.first_free; - entry = &(tipc_ref_table.entries[index]); - index_mask = tipc_ref_table.index_mask; - next_plus_upper = entry->ref; - tipc_ref_table.first_free = next_plus_upper & index_mask; - ref = (next_plus_upper & ~index_mask) + index; - } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { - index = tipc_ref_table.init_point++; - entry = &(tipc_ref_table.entries[index]); - spin_lock_init(&entry->lock); - ref = tipc_ref_table.start_mask + index; - } else { - ref = 0; - } - spin_unlock_bh(&ref_table_lock); - - /* - * Grab the lock so no one else can modify this entry - * While we assign its ref value & object pointer - */ - if (entry) { - spin_lock_bh(&entry->lock); - entry->ref = ref; - entry->object = object; - *lock = &entry->lock; - /* - * keep it locked, the caller is responsible - * for unlocking this when they're done with it - */ - } - - return ref; -} - -/** - * tipc_ref_discard - invalidate references to an object - * - * Disallow future references to an object and free up the entry for re-use. - * Note: The entry's spin_lock may still be busy after discard - */ -void tipc_ref_discard(u32 ref) -{ - struct reference *entry; - u32 index; - u32 index_mask; - - if (!tipc_ref_table.entries) { - pr_err("Ref. table not found during discard attempt\n"); - return; - } - - index_mask = tipc_ref_table.index_mask; - index = ref & index_mask; - entry = &(tipc_ref_table.entries[index]); - - spin_lock_bh(&ref_table_lock); - - if (!entry->object) { - pr_err("Attempt to discard ref. to non-existent obj\n"); - goto exit; - } - if (entry->ref != ref) { - pr_err("Attempt to discard non-existent reference\n"); - goto exit; - } - - /* - * mark entry as unused; increment instance part of entry's reference - * to invalidate any subsequent references - */ - entry->object = NULL; - entry->ref = (ref & ~index_mask) + (index_mask + 1); - - /* append entry to free entry list */ - if (tipc_ref_table.first_free == 0) - tipc_ref_table.first_free = index; - else - tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; - tipc_ref_table.last_free = index; - -exit: - spin_unlock_bh(&ref_table_lock); -} - -/** - * tipc_ref_lock - lock referenced object and return pointer to it - */ -void *tipc_ref_lock(u32 ref) -{ - if (likely(tipc_ref_table.entries)) { - struct reference *entry; - - entry = &tipc_ref_table.entries[ref & - tipc_ref_table.index_mask]; - if (likely(entry->ref != 0)) { - spin_lock_bh(&entry->lock); - if (likely((entry->ref == ref) && (entry->object))) - return entry->object; - spin_unlock_bh(&entry->lock); - } - } - return NULL; -} diff --git a/net/tipc/ref.h b/net/tipc/ref.h deleted file mode 100644 index d01aa1df63b86e391ed61a2fb2b31ff245fdc87e..0000000000000000000000000000000000000000 --- a/net/tipc/ref.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * net/tipc/ref.h: Include file for TIPC object registry code - * - * Copyright (c) 1991-2006, Ericsson AB - * Copyright (c) 2005-2006, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _TIPC_REF_H -#define _TIPC_REF_H - -int tipc_ref_table_init(u32 requested_size, u32 start); -void tipc_ref_table_stop(void); - -u32 tipc_ref_acquire(void *object, spinlock_t **lock); -void tipc_ref_discard(u32 ref); - -void *tipc_ref_lock(u32 ref); - -#endif diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ff8c8118d56e216a62172f0d60e92bf892660ee3..75275c5cf9291a0afe0818b6ef99ccfc14db8822 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -35,17 +35,67 @@ */ #include "core.h" -#include "port.h" #include "name_table.h" #include "node.h" #include "link.h" #include +#include "config.h" +#include "socket.h" #define SS_LISTENING -1 /* socket is listening */ #define SS_READY -2 /* socket is connectionless */ -#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ -#define TIPC_FWD_MSG 1 +#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ +#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */ +#define TIPC_FWD_MSG 1 +#define TIPC_CONN_OK 0 +#define TIPC_CONN_PROBING 1 + +/** + * struct tipc_sock - TIPC socket structure + * @sk: socket - interacts with 'port' and with user via the socket API + * @connected: non-zero if port is currently connected to a peer port + * @conn_type: TIPC type used when connection was established + * @conn_instance: TIPC instance used when connection was established + * @published: non-zero if port has one or more associated names + * @max_pkt: maximum packet size "hint" used when building messages sent by port + * @ref: unique reference to port in TIPC object registry + * @phdr: preformatted message header used when sending messages + * @port_list: adjacent ports in TIPC's global list of ports + * @publications: list of publications for port + * @pub_count: total # of publications port has made during its lifetime + * @probing_state: + * @probing_interval: + * @timer: + * @port: port - interacts with 'sk' and with the rest of the TIPC stack + * @peer_name: the peer of the connection, if any + * 
@conn_timeout: the time we can wait for an unresponded setup request + * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue + * @link_cong: non-zero if owner must sleep because of link congestion + * @sent_unacked: # messages sent by socket, and not yet acked by peer + * @rcv_unacked: # messages read by user, but not yet acked back to peer + */ +struct tipc_sock { + struct sock sk; + int connected; + u32 conn_type; + u32 conn_instance; + int published; + u32 max_pkt; + u32 ref; + struct tipc_msg phdr; + struct list_head sock_list; + struct list_head publications; + u32 pub_count; + u32 probing_state; + u32 probing_interval; + struct timer_list timer; + uint conn_timeout; + atomic_t dupl_rcvcnt; + bool link_cong; + uint sent_unacked; + uint rcv_unacked; +}; static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); static void tipc_data_ready(struct sock *sk); @@ -53,6 +103,16 @@ static void tipc_write_space(struct sock *sk); static int tipc_release(struct socket *sock); static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); +static void tipc_sk_timeout(unsigned long ref); +static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq); +static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq); +static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk); +static void tipc_sk_ref_discard(u32 ref); +static struct tipc_sock *tipc_sk_get(u32 ref); +static struct tipc_sock *tipc_sk_get_next(u32 *ref); +static void tipc_sk_put(struct tipc_sock *tsk); static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; @@ -106,24 +166,75 @@ static struct proto tipc_proto_kern; * - port reference */ -#include "socket.h" +static u32 tsk_peer_node(struct tipc_sock *tsk) +{ + return msg_destnode(&tsk->phdr); +} + +static u32 tsk_peer_port(struct tipc_sock *tsk) +{ + return msg_destport(&tsk->phdr); +} + +static bool tsk_unreliable(struct tipc_sock *tsk) +{ + return msg_src_droppable(&tsk->phdr) != 0; +} + +static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) +{ + msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); +} + +static bool tsk_unreturnable(struct tipc_sock *tsk) +{ + return msg_dest_droppable(&tsk->phdr) != 0; +} + +static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) +{ + msg_set_dest_droppable(&tsk->phdr, unreturnable ? 
1 : 0); +} + +static int tsk_importance(struct tipc_sock *tsk) +{ + return msg_importance(&tsk->phdr); +} + +static int tsk_set_importance(struct tipc_sock *tsk, int imp) +{ + if (imp > TIPC_CRITICAL_IMPORTANCE) + return -EINVAL; + msg_set_importance(&tsk->phdr, (u32)imp); + return 0; +} + +static struct tipc_sock *tipc_sk(const struct sock *sk) +{ + return container_of(sk, struct tipc_sock, sk); +} + +static int tsk_conn_cong(struct tipc_sock *tsk) +{ + return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; +} /** - * advance_rx_queue - discard first buffer in socket receive queue + * tsk_advance_rx_queue - discard first buffer in socket receive queue * * Caller must hold socket lock */ -static void advance_rx_queue(struct sock *sk) +static void tsk_advance_rx_queue(struct sock *sk) { kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); } /** - * reject_rx_queue - reject all buffers in socket receive queue + * tsk_rej_rx_queue - reject all buffers in socket receive queue * * Caller must hold socket lock */ -static void reject_rx_queue(struct sock *sk) +static void tsk_rej_rx_queue(struct sock *sk) { struct sk_buff *buf; u32 dnode; @@ -134,6 +245,38 @@ static void reject_rx_queue(struct sock *sk) } } +/* tsk_peer_msg - verify if message was sent by connected port's peer + * + * Handles cases where the node's network address has changed from + * the default of <0.0.0> to its configured setting. + */ +static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) +{ + u32 peer_port = tsk_peer_port(tsk); + u32 orig_node; + u32 peer_node; + + if (unlikely(!tsk->connected)) + return false; + + if (unlikely(msg_origport(msg) != peer_port)) + return false; + + orig_node = msg_orignode(msg); + peer_node = tsk_peer_node(tsk); + + if (likely(orig_node == peer_node)) + return true; + + if (!orig_node && (peer_node == tipc_own_addr)) + return true; + + if (!peer_node && (orig_node == tipc_own_addr)) + return true; + + return false; +} + /** * tipc_sk_create - create a TIPC socket * @net: network namespace (must be default network) @@ -153,7 +296,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, socket_state state; struct sock *sk; struct tipc_sock *tsk; - struct tipc_port *port; + struct tipc_msg *msg; u32 ref; /* Validate arguments */ @@ -188,20 +331,24 @@ static int tipc_sk_create(struct net *net, struct socket *sock, return -ENOMEM; tsk = tipc_sk(sk); - port = &tsk->port; - - ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE); + ref = tipc_sk_ref_acquire(tsk); if (!ref) { - pr_warn("Socket registration failed, ref. 
table exhausted\n"); - sk_free(sk); + pr_warn("Socket create failed; reference table exhausted\n"); return -ENOMEM; } + tsk->max_pkt = MAX_PKT_DEFAULT; + tsk->ref = ref; + INIT_LIST_HEAD(&tsk->publications); + msg = &tsk->phdr; + tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, + NAMED_H_SIZE, 0); + msg_set_origport(msg, ref); /* Finish initializing socket data structures */ sock->ops = ops; sock->state = state; - sock_init_data(sock, sk); + k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref); sk->sk_backlog_rcv = tipc_backlog_rcv; sk->sk_rcvbuf = sysctl_tipc_rmem[1]; sk->sk_data_ready = tipc_data_ready; @@ -209,12 +356,11 @@ static int tipc_sk_create(struct net *net, struct socket *sock, tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; tsk->sent_unacked = 0; atomic_set(&tsk->dupl_rcvcnt, 0); - tipc_port_unlock(port); if (sock->state == SS_READY) { - tipc_port_set_unreturnable(port, true); + tsk_set_unreturnable(tsk, true); if (sock->type == SOCK_DGRAM) - tipc_port_set_unreliable(port, true); + tsk_set_unreliable(tsk, true); } return 0; } @@ -308,7 +454,6 @@ static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; struct tipc_sock *tsk; - struct tipc_port *port; struct sk_buff *buf; u32 dnode; @@ -320,13 +465,13 @@ static int tipc_release(struct socket *sock) return 0; tsk = tipc_sk(sk); - port = &tsk->port; lock_sock(sk); /* * Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer) */ + dnode = tsk_peer_node(tsk); while (sock->state != SS_DISCONNECTING) { buf = __skb_dequeue(&sk->sk_receive_queue); if (buf == NULL) @@ -337,17 +482,27 @@ static int tipc_release(struct socket *sock) if ((sock->state == SS_CONNECTING) || (sock->state == SS_CONNECTED)) { sock->state = SS_DISCONNECTING; - tipc_port_disconnect(port->ref); + tsk->connected = 0; + tipc_node_remove_conn(dnode, tsk->ref); } if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) tipc_link_xmit(buf, dnode, 0); } } - /* Destroy TIPC port; also disconnects an active connection and - * sends a 'FIN-' to peer. - */ - tipc_port_destroy(port); + tipc_sk_withdraw(tsk, 0, NULL); + tipc_sk_ref_discard(tsk->ref); + k_cancel_timer(&tsk->timer); + if (tsk->connected) { + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, dnode, tipc_own_addr, + tsk_peer_port(tsk), + tsk->ref, TIPC_ERR_NO_PORT); + if (buf) + tipc_link_xmit(buf, dnode, tsk->ref); + tipc_node_remove_conn(dnode, tsk->ref); + } + k_term_timer(&tsk->timer); /* Discard any remaining (connection-based) messages in receive queue */ __skb_queue_purge(&sk->sk_receive_queue); @@ -355,7 +510,6 @@ static int tipc_release(struct socket *sock) /* Reject any messages that accumulated in backlog queue */ sock->state = SS_DISCONNECTING; release_sock(sk); - sock_put(sk); sock->sk = NULL; @@ -387,7 +541,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, lock_sock(sk); if (unlikely(!uaddr_len)) { - res = tipc_withdraw(&tsk->port, 0, NULL); + res = tipc_sk_withdraw(tsk, 0, NULL); goto exit; } @@ -415,8 +569,8 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, } res = (addr->scope > 0) ? 
- tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) : - tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq); + tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : + tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); exit: release_sock(sk); return res; @@ -446,10 +600,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, if ((sock->state != SS_CONNECTED) && ((peer != 2) || (sock->state != SS_DISCONNECTING))) return -ENOTCONN; - addr->addr.id.ref = tipc_port_peerport(&tsk->port); - addr->addr.id.node = tipc_port_peernode(&tsk->port); + addr->addr.id.ref = tsk_peer_port(tsk); + addr->addr.id.node = tsk_peer_node(tsk); } else { - addr->addr.id.ref = tsk->port.ref; + addr->addr.id.ref = tsk->ref; addr->addr.id.node = tipc_own_addr; } @@ -518,7 +672,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, break; case SS_READY: case SS_CONNECTED: - if (!tsk->link_cong && !tipc_sk_conn_cong(tsk)) + if (!tsk->link_cong && !tsk_conn_cong(tsk)) mask |= POLLOUT; /* fall thru' */ case SS_CONNECTING: @@ -549,7 +703,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct iovec *iov, size_t dsz, long timeo) { struct sock *sk = sock->sk; - struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr; + struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; struct sk_buff *buf; uint mtu; int rc; @@ -579,6 +733,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, goto new_mtu; if (rc != -ELINKCONG) break; + tipc_sk(sk)->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) kfree_skb_list(buf); @@ -638,20 +793,19 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); - struct tipc_port *port = &tsk->port; int conn_cong; /* Ignore if connection cannot be validated: */ - if (!port->connected || !tipc_port_peer_msg(port, msg)) + if (!tsk_peer_msg(tsk, msg)) goto exit; - port->probing_state = TIPC_CONN_OK; + tsk->probing_state = TIPC_CONN_OK; if (msg_type(msg) == CONN_ACK) { - conn_cong = tipc_sk_conn_cong(tsk); + conn_cong = tsk_conn_cong(tsk); tsk->sent_unacked -= msg_msgcnt(msg); if (conn_cong) - tipc_sock_wakeup(tsk); + tsk->sk.sk_write_space(&tsk->sk); } else if (msg_type(msg) == CONN_PROBE) { if (!tipc_msg_reverse(buf, dnode, TIPC_OK)) return TIPC_OK; @@ -742,8 +896,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; - struct tipc_msg *mhdr = &port->phdr; + struct tipc_msg *mhdr = &tsk->phdr; struct iovec *iov = m->msg_iov; u32 dnode, dport; struct sk_buff *buf; @@ -774,13 +927,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, rc = -EISCONN; goto exit; } - if (tsk->port.published) { + if (tsk->published) { rc = -EOPNOTSUPP; goto exit; } if (dest->addrtype == TIPC_ADDR_NAME) { - tsk->port.conn_type = dest->addr.name.name.type; - tsk->port.conn_instance = dest->addr.name.name.instance; + tsk->conn_type = dest->addr.name.name.type; + tsk->conn_instance = dest->addr.name.name.instance; } } rc = dest_name_check(dest, m); @@ -820,13 +973,14 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, } new_mtu: - mtu = tipc_node_get_mtu(dnode, tsk->port.ref); + mtu = tipc_node_get_mtu(dnode, tsk->ref); rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); if (rc < 0) goto exit; do { - rc = tipc_link_xmit(buf, dnode, 
tsk->port.ref); + TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; + rc = tipc_link_xmit(buf, dnode, tsk->ref); if (likely(rc >= 0)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -835,10 +989,9 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, } if (rc == -EMSGSIZE) goto new_mtu; - if (rc != -ELINKCONG) break; - + tsk->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) kfree_skb_list(buf); @@ -873,8 +1026,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); done = sk_wait_event(sk, timeo_p, (!tsk->link_cong && - !tipc_sk_conn_cong(tsk)) || - !tsk->port.connected); + !tsk_conn_cong(tsk)) || + !tsk->connected); finish_wait(sk_sleep(sk), &wait); } while (!done); return 0; @@ -897,11 +1050,10 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; - struct tipc_msg *mhdr = &port->phdr; + struct tipc_msg *mhdr = &tsk->phdr; struct sk_buff *buf; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); - u32 ref = port->ref; + u32 ref = tsk->ref; int rc = -EINVAL; long timeo; u32 dnode; @@ -929,16 +1081,16 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, } timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); - dnode = tipc_port_peernode(port); + dnode = tsk_peer_node(tsk); next: - mtu = port->max_pkt; + mtu = tsk->max_pkt; send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); if (unlikely(rc < 0)) goto exit; do { - if (likely(!tipc_sk_conn_cong(tsk))) { + if (likely(!tsk_conn_cong(tsk))) { rc = tipc_link_xmit(buf, dnode, ref); if (likely(!rc)) { tsk->sent_unacked++; @@ -948,11 +1100,12 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, goto next; } if (rc == -EMSGSIZE) { - port->max_pkt = tipc_node_get_mtu(dnode, ref); + tsk->max_pkt = tipc_node_get_mtu(dnode, ref); goto next; } if (rc != -ELINKCONG) break; + tsk->link_cong = 1; } rc = tipc_wait_for_sndpkt(sock, &timeo); if (rc) @@ -984,29 +1137,25 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, return tipc_send_stream(iocb, sock, m, dsz); } -/** - * auto_connect - complete connection setup to a remote port - * @tsk: tipc socket structure - * @msg: peer's response message - * - * Returns 0 on success, errno otherwise +/* tipc_sk_finish_conn - complete the setup of a connection */ -static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg) +static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, + u32 peer_node) { - struct tipc_port *port = &tsk->port; - struct socket *sock = tsk->sk.sk_socket; - struct tipc_portid peer; - - peer.ref = msg_origport(msg); - peer.node = msg_orignode(msg); - - __tipc_port_connect(port->ref, port, &peer); - - if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE) - return -EINVAL; - msg_set_importance(&port->phdr, (u32)msg_importance(msg)); - sock->state = SS_CONNECTED; - return 0; + struct tipc_msg *msg = &tsk->phdr; + + msg_set_destnode(msg, peer_node); + msg_set_destport(msg, peer_port); + msg_set_type(msg, TIPC_CONN_MSG); + msg_set_lookup_scope(msg, 0); + msg_set_hdr_sz(msg, SHORT_H_SIZE); + + tsk->probing_interval = CONN_PROBING_INTERVAL; + tsk->probing_state = TIPC_CONN_OK; + tsk->connected = 1; + k_start_timer(&tsk->timer, tsk->probing_interval); + tipc_node_add_conn(peer_node, tsk->ref, peer_port); + 
tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref); } /** @@ -1033,17 +1182,17 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) } /** - * anc_data_recv - optionally capture ancillary data for received message + * tipc_sk_anc_data_recv - optionally capture ancillary data for received message * @m: descriptor for message info * @msg: received message header - * @tport: TIPC port associated with message + * @tsk: TIPC port associated with message * * Note: Ancillary data is not captured if not requested by receiver. * * Returns 0 if successful, otherwise errno */ -static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, - struct tipc_port *tport) +static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, + struct tipc_sock *tsk) { u32 anc_data[3]; u32 err; @@ -1086,10 +1235,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, anc_data[2] = msg_nameupper(msg); break; case TIPC_CONN_MSG: - has_name = (tport->conn_type != 0); - anc_data[0] = tport->conn_type; - anc_data[1] = tport->conn_instance; - anc_data[2] = tport->conn_instance; + has_name = (tsk->conn_type != 0); + anc_data[0] = tsk->conn_type; + anc_data[1] = tsk->conn_instance; + anc_data[2] = tsk->conn_instance; break; default: has_name = 0; @@ -1103,6 +1252,24 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, return 0; } +static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) +{ + struct sk_buff *buf = NULL; + struct tipc_msg *msg; + u32 peer_port = tsk_peer_port(tsk); + u32 dnode = tsk_peer_node(tsk); + + if (!tsk->connected) + return; + buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, + tipc_own_addr, peer_port, tsk->ref, TIPC_OK); + if (!buf) + return; + msg = buf_msg(buf); + msg_set_msgcnt(msg, ack); + tipc_link_xmit(buf, dnode, msg_link_selector(msg)); +} + static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) { struct sock *sk = sock->sk; @@ -1153,7 +1320,6 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; struct tipc_msg *msg; long timeo; @@ -1188,7 +1354,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); goto restart; } @@ -1196,7 +1362,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, set_orig_addr(m, msg); /* Capture ancillary data (optional) */ - res = anc_data_recv(m, msg, port); + res = tipc_sk_anc_data_recv(m, msg, tsk); if (res) goto exit; @@ -1223,10 +1389,10 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, if (likely(!(flags & MSG_PEEK))) { if ((sock->state != SS_READY) && (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - tipc_acknowledge(port->ref, tsk->rcv_unacked); + tipc_sk_send_ack(tsk, tsk->rcv_unacked); tsk->rcv_unacked = 0; } - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); } exit: release_sock(sk); @@ -1250,7 +1416,6 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; struct tipc_msg *msg; long timeo; @@ -1288,14 +1453,14 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); 
goto restart; } /* Optionally capture sender's address & ancillary data of first msg */ if (sz_copied == 0) { set_orig_addr(m, msg); - res = anc_data_recv(m, msg, port); + res = tipc_sk_anc_data_recv(m, msg, tsk); if (res) goto exit; } @@ -1334,10 +1499,10 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, /* Consume received message (optional) */ if (likely(!(flags & MSG_PEEK))) { if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - tipc_acknowledge(port->ref, tsk->rcv_unacked); + tipc_sk_send_ack(tsk, tsk->rcv_unacked); tsk->rcv_unacked = 0; } - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); } /* Loop around if more data is required */ @@ -1396,12 +1561,9 @@ static void tipc_data_ready(struct sock *sk) static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) { struct sock *sk = &tsk->sk; - struct tipc_port *port = &tsk->port; struct socket *sock = sk->sk_socket; struct tipc_msg *msg = buf_msg(*buf); - int retval = -TIPC_ERR_NO_PORT; - int res; if (msg_mcast(msg)) return retval; @@ -1409,16 +1571,23 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) switch ((int)sock->state) { case SS_CONNECTED: /* Accept only connection-based messages sent by peer */ - if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) { + if (tsk_peer_msg(tsk, msg)) { if (unlikely(msg_errcode(msg))) { sock->state = SS_DISCONNECTING; - __tipc_port_disconnect(port); + tsk->connected = 0; + /* let timer expire on it's own */ + tipc_node_remove_conn(tsk_peer_node(tsk), + tsk->ref); } retval = TIPC_OK; } break; case SS_CONNECTING: /* Accept only ACK or NACK message */ + + if (unlikely(!msg_connected(msg))) + break; + if (unlikely(msg_errcode(msg))) { sock->state = SS_DISCONNECTING; sk->sk_err = ECONNREFUSED; @@ -1426,17 +1595,17 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) break; } - if (unlikely(!msg_connected(msg))) - break; - - res = auto_connect(tsk, msg); - if (res) { + if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) { sock->state = SS_DISCONNECTING; - sk->sk_err = -res; + sk->sk_err = EINVAL; retval = TIPC_OK; break; } + tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg)); + msg_set_importance(&tsk->phdr, msg_importance(msg)); + sock->state = SS_CONNECTED; + /* If an incoming message is an 'ACK-', it should be * discarded here because it doesn't contain useful * data. 
In addition, we should try to wake up @@ -1518,6 +1687,13 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) if (unlikely(msg_user(msg) == CONN_MANAGER)) return tipc_sk_proto_rcv(tsk, &onode, buf); + if (unlikely(msg_user(msg) == SOCK_WAKEUP)) { + kfree_skb(buf); + tsk->link_cong = 0; + sk->sk_write_space(sk); + return TIPC_OK; + } + /* Reject message if it is wrong sort of message for socket */ if (msg_type(msg) > TIPC_DIRECT_MSG) return -TIPC_ERR_NO_PORT; @@ -1585,7 +1761,6 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) int tipc_sk_rcv(struct sk_buff *buf) { struct tipc_sock *tsk; - struct tipc_port *port; struct sock *sk; u32 dport = msg_destport(buf_msg(buf)); int rc = TIPC_OK; @@ -1593,13 +1768,11 @@ int tipc_sk_rcv(struct sk_buff *buf) u32 dnode; /* Validate destination and message */ - port = tipc_port_lock(dport); - if (unlikely(!port)) { + tsk = tipc_sk_get(dport); + if (unlikely(!tsk)) { rc = tipc_msg_eval(buf, &dnode); goto exit; } - - tsk = tipc_port_to_sock(port); sk = &tsk->sk; /* Queue message */ @@ -1615,8 +1788,7 @@ int tipc_sk_rcv(struct sk_buff *buf) rc = -TIPC_ERR_OVERLOAD; } bh_unlock_sock(sk); - tipc_port_unlock(port); - + tipc_sk_put(tsk); if (likely(!rc)) return 0; exit: @@ -1803,10 +1975,8 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) { struct sock *new_sk, *sk = sock->sk; struct sk_buff *buf; - struct tipc_port *new_port; + struct tipc_sock *new_tsock; struct tipc_msg *msg; - struct tipc_portid peer; - u32 new_ref; long timeo; int res; @@ -1828,8 +1998,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) goto exit; new_sk = new_sock->sk; - new_port = &tipc_sk(new_sk)->port; - new_ref = new_port->ref; + new_tsock = tipc_sk(new_sk); msg = buf_msg(buf); /* we lock on new_sk; but lockdep sees the lock on sk */ @@ -1839,18 +2008,16 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - reject_rx_queue(new_sk); + tsk_rej_rx_queue(new_sk); /* Connect new socket to it's peer */ - peer.ref = msg_origport(msg); - peer.node = msg_orignode(msg); - tipc_port_connect(new_ref, &peer); + tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); new_sock->state = SS_CONNECTED; - tipc_port_set_importance(new_port, msg_importance(msg)); + tsk_set_importance(new_tsock, msg_importance(msg)); if (msg_named(msg)) { - new_port->conn_type = msg_nametype(msg); - new_port->conn_instance = msg_nameinst(msg); + new_tsock->conn_type = msg_nametype(msg); + new_tsock->conn_instance = msg_nameinst(msg); } /* @@ -1860,7 +2027,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) if (!msg_data_sz(msg)) { struct msghdr m = {NULL,}; - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); tipc_send_packet(NULL, new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue); @@ -1886,9 +2053,8 @@ static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; - u32 peer; + u32 dnode; int res; if (how != SHUT_RDWR) @@ -1908,15 +2074,21 @@ static int tipc_shutdown(struct socket *sock, int how) kfree_skb(buf); goto restart; } - tipc_port_disconnect(port->ref); - if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN)) - tipc_link_xmit(buf, peer, 0); + if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) + 
tipc_link_xmit(buf, dnode, tsk->ref); + tipc_node_remove_conn(dnode, tsk->ref); } else { - tipc_port_shutdown(port->ref); + dnode = tsk_peer_node(tsk); + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, + TIPC_CONN_MSG, SHORT_H_SIZE, + 0, dnode, tipc_own_addr, + tsk_peer_port(tsk), + tsk->ref, TIPC_CONN_SHUTDOWN); + tipc_link_xmit(buf, dnode, tsk->ref); } - + tsk->connected = 0; sock->state = SS_DISCONNECTING; - + tipc_node_remove_conn(dnode, tsk->ref); /* fall through */ case SS_DISCONNECTING: @@ -1937,6 +2109,432 @@ static int tipc_shutdown(struct socket *sock, int how) return res; } +static void tipc_sk_timeout(unsigned long ref) +{ + struct tipc_sock *tsk; + struct sock *sk; + struct sk_buff *buf = NULL; + u32 peer_port, peer_node; + + tsk = tipc_sk_get(ref); + if (!tsk) + return; + + sk = &tsk->sk; + bh_lock_sock(sk); + if (!tsk->connected) { + bh_unlock_sock(sk); + goto exit; + } + peer_port = tsk_peer_port(tsk); + peer_node = tsk_peer_node(tsk); + + if (tsk->probing_state == TIPC_CONN_PROBING) { + /* Previous probe not answered -> self abort */ + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, tipc_own_addr, + peer_node, ref, peer_port, + TIPC_ERR_NO_PORT); + } else { + buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, + 0, peer_node, tipc_own_addr, + peer_port, ref, TIPC_OK); + tsk->probing_state = TIPC_CONN_PROBING; + k_start_timer(&tsk->timer, tsk->probing_interval); + } + bh_unlock_sock(sk); + if (buf) + tipc_link_xmit(buf, peer_node, ref); +exit: + tipc_sk_put(tsk); +} + +static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq) +{ + struct publication *publ; + u32 key; + + if (tsk->connected) + return -EINVAL; + key = tsk->ref + tsk->pub_count + 1; + if (key == tsk->ref) + return -EADDRINUSE; + + publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, + scope, tsk->ref, key); + if (unlikely(!publ)) + return -EINVAL; + + list_add(&publ->pport_list, &tsk->publications); + tsk->pub_count++; + tsk->published = 1; + return 0; +} + +static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq) +{ + struct publication *publ; + struct publication *safe; + int rc = -EINVAL; + + list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) { + if (seq) { + if (publ->scope != scope) + continue; + if (publ->type != seq->type) + continue; + if (publ->lower != seq->lower) + continue; + if (publ->upper != seq->upper) + break; + tipc_nametbl_withdraw(publ->type, publ->lower, + publ->ref, publ->key); + rc = 0; + break; + } + tipc_nametbl_withdraw(publ->type, publ->lower, + publ->ref, publ->key); + rc = 0; + } + if (list_empty(&tsk->publications)) + tsk->published = 0; + return rc; +} + +static int tipc_sk_show(struct tipc_sock *tsk, char *buf, + int len, int full_id) +{ + struct publication *publ; + int ret; + + if (full_id) + ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:", + tipc_zone(tipc_own_addr), + tipc_cluster(tipc_own_addr), + tipc_node(tipc_own_addr), tsk->ref); + else + ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref); + + if (tsk->connected) { + u32 dport = tsk_peer_port(tsk); + u32 destnode = tsk_peer_node(tsk); + + ret += tipc_snprintf(buf + ret, len - ret, + " connected to <%u.%u.%u:%u>", + tipc_zone(destnode), + tipc_cluster(destnode), + tipc_node(destnode), dport); + if (tsk->conn_type != 0) + ret += tipc_snprintf(buf + ret, len - ret, + " via {%u,%u}", tsk->conn_type, + tsk->conn_instance); + } else if (tsk->published) { + ret += tipc_snprintf(buf 
+ ret, len - ret, " bound to"); + list_for_each_entry(publ, &tsk->publications, pport_list) { + if (publ->lower == publ->upper) + ret += tipc_snprintf(buf + ret, len - ret, + " {%u,%u}", publ->type, + publ->lower); + else + ret += tipc_snprintf(buf + ret, len - ret, + " {%u,%u,%u}", publ->type, + publ->lower, publ->upper); + } + } + ret += tipc_snprintf(buf + ret, len - ret, "\n"); + return ret; +} + +struct sk_buff *tipc_sk_socks_show(void) +{ + struct sk_buff *buf; + struct tlv_desc *rep_tlv; + char *pb; + int pb_len; + struct tipc_sock *tsk; + int str_len = 0; + u32 ref = 0; + + buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); + if (!buf) + return NULL; + rep_tlv = (struct tlv_desc *)buf->data; + pb = TLV_DATA(rep_tlv); + pb_len = ULTRA_STRING_MAX_LEN; + + tsk = tipc_sk_get_next(&ref); + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + str_len += tipc_sk_show(tsk, pb + str_len, + pb_len - str_len, 0); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + } + str_len += 1; /* for "\0" */ + skb_put(buf, TLV_SPACE(str_len)); + TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); + + return buf; +} + +/* tipc_sk_reinit: set non-zero address in all existing sockets + * when we go from standalone to network mode. + */ +void tipc_sk_reinit(void) +{ + struct tipc_msg *msg; + u32 ref = 0; + struct tipc_sock *tsk = tipc_sk_get_next(&ref); + + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + msg = &tsk->phdr; + msg_set_prevnode(msg, tipc_own_addr); + msg_set_orignode(msg, tipc_own_addr); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + } +} + +/** + * struct reference - TIPC socket reference entry + * @tsk: pointer to socket associated with reference entry + * @ref: reference value for socket (combines instance & array index info) + */ +struct reference { + struct tipc_sock *tsk; + u32 ref; +}; + +/** + * struct tipc_ref_table - table of TIPC socket reference entries + * @entries: pointer to array of reference entries + * @capacity: array index of first unusable entry + * @init_point: array index of first uninitialized entry + * @first_free: array index of first unused socket reference entry + * @last_free: array index of last unused socket reference entry + * @index_mask: bitmask for array index portion of reference values + * @start_mask: initial value for instance value portion of reference values + */ +struct ref_table { + struct reference *entries; + u32 capacity; + u32 init_point; + u32 first_free; + u32 last_free; + u32 index_mask; + u32 start_mask; +}; + +/* Socket reference table consists of 2**N entries. + * + * State Socket ptr Reference + * ----- ---------- --------- + * In use non-NULL XXXX|own index + * (XXXX changes each time entry is acquired) + * Free NULL YYYY|next free index + * (YYYY is one more than last used XXXX) + * Uninitialized NULL 0 + * + * Entry 0 is not used; this allows index 0 to denote the end of the free list. + * + * Note that a reference value of 0 does not necessarily indicate that an + * entry is uninitialized, since the last entry in the free list could also + * have a reference value of 0 (although this is unlikely). 
+ */ + +static struct ref_table tipc_ref_table; + +static DEFINE_RWLOCK(ref_table_lock); + +/** + * tipc_ref_table_init - create reference table for sockets + */ +int tipc_sk_ref_table_init(u32 req_sz, u32 start) +{ + struct reference *table; + u32 actual_sz; + + /* account for unused entry, then round up size to a power of 2 */ + + req_sz++; + for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) { + /* do nothing */ + }; + + /* allocate table & mark all entries as uninitialized */ + table = vzalloc(actual_sz * sizeof(struct reference)); + if (table == NULL) + return -ENOMEM; + + tipc_ref_table.entries = table; + tipc_ref_table.capacity = req_sz; + tipc_ref_table.init_point = 1; + tipc_ref_table.first_free = 0; + tipc_ref_table.last_free = 0; + tipc_ref_table.index_mask = actual_sz - 1; + tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; + + return 0; +} + +/** + * tipc_ref_table_stop - destroy reference table for sockets + */ +void tipc_sk_ref_table_stop(void) +{ + if (!tipc_ref_table.entries) + return; + vfree(tipc_ref_table.entries); + tipc_ref_table.entries = NULL; +} + +/* tipc_ref_acquire - create reference to a socket + * + * Register a socket pointer in the reference table. + * Returns a unique reference value that is used from then on to retrieve the + * socket pointer, or to determine if the socket has been deregistered. + */ +u32 tipc_sk_ref_acquire(struct tipc_sock *tsk) +{ + u32 index; + u32 index_mask; + u32 next_plus_upper; + u32 ref = 0; + struct reference *entry; + + if (unlikely(!tsk)) { + pr_err("Attempt to acquire ref. to non-existent obj\n"); + return 0; + } + if (unlikely(!tipc_ref_table.entries)) { + pr_err("Ref. table not found in acquisition attempt\n"); + return 0; + } + + /* Take a free entry, if available; otherwise initialize a new one */ + write_lock_bh(&ref_table_lock); + index = tipc_ref_table.first_free; + entry = &tipc_ref_table.entries[index]; + + if (likely(index)) { + index = tipc_ref_table.first_free; + entry = &tipc_ref_table.entries[index]; + index_mask = tipc_ref_table.index_mask; + next_plus_upper = entry->ref; + tipc_ref_table.first_free = next_plus_upper & index_mask; + ref = (next_plus_upper & ~index_mask) + index; + entry->tsk = tsk; + } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { + index = tipc_ref_table.init_point++; + entry = &tipc_ref_table.entries[index]; + ref = tipc_ref_table.start_mask + index; + } + + if (ref) { + entry->ref = ref; + entry->tsk = tsk; + } + write_unlock_bh(&ref_table_lock); + return ref; +} + +/* tipc_sk_ref_discard - invalidate reference to a socket + * + * Disallow future references to a socket and free up the entry for re-use. + */ +void tipc_sk_ref_discard(u32 ref) +{ + struct reference *entry; + u32 index; + u32 index_mask; + + if (unlikely(!tipc_ref_table.entries)) { + pr_err("Ref. table not found during discard attempt\n"); + return; + } + + index_mask = tipc_ref_table.index_mask; + index = ref & index_mask; + entry = &tipc_ref_table.entries[index]; + + write_lock_bh(&ref_table_lock); + + if (unlikely(!entry->tsk)) { + pr_err("Attempt to discard ref.
to non-existent socket\n"); + goto exit; + } + if (unlikely(entry->ref != ref)) { + pr_err("Attempt to discard non-existent reference\n"); + goto exit; + } + + /* Mark entry as unused; increment instance part of entry's + * reference to invalidate any subsequent references + */ + + entry->tsk = NULL; + entry->ref = (ref & ~index_mask) + (index_mask + 1); + + /* Append entry to free entry list */ + if (unlikely(tipc_ref_table.first_free == 0)) + tipc_ref_table.first_free = index; + else + tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; + tipc_ref_table.last_free = index; +exit: + write_unlock_bh(&ref_table_lock); +} + +/* tipc_sk_get - find referenced socket and return pointer to it + */ +struct tipc_sock *tipc_sk_get(u32 ref) +{ + struct reference *entry; + struct tipc_sock *tsk; + + if (unlikely(!tipc_ref_table.entries)) + return NULL; + read_lock_bh(&ref_table_lock); + entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; + tsk = entry->tsk; + if (likely(tsk && (entry->ref == ref))) + sock_hold(&tsk->sk); + else + tsk = NULL; + read_unlock_bh(&ref_table_lock); + return tsk; +} + +/* tipc_sk_get_next - lock & return next socket after referenced one +*/ +struct tipc_sock *tipc_sk_get_next(u32 *ref) +{ + struct reference *entry; + struct tipc_sock *tsk = NULL; + uint index = *ref & tipc_ref_table.index_mask; + + read_lock_bh(&ref_table_lock); + while (++index < tipc_ref_table.capacity) { + entry = &tipc_ref_table.entries[index]; + if (!entry->tsk) + continue; + tsk = entry->tsk; + sock_hold(&tsk->sk); + *ref = entry->ref; + break; + } + read_unlock_bh(&ref_table_lock); + return tsk; +} + +static void tipc_sk_put(struct tipc_sock *tsk) +{ + sock_put(&tsk->sk); +} + /** * tipc_setsockopt - set socket option * @sock: socket structure @@ -1955,7 +2553,6 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; u32 value; int res; @@ -1973,16 +2570,16 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, switch (opt) { case TIPC_IMPORTANCE: - res = tipc_port_set_importance(port, value); + res = tsk_set_importance(tsk, value); break; case TIPC_SRC_DROPPABLE: if (sock->type != SOCK_STREAM) - tipc_port_set_unreliable(port, value); + tsk_set_unreliable(tsk, value); else res = -ENOPROTOOPT; break; case TIPC_DEST_DROPPABLE: - tipc_port_set_unreturnable(port, value); + tsk_set_unreturnable(tsk, value); break; case TIPC_CONN_TIMEOUT: tipc_sk(sk)->conn_timeout = value; @@ -2015,7 +2612,6 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; int len; u32 value; int res; @@ -2032,16 +2628,16 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt, switch (opt) { case TIPC_IMPORTANCE: - value = tipc_port_importance(port); + value = tsk_importance(tsk); break; case TIPC_SRC_DROPPABLE: - value = tipc_port_unreliable(port); + value = tsk_unreliable(tsk); break; case TIPC_DEST_DROPPABLE: - value = tipc_port_unreturnable(port); + value = tsk_unreturnable(tsk); break; case TIPC_CONN_TIMEOUT: - value = tipc_sk(sk)->conn_timeout; + value = tsk->conn_timeout; /* no need to set "res", since already 0 at this point */ break; case TIPC_NODE_RECVQ_DEPTH: diff --git a/net/tipc/socket.h b/net/tipc/socket.h index 43b75b3cecedb9b3bee4fd6a9a42a8f5a4cbcf0b..baa43d03901e5b093238bf4a92b01135c109d5a2 100644 --- a/net/tipc/socket.h +++ 
b/net/tipc/socket.h @@ -35,56 +35,17 @@ #ifndef _TIPC_SOCK_H #define _TIPC_SOCK_H -#include "port.h" #include -#define TIPC_CONN_OK 0 -#define TIPC_CONN_PROBING 1 - -/** - * struct tipc_sock - TIPC socket structure - * @sk: socket - interacts with 'port' and with user via the socket API - * @port: port - interacts with 'sk' and with the rest of the TIPC stack - * @peer_name: the peer of the connection, if any - * @conn_timeout: the time we can wait for an unresponded setup request - * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue - * @link_cong: non-zero if owner must sleep because of link congestion - * @sent_unacked: # messages sent by socket, and not yet acked by peer - * @rcv_unacked: # messages read by user, but not yet acked back to peer - */ - -struct tipc_sock { - struct sock sk; - struct tipc_port port; - unsigned int conn_timeout; - atomic_t dupl_rcvcnt; - int link_cong; - uint sent_unacked; - uint rcv_unacked; -}; - -static inline struct tipc_sock *tipc_sk(const struct sock *sk) -{ - return container_of(sk, struct tipc_sock, sk); -} - -static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port) -{ - return container_of(port, struct tipc_sock, port); -} - -static inline void tipc_sock_wakeup(struct tipc_sock *tsk) -{ - tsk->sk.sk_write_space(&tsk->sk); -} - -static inline int tipc_sk_conn_cong(struct tipc_sock *tsk) -{ - return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; -} - +#define TIPC_CONNACK_INTV 256 +#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) +#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ + SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) int tipc_sk_rcv(struct sk_buff *buf); - +struct sk_buff *tipc_sk_socks_show(void); void tipc_sk_mcast_rcv(struct sk_buff *buf); +void tipc_sk_reinit(void); +int tipc_sk_ref_table_init(u32 requested_size, u32 start); +void tipc_sk_ref_table_stop(void); #endif diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 642437231ad5d2da2e7c9de5bd4494b082f3472e..31b5cb232a4371e3728ed9e7b934c3560f9ed92c 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -36,7 +36,6 @@ #include "core.h" #include "name_table.h" -#include "port.h" #include "subscr.h" /** diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c index f3fef93325a8977c0e22ffe8cb0fd9a2302cff0e..1a779b1e85100b501c1f0cde29cbfaa0d98fb4eb 100644 --- a/net/tipc/sysctl.c +++ b/net/tipc/sysctl.c @@ -47,6 +47,13 @@ static struct ctl_table tipc_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "named_timeout", + .data = &sysctl_tipc_named_timeout, + .maxlen = sizeof(sysctl_tipc_named_timeout), + .mode = 0644, + .proc_handler = proc_dointvec, + }, {} }; diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 9bc73f87f64a3b547c3b48c6bafb29825dd2e5c1..99f7012b23b9f2101bc8b3a35ed27871a7835b1d 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -258,7 +258,7 @@ static void inc_inflight_move_tail(struct unix_sock *u) list_move_tail(&u->link, &gc_candidates); } -static bool gc_in_progress = false; +static bool gc_in_progress; #define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c index 72273abfcb16beab16d4eff6b43457df3f802ec3..a21508d11036681c2ec069c16a24314d52f52fe0 100644 --- a/net/wimax/id-table.c +++ b/net/wimax/id-table.c @@ -137,7 +137,7 @@ void wimax_id_table_release(void) #endif spin_lock(&wimax_id_table_lock); list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) { - printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d 
not cleared\n", + pr_err("BUG: %s wimax_dev %p ifindex %d not cleared\n", __func__, wimax_dev, wimax_dev->net_dev->ifindex); WARN_ON(1); } diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index c278b3356f75fe2de50b99b9e7e71609eb25a8df..54aa146930bd819a45ce3520484326553d37ad16 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c @@ -189,7 +189,7 @@ const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size) nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { - printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); + pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return NULL; } *size = nla_len(nla); @@ -211,7 +211,7 @@ const void *wimax_msg_data(struct sk_buff *msg) nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { - printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); + pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return NULL; } return nla_data(nla); @@ -232,7 +232,7 @@ ssize_t wimax_msg_len(struct sk_buff *msg) nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr), WIMAX_GNL_MSG_DATA); if (nla == NULL) { - printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n"); + pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n"); return -EINVAL; } return nla_len(nla); @@ -343,8 +343,7 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info) d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); result = -ENODEV; if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) { - printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX " - "attribute\n"); + pr_err("WIMAX_GNL_MSG_FROM_USER: can't find IFIDX attribute\n"); goto error_no_wimax_dev; } ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]); diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index eb4580784d9dc5abdcaf5be27b24578ed6b0ca0e..a42079165e1f974a28897c9224190d17e4bf854e 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c @@ -107,8 +107,7 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); result = -ENODEV; if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) { - printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX " - "attribute\n"); + pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n"); goto error_no_wimax_dev; } ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]); diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index 403078d670a909cb20b4660e1a33965fd996bb52..7d730543f24304dd64b7c9d1d855a6469c8727d8 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c @@ -421,8 +421,7 @@ int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info) d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); result = -ENODEV; if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) { - printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX " - "attribute\n"); + pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n"); goto error_no_wimax_dev; } ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]); diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c index 995c08c827b512bf4a9e6650f77ac834015e60fe..e6788d281d0ef4a1a7ca83e3e4be2fcff33d7d3f 100644 --- a/net/wimax/op-state-get.c +++ b/net/wimax/op-state-get.c @@ -49,8 +49,7 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); result = -ENODEV; if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) { - printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX " 
- "attribute\n"); + pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n"); goto error_no_wimax_dev; } ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]); diff --git a/net/wimax/stack.c b/net/wimax/stack.c index ec8b577db1354a6e6c33e631a3223f1fb5af7ccf..3f816e2971ee26d97bfb4cd12976d6f67980fce6 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c @@ -191,8 +191,8 @@ void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, unsigned int allowed_states_bm) { if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { - printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", - old_state, new_state); + pr_err("SW BUG! Forbidden state change %u -> %u\n", + old_state, new_state); } } @@ -602,8 +602,7 @@ int __init wimax_subsys_init(void) wimax_gnl_ops, wimax_gnl_mcgrps); if (unlikely(result < 0)) { - printk(KERN_ERR "cannot register generic netlink family: %d\n", - result); + pr_err("cannot register generic netlink family: %d\n", result); goto error_register_family; } diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h index b445b82020a83143aee4e9e89815dc921fdb4759..733c4bf8d4b3fe8ad50b33fec033126baa1017cc 100644 --- a/net/wimax/wimax-internal.h +++ b/net/wimax/wimax-internal.h @@ -30,6 +30,12 @@ #define __WIMAX_INTERNAL_H__ #ifdef __KERNEL__ +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 992b34070bcb16f9fe4323bae293996524f1373c..72d81e2154d590dbba26bf2d1e0868c5c4940e26 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -4,6 +4,7 @@ * any point in time. * * Copyright 2009 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH */ #include diff --git a/net/wireless/core.c b/net/wireless/core.c index afee5e0455ea460b9b3d2e39c27e7eb0d881fa9f..f52a4cd7017c855c1c64f9ee2bbe2b0b44b625cc 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -2,6 +2,7 @@ * This is the linux wireless configuration interface. * * Copyright 2006-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -492,12 +493,6 @@ int wiphy_register(struct wiphy *wiphy) int i; u16 ifmodes = wiphy->interface_modes; - /* - * There are major locking problems in nl80211/mac80211 for CSA, - * disable for all drivers until this has been reworked. 
- */ - wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH; - #ifdef CONFIG_PM if (WARN_ON(wiphy->wowlan && (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && @@ -635,6 +630,9 @@ int wiphy_register(struct wiphy *wiphy) if (IS_ERR(rdev->wiphy.debugfsdir)) rdev->wiphy.debugfsdir = NULL; + cfg80211_debugfs_rdev_add(rdev); + nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); + if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { struct regulatory_request request; @@ -646,8 +644,6 @@ int wiphy_register(struct wiphy *wiphy) nl80211_send_reg_change_event(&request); } - cfg80211_debugfs_rdev_add(rdev); - rdev->wiphy.registered = true; rtnl_unlock(); @@ -659,8 +655,6 @@ int wiphy_register(struct wiphy *wiphy) return res; } - nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); - return 0; } EXPORT_SYMBOL(wiphy_register); @@ -1012,7 +1006,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, rdev->devlist_generation++; cfg80211_mlme_purge_registrations(wdev); #ifdef CONFIG_CFG80211_WEXT - kfree(wdev->wext.keys); + kzfree(wdev->wext.keys); #endif } /* diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 8f345da3ea5f4e0767ae6b1de539f18954707148..e24fc585c8834782295558481f0f592be743e6dc 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -115,7 +115,7 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, } if (WARN_ON(wdev->connect_keys)) - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = connkeys; wdev->ibss_fixed = params->channel_fixed; @@ -161,7 +161,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) ASSERT_WDEV_LOCK(wdev); - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = NULL; rdev_set_qos_map(rdev, dev, NULL); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 266766b8d80b61455565cc43779a6e229ed7710d..2c52b59e43f319670e6ce054b0eda1bde995ce1c 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -19,7 +19,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *buf, size_t len) + const u8 *buf, size_t len, int uapsd_queues) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -43,7 +43,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, return; } - nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); + nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues); /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, status_code, @@ -605,7 +605,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, } bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, - const u8 *buf, size_t len, u32 flags, gfp_t gfp) + const u8 *buf, size_t len, u32 flags) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); @@ -648,7 +648,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, freq, sig_mbm, - buf, len, flags, gfp)) + buf, len, flags, GFP_ATOMIC)) continue; result = true; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7257164af91bf2f80318bd88d6ed74071df9ce38..cb9f5a44ffadf7175d109cadefb091bbfdbb41e4 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2,6 +2,7 @@ * This is the new netlink-based wireless configuration 
interface. * * Copyright 2006-2010 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH */ #include @@ -225,6 +226,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, + [NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG }, [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, @@ -388,6 +390,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY }, + [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG }, + [NL80211_ATTR_TSID] = { .type = NLA_U8 }, + [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 }, + [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, + [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, }; /* policy for the key attributes */ @@ -1507,6 +1514,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) CMD(channel_switch, CHANNEL_SWITCH); CMD(set_qos_map, SET_QOS_MAP); + if (rdev->wiphy.flags & + WIPHY_FLAG_SUPPORTS_WMM_ADMISSION) + CMD(add_tx_ts, ADD_TX_TS); } /* add into the if now */ #undef CMD @@ -2237,11 +2247,21 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) } if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { + if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) + return -EINVAL; + coverage_class = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); changed |= WIPHY_PARAM_COVERAGE_CLASS; } + if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { + if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) + return -EOPNOTSUPP; + + changed |= WIPHY_PARAM_DYN_ACK; + } + if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; @@ -3326,6 +3346,29 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(params.acl); } + if (info->attrs[NL80211_ATTR_SMPS_MODE]) { + params.smps_mode = + nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]); + switch (params.smps_mode) { + case NL80211_SMPS_OFF: + break; + case NL80211_SMPS_STATIC: + if (!(rdev->wiphy.features & + NL80211_FEATURE_STATIC_SMPS)) + return -EINVAL; + break; + case NL80211_SMPS_DYNAMIC: + if (!(rdev->wiphy.features & + NL80211_FEATURE_DYNAMIC_SMPS)) + return -EINVAL; + break; + default: + return -EINVAL; + } + } else { + params.smps_mode = NL80211_SMPS_OFF; + } + wdev_lock(wdev); err = rdev_start_ap(rdev, dev, &params); if (!err) { @@ -6033,7 +6076,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, const struct cfg80211_bss_ies *ies; void *hdr; struct nlattr *bss; - bool tsf = false; ASSERT_WDEV_LOCK(wdev); @@ -6060,18 +6102,27 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, goto nla_put_failure; rcu_read_lock(); + /* indicate whether we have probe response data or not */ + if (rcu_access_pointer(res->proberesp_ies) && + nla_put_flag(msg, NL80211_BSS_PRESP_DATA)) + goto fail_unlock_rcu; + + /* this pointer prefers to be pointed to probe response data + * but is always valid + */ ies = rcu_dereference(res->ies); if (ies) { if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) goto fail_unlock_rcu; - tsf = true; if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
ies->len, ies->data)) goto fail_unlock_rcu; } + + /* and this pointer is always (unless driver didn't know) beacon data */ ies = rcu_dereference(res->beacon_ies); - if (ies) { - if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) + if (ies && ies->from_beacon) { + if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, ies->len, ies->data)) @@ -6575,6 +6626,14 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) sizeof(req.vht_capa)); } + if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { + if (!(rdev->wiphy.features & + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || + !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) + return -EINVAL; + req.flags |= ASSOC_REQ_USE_RRM; + } + err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); if (!err) { wdev_lock(dev->ieee80211_ptr); @@ -6837,7 +6896,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); if (err) - kfree(connkeys); + kzfree(connkeys); return err; } @@ -7209,7 +7268,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { - kfree(connkeys); + kzfree(connkeys); return -EINVAL; } memcpy(&connect.ht_capa, @@ -7227,7 +7286,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) { - kfree(connkeys); + kzfree(connkeys); return -EINVAL; } memcpy(&connect.vht_capa, @@ -7235,11 +7294,19 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) sizeof(connect.vht_capa)); } + if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { + if (!(rdev->wiphy.features & + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || + !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) + return -EINVAL; + connect.flags |= ASSOC_REQ_USE_RRM; + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); if (err) - kfree(connkeys); + kzfree(connkeys); return err; } @@ -8925,13 +8992,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; - memcpy(rekey_data.kek, nla_data(tb[NL80211_REKEY_DATA_KEK]), - NL80211_KEK_LEN); - memcpy(rekey_data.kck, nla_data(tb[NL80211_REKEY_DATA_KCK]), - NL80211_KCK_LEN); - memcpy(rekey_data.replay_ctr, - nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]), - NL80211_REPLAY_CTR_LEN); + rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); + rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); + rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); wdev_lock(wdev); if (!wdev->current_bss) { @@ -9363,6 +9426,93 @@ static int nl80211_set_qos_map(struct sk_buff *skb, return ret; } +static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + const u8 *peer; + u8 tsid, up; + u16 admitted_time = 0; + int err; + + if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] || + !info->attrs[NL80211_ATTR_USER_PRIO]) + return -EINVAL; + + tsid = 
nla_get_u8(info->attrs[NL80211_ATTR_TSID]); + if (tsid >= IEEE80211_NUM_TIDS) + return -EINVAL; + + up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]); + if (up >= IEEE80211_NUM_UPS) + return -EINVAL; + + /* WMM uses TIDs 0-7 even for TSPEC */ + if (tsid < IEEE80211_FIRST_TSPEC_TSID) { + if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)) + return -EINVAL; + } else { + /* TODO: handle 802.11 TSPEC/admission control + * need more attributes for that (e.g. BA session requirement) + */ + return -EINVAL; + } + + peer = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_ADMITTED_TIME]) { + admitted_time = + nla_get_u16(info->attrs[NL80211_ATTR_ADMITTED_TIME]); + if (!admitted_time) + return -EINVAL; + } + + wdev_lock(wdev); + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + if (wdev->current_bss) + break; + err = -ENOTCONN; + goto out; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time); + + out: + wdev_unlock(wdev); + return err; +} + +static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + const u8 *peer; + u8 tsid; + int err; + + if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]); + peer = nla_data(info->attrs[NL80211_ATTR_MAC]); + + wdev_lock(wdev); + err = rdev_del_tx_ts(rdev, dev, tsid, peer); + wdev_unlock(wdev); + + return err; +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -9373,6 +9523,7 @@ static int nl80211_set_qos_map(struct sk_buff *skb, /* If a netdev is associated, it must be UP, P2P must be started */ #define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ NL80211_FLAG_CHECK_NETDEV_UP) +#define NL80211_FLAG_CLEAR_SKB 0x20 static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) @@ -9456,8 +9607,20 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb, dev_put(info->user_ptr[1]); } } + if (ops->internal_flags & NL80211_FLAG_NEED_RTNL) rtnl_unlock(); + + /* If needed, clear the netlink message payload from the SKB + * as it might contain key data that shouldn't stick around on + * the heap after the SKB is freed. The netlink message header + * is still needed for further processing, so leave it intact. 
+ */ + if (ops->internal_flags & NL80211_FLAG_CLEAR_SKB) { + struct nlmsghdr *nlh = nlmsg_hdr(skb); + + memset(nlmsg_data(nlh), 0, nlmsg_len(nlh)); + } } static const struct genl_ops nl80211_ops[] = { @@ -9525,7 +9688,8 @@ static const struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | - NL80211_FLAG_NEED_RTNL, + NL80211_FLAG_NEED_RTNL | + NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_NEW_KEY, @@ -9533,7 +9697,8 @@ static const struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | - NL80211_FLAG_NEED_RTNL, + NL80211_FLAG_NEED_RTNL | + NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_DEL_KEY, @@ -9711,7 +9876,8 @@ static const struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | - NL80211_FLAG_NEED_RTNL, + NL80211_FLAG_NEED_RTNL | + NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_ASSOCIATE, @@ -9945,7 +10111,8 @@ static const struct genl_ops nl80211_ops[] = { .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | - NL80211_FLAG_NEED_RTNL, + NL80211_FLAG_NEED_RTNL | + NL80211_FLAG_CLEAR_SKB, }, { .cmd = NL80211_CMD_TDLS_MGMT, @@ -10103,6 +10270,22 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_ADD_TX_TS, + .doit = nl80211_add_tx_ts, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_DEL_TX_TS, + .doit = nl80211_del_tx_ts, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, }; /* notification functions */ @@ -10371,7 +10554,8 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, - enum nl80211_commands cmd, gfp_t gfp) + enum nl80211_commands cmd, gfp_t gfp, + int uapsd_queues) { struct sk_buff *msg; void *hdr; @@ -10391,6 +10575,19 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, nla_put(msg, NL80211_ATTR_FRAME, len, buf)) goto nla_put_failure; + if (uapsd_queues >= 0) { + struct nlattr *nla_wmm = + nla_nest_start(msg, NL80211_ATTR_STA_WME); + if (!nla_wmm) + goto nla_put_failure; + + if (nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES, + uapsd_queues)) + goto nla_put_failure; + + nla_nest_end(msg, nla_wmm); + } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, @@ -10407,15 +10604,15 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_AUTHENTICATE, gfp); + NL80211_CMD_AUTHENTICATE, gfp, -1); } void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, - size_t len, gfp_t gfp) + size_t len, gfp_t gfp, int uapsd_queues) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_ASSOCIATE, gfp); + NL80211_CMD_ASSOCIATE, gfp, uapsd_queues); } void nl80211_send_deauth(struct cfg80211_registered_device *rdev, @@ -10423,7 +10620,7 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev, size_t len, gfp_t gfp) { 
nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_DEAUTHENTICATE, gfp); + NL80211_CMD_DEAUTHENTICATE, gfp, -1); } void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, @@ -10431,7 +10628,7 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_DISASSOCIATE, gfp); + NL80211_CMD_DISASSOCIATE, gfp, -1); } void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, @@ -10452,7 +10649,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, cmd = NL80211_CMD_UNPROT_DISASSOCIATE; trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); - nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC); + nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1); } EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 49c9a482dd1248565f0b7f333ecacf5f027ff650..7ad70d6f0cc699147e26c679c298d1200e182f1a 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -23,7 +23,8 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, const u8 *buf, size_t len, gfp_t gfp); void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, - const u8 *buf, size_t len, gfp_t gfp); + const u8 *buf, size_t len, gfp_t gfp, + int uapsd_queues); void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp); diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 56c2240c30cefcc5a386b4da67a0e49686e47e4a..f6d457d6a558a63cea2787b921af8c2ec66e9843 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -915,4 +915,35 @@ rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_add_tx_ts(struct cfg80211_registered_device *rdev, + struct net_device *dev, u8 tsid, const u8 *peer, + u8 user_prio, u16 admitted_time) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer, + user_prio, admitted_time); + if (rdev->ops->add_tx_ts) + ret = rdev->ops->add_tx_ts(&rdev->wiphy, dev, tsid, peer, + user_prio, admitted_time); + trace_rdev_return_int(&rdev->wiphy, ret); + + return ret; +} + +static inline int +rdev_del_tx_ts(struct cfg80211_registered_device *rdev, + struct net_device *dev, u8 tsid, const u8 *peer) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_del_tx_ts(&rdev->wiphy, dev, tsid, peer); + if (rdev->ops->del_tx_ts) + ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer); + trace_rdev_return_int(&rdev->wiphy, ret); + + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 1afdf45db38f216bb750a905dcdb5a85ae7d0897..b725a31a475178ec99de0e9aabc4d89e2ebb7d40 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -3,6 +3,7 @@ * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg * Copyright 2008-2011 Luis R. 
Rodriguez + * Copyright 2013-2014 Intel Mobile Communications GmbH * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -798,6 +799,57 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, return 0; } +/* check whether old rule contains new rule */ +static bool rule_contains(struct ieee80211_reg_rule *r1, + struct ieee80211_reg_rule *r2) +{ + /* for simplicity, currently consider only same flags */ + if (r1->flags != r2->flags) + return false; + + /* verify r1 is more restrictive */ + if ((r1->power_rule.max_antenna_gain > + r2->power_rule.max_antenna_gain) || + r1->power_rule.max_eirp > r2->power_rule.max_eirp) + return false; + + /* make sure r2's range is contained within r1 */ + if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz || + r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz) + return false; + + /* and finally verify that r1.max_bw >= r2.max_bw */ + if (r1->freq_range.max_bandwidth_khz < + r2->freq_range.max_bandwidth_khz) + return false; + + return true; +} + +/* add or extend current rules. do nothing if rule is already contained */ +static void add_rule(struct ieee80211_reg_rule *rule, + struct ieee80211_reg_rule *reg_rules, u32 *n_rules) +{ + struct ieee80211_reg_rule *tmp_rule; + int i; + + for (i = 0; i < *n_rules; i++) { + tmp_rule = &reg_rules[i]; + /* rule is already contained - do nothing */ + if (rule_contains(tmp_rule, rule)) + return; + + /* extend rule if possible */ + if (rule_contains(rule, tmp_rule)) { + memcpy(tmp_rule, rule, sizeof(*rule)); + return; + } + } + + memcpy(&reg_rules[*n_rules], rule, sizeof(*rule)); + (*n_rules)++; +} + /** * regdom_intersect - do the intersection between two regulatory domains * @rd1: first regulatory domain @@ -817,12 +869,10 @@ regdom_intersect(const struct ieee80211_regdomain *rd1, { int r, size_of_regd; unsigned int x, y; - unsigned int num_rules = 0, rule_idx = 0; + unsigned int num_rules = 0; const struct ieee80211_reg_rule *rule1, *rule2; - struct ieee80211_reg_rule *intersected_rule; + struct ieee80211_reg_rule intersected_rule; struct ieee80211_regdomain *rd; - /* This is just a dummy holder to help us count */ - struct ieee80211_reg_rule dummy_rule; if (!rd1 || !rd2) return NULL; @@ -840,7 +890,7 @@ regdom_intersect(const struct ieee80211_regdomain *rd1, for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; if (!reg_rules_intersect(rd1, rd2, rule1, rule2, - &dummy_rule)) + &intersected_rule)) num_rules++; } } @@ -855,34 +905,24 @@ regdom_intersect(const struct ieee80211_regdomain *rd1, if (!rd) return NULL; - for (x = 0; x < rd1->n_reg_rules && rule_idx < num_rules; x++) { + for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; - for (y = 0; y < rd2->n_reg_rules && rule_idx < num_rules; y++) { + for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; - /* - * This time around instead of using the stack lets - * write to the target rule directly saving ourselves - * a memcpy() - */ - intersected_rule = &rd->reg_rules[rule_idx]; r = reg_rules_intersect(rd1, rd2, rule1, rule2, - intersected_rule); + &intersected_rule); /* * No need to memset here the intersected rule here as * we're not using the stack anymore */ if (r) continue; - rule_idx++; - } - } - if (rule_idx != num_rules) { - kfree(rd); - return NULL; + add_rule(&intersected_rule, rd->reg_rules, + &rd->n_reg_rules); + } } - rd->n_reg_rules = num_rules; rd->alpha2[0] = '9'; rd->alpha2[1] =
'8'; rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region, diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 0798c62e60858cb81d1df3d4829414665808268b..bda39f149810b6e9c377c0b8911bd730feeb3395 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2,6 +2,7 @@ * cfg80211 scan result handling * * Copyright 2008 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH */ #include #include @@ -884,6 +885,7 @@ struct cfg80211_bss* cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) @@ -911,21 +913,32 @@ cfg80211_inform_bss_width(struct wiphy *wiphy, tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; /* - * Since we do not know here whether the IEs are from a Beacon or Probe + * If we do not know here whether the IEs are from a Beacon or Probe * Response frame, we need to pick one of the options and only use it * with the driver that does not provide the full Beacon/Probe Response * frame. Use Beacon frame pointer to avoid indicating that this should * override the IEs pointer should we have received an earlier * indication of Probe Response data. */ - ies = kmalloc(sizeof(*ies) + ielen, gfp); + ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = tsf; + ies->from_beacon = false; memcpy(ies->data, ie, ielen); - rcu_assign_pointer(tmp.pub.beacon_ies, ies); + switch (ftype) { + case CFG80211_BSS_FTYPE_BEACON: + ies->from_beacon = true; + /* fall through to assign */ + case CFG80211_BSS_FTYPE_UNKNOWN: + rcu_assign_pointer(tmp.pub.beacon_ies, ies); + break; + case CFG80211_BSS_FTYPE_PRESP: + rcu_assign_pointer(tmp.pub.proberesp_ies, ies); + break; + } rcu_assign_pointer(tmp.pub.ies, ies); signal_valid = abs(rx_channel->center_freq - channel->center_freq) <= @@ -982,11 +995,12 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy, if (!channel) return NULL; - ies = kmalloc(sizeof(*ies) + ielen, gfp); + ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); + ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); memcpy(ies->data, mgmt->u.probe_resp.variable, ielen); if (ieee80211_is_probe_resp(mgmt->frame_control)) diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8bbeeb302216223a260085634bf6b5627c209783..dc1668ff543b90927d9cb4e08b38897ac8e97f6c 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -641,7 +641,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } if (status != WLAN_STATUS_SUCCESS) { - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; if (bss) { @@ -918,7 +918,7 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->connect_keys)) { - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = NULL; } @@ -978,7 +978,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = NULL; if (wdev->conn) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 0c524cd76c837ccf23a070a802e1ab405fd235f4..625a6e6d1168376e7d5b3f55d09729ebc786f1f6 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ 
-1896,6 +1896,51 @@ TRACE_EVENT(rdev_set_ap_chanwidth, WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); +TRACE_EVENT(rdev_add_tx_ts, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time), + TP_ARGS(wiphy, netdev, tsid, peer, user_prio, admitted_time), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(peer) + __field(u8, tsid) + __field(u8, user_prio) + __field(u16, admitted_time) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(peer, peer); + __entry->tsid = tsid; + __entry->user_prio = user_prio; + __entry->admitted_time = admitted_time; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d, UP %d, time %d", + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), + __entry->tsid, __entry->user_prio, __entry->admitted_time) +); + +TRACE_EVENT(rdev_del_tx_ts, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + u8 tsid, const u8 *peer), + TP_ARGS(wiphy, netdev, tsid, peer), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(peer) + __field(u8, tsid) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(peer, peer); + __entry->tsid = tsid; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d", + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ diff --git a/net/wireless/util.c b/net/wireless/util.c index 728f1c0dc70dbdb7c85558b84b83dc9b66e4083f..5e233a577d0fcd15e39b5eebc5678598c37637df 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -2,6 +2,7 @@ * Wireless utility functions * * Copyright 2007-2009 Johannes Berg + * Copyright 2013-2014 Intel Mobile Communications GmbH */ #include #include @@ -796,7 +797,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) netdev_err(dev, "failed to set mgtdef %d\n", i); } - kfree(wdev->connect_keys); + kzfree(wdev->connect_keys); wdev->connect_keys = NULL; } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 11120bb14162505043579628bed2ad131ba41f7d..0f47948c572f5eae30403d98be88e4d233c1903a 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -496,6 +496,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, err = 0; if (!err) { if (!addr) { + memset(wdev->wext.keys->data[idx], 0, + sizeof(wdev->wext.keys->data[idx])); wdev->wext.keys->params[idx].key_len = 0; wdev->wext.keys->params[idx].cipher = 0; } diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index c7e5c8eb4f24708a27d90ae298a85c825ee81e38..368611c0573997e45f433c68a0a2c0e9de17270b 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -57,7 +57,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, err = cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) - kfree(ck); + kzfree(ck); return err; } diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index 0622d319e1f269c31e8a6af27e4c986832948534..666c5ffe929dca388b218d99c2d71946a231872f 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h @@ -3,6 +3,7 @@ #include #include +#include static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr) { @@ -28,6 +29,58 @@ static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr, saddr->a6[2] ^ saddr->a6[3]); } +static inline u32 
__bits2mask32(__u8 bits) +{ + u32 mask32 = 0xffffffff; + + if (bits == 0) + mask32 = 0; + else if (bits < 32) + mask32 <<= (32 - bits); + + return mask32; +} + +static inline unsigned int __xfrm4_dpref_spref_hash(const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + __u8 dbits, + __u8 sbits) +{ + return jhash_2words(ntohl(daddr->a4) & __bits2mask32(dbits), + ntohl(saddr->a4) & __bits2mask32(sbits), + 0); +} + +static inline unsigned int __xfrm6_pref_hash(const xfrm_address_t *addr, + __u8 prefixlen) +{ + int pdw; + int pbi; + u32 initval = 0; + + pdw = prefixlen >> 5; /* num of whole u32 in prefix */ + pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */ + + if (pbi) { + __be32 mask; + + mask = htonl((0xffffffff) << (32 - pbi)); + + initval = (__force u32)(addr->a6[pdw] & mask); + } + + return jhash2((__force u32 *)addr->a6, pdw, initval); +} + +static inline unsigned int __xfrm6_dpref_spref_hash(const xfrm_address_t *daddr, + const xfrm_address_t *saddr, + __u8 dbits, + __u8 sbits) +{ + return __xfrm6_pref_hash(daddr, dbits) ^ + __xfrm6_pref_hash(saddr, sbits); +} + static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr, const xfrm_address_t *saddr, u32 reqid, unsigned short family, @@ -84,7 +137,8 @@ static inline unsigned int __idx_hash(u32 index, unsigned int hmask) } static inline unsigned int __sel_hash(const struct xfrm_selector *sel, - unsigned short family, unsigned int hmask) + unsigned short family, unsigned int hmask, + u8 dbits, u8 sbits) { const xfrm_address_t *daddr = &sel->daddr; const xfrm_address_t *saddr = &sel->saddr; @@ -92,19 +146,19 @@ static inline unsigned int __sel_hash(const struct xfrm_selector *sel, switch (family) { case AF_INET: - if (sel->prefixlen_d != 32 || - sel->prefixlen_s != 32) + if (sel->prefixlen_d < dbits || + sel->prefixlen_s < sbits) return hmask + 1; - h = __xfrm4_daddr_saddr_hash(daddr, saddr); + h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); break; case AF_INET6: - if (sel->prefixlen_d != 128 || - sel->prefixlen_s != 128) + if (sel->prefixlen_d < dbits || + sel->prefixlen_s < sbits) return hmask + 1; - h = __xfrm6_daddr_saddr_hash(daddr, saddr); + h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); break; } h ^= (h >> 16); @@ -113,17 +167,19 @@ static inline unsigned int __sel_hash(const struct xfrm_selector *sel, static inline unsigned int __addr_hash(const xfrm_address_t *daddr, const xfrm_address_t *saddr, - unsigned short family, unsigned int hmask) + unsigned short family, + unsigned int hmask, + u8 dbits, u8 sbits) { unsigned int h = 0; switch (family) { case AF_INET: - h = __xfrm4_daddr_saddr_hash(daddr, saddr); + h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits); break; case AF_INET6: - h = __xfrm6_daddr_saddr_hash(daddr, saddr); + h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits); break; } h ^= (h >> 16); diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index c51e8f7b8653cb167aba13f61c2d7520d615c31c..499d6c18a8ce4366a5fe61184eb7416bfb060401 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -166,11 +166,7 @@ static int xfrm_output_gso(struct sk_buff *skb) err = xfrm_output2(segs); if (unlikely(err)) { - while ((segs = nskb)) { - nskb = segs->next; - segs->next = NULL; - kfree_skb(segs); - } + kfree_skb_list(nskb); return err; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index fdde51f4271adf22b26e36c46f76c2b93c4ce56e..4c4e457e788861a1255dc654cc4d4eec80c22fda 100644 --- a/net/xfrm/xfrm_policy.c +++ 
b/net/xfrm/xfrm_policy.c @@ -349,12 +349,39 @@ static inline unsigned int idx_hash(struct net *net, u32 index) return __idx_hash(index, net->xfrm.policy_idx_hmask); } +/* calculate policy hash thresholds */ +static void __get_hash_thresh(struct net *net, + unsigned short family, int dir, + u8 *dbits, u8 *sbits) +{ + switch (family) { + case AF_INET: + *dbits = net->xfrm.policy_bydst[dir].dbits4; + *sbits = net->xfrm.policy_bydst[dir].sbits4; + break; + + case AF_INET6: + *dbits = net->xfrm.policy_bydst[dir].dbits6; + *sbits = net->xfrm.policy_bydst[dir].sbits6; + break; + + default: + *dbits = 0; + *sbits = 0; + } +} + static struct hlist_head *policy_hash_bysel(struct net *net, const struct xfrm_selector *sel, unsigned short family, int dir) { unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; - unsigned int hash = __sel_hash(sel, family, hmask); + unsigned int hash; + u8 dbits; + u8 sbits; + + __get_hash_thresh(net, family, dir, &dbits, &sbits); + hash = __sel_hash(sel, family, hmask, dbits, sbits); return (hash == hmask + 1 ? &net->xfrm.policy_inexact[dir] : @@ -367,25 +394,35 @@ static struct hlist_head *policy_hash_direct(struct net *net, unsigned short family, int dir) { unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; - unsigned int hash = __addr_hash(daddr, saddr, family, hmask); + unsigned int hash; + u8 dbits; + u8 sbits; + + __get_hash_thresh(net, family, dir, &dbits, &sbits); + hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits); return net->xfrm.policy_bydst[dir].table + hash; } -static void xfrm_dst_hash_transfer(struct hlist_head *list, +static void xfrm_dst_hash_transfer(struct net *net, + struct hlist_head *list, struct hlist_head *ndsttable, - unsigned int nhashmask) + unsigned int nhashmask, + int dir) { struct hlist_node *tmp, *entry0 = NULL; struct xfrm_policy *pol; unsigned int h0 = 0; + u8 dbits; + u8 sbits; redo: hlist_for_each_entry_safe(pol, tmp, list, bydst) { unsigned int h; + __get_hash_thresh(net, pol->family, dir, &dbits, &sbits); h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, - pol->family, nhashmask); + pol->family, nhashmask, dbits, sbits); if (!entry0) { hlist_del(&pol->bydst); hlist_add_head(&pol->bydst, ndsttable+h); @@ -439,7 +476,7 @@ static void xfrm_bydst_resize(struct net *net, int dir) write_lock_bh(&net->xfrm.xfrm_policy_lock); for (i = hmask; i >= 0; i--) - xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); + xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir); net->xfrm.policy_bydst[dir].table = ndst; net->xfrm.policy_bydst[dir].hmask = nhashmask; @@ -534,6 +571,86 @@ static void xfrm_hash_resize(struct work_struct *work) mutex_unlock(&hash_resize_mutex); } +static void xfrm_hash_rebuild(struct work_struct *work) +{ + struct net *net = container_of(work, struct net, + xfrm.policy_hthresh.work); + unsigned int hmask; + struct xfrm_policy *pol; + struct xfrm_policy *policy; + struct hlist_head *chain; + struct hlist_head *odst; + struct hlist_node *newpos; + int i; + int dir; + unsigned seq; + u8 lbits4, rbits4, lbits6, rbits6; + + mutex_lock(&hash_resize_mutex); + + /* read selector prefixlen thresholds */ + do { + seq = read_seqbegin(&net->xfrm.policy_hthresh.lock); + + lbits4 = net->xfrm.policy_hthresh.lbits4; + rbits4 = net->xfrm.policy_hthresh.rbits4; + lbits6 = net->xfrm.policy_hthresh.lbits6; + rbits6 = net->xfrm.policy_hthresh.rbits6; + } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); + + write_lock_bh(&net->xfrm.xfrm_policy_lock); + + /* reset the bydst and inexact table in all 
directions */ + for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { + INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); + hmask = net->xfrm.policy_bydst[dir].hmask; + odst = net->xfrm.policy_bydst[dir].table; + for (i = hmask; i >= 0; i--) + INIT_HLIST_HEAD(odst + i); + if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) { + /* dir out => dst = remote, src = local */ + net->xfrm.policy_bydst[dir].dbits4 = rbits4; + net->xfrm.policy_bydst[dir].sbits4 = lbits4; + net->xfrm.policy_bydst[dir].dbits6 = rbits6; + net->xfrm.policy_bydst[dir].sbits6 = lbits6; + } else { + /* dir in/fwd => dst = local, src = remote */ + net->xfrm.policy_bydst[dir].dbits4 = lbits4; + net->xfrm.policy_bydst[dir].sbits4 = rbits4; + net->xfrm.policy_bydst[dir].dbits6 = lbits6; + net->xfrm.policy_bydst[dir].sbits6 = rbits6; + } + } + + /* re-insert all policies by order of creation */ + list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { + newpos = NULL; + chain = policy_hash_bysel(net, &policy->selector, + policy->family, + xfrm_policy_id2dir(policy->index)); + hlist_for_each_entry(pol, chain, bydst) { + if (policy->priority >= pol->priority) + newpos = &pol->bydst; + else + break; + } + if (newpos) + hlist_add_behind(&policy->bydst, newpos); + else + hlist_add_head(&policy->bydst, chain); + } + + write_unlock_bh(&net->xfrm.xfrm_policy_lock); + + mutex_unlock(&hash_resize_mutex); +} + +void xfrm_policy_hash_rebuild(struct net *net) +{ + schedule_work(&net->xfrm.policy_hthresh.work); +} +EXPORT_SYMBOL(xfrm_policy_hash_rebuild); + /* Generate new index... KAME seems to generate them ordered by cost * of an absolute inpredictability of ordering of rules. This will not pass. */ static u32 xfrm_gen_index(struct net *net, int dir, u32 index) @@ -1844,10 +1961,8 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) struct xfrm_dst *xdst = (struct xfrm_dst *) dst; struct xfrm_policy *pol = xdst->pols[0]; struct xfrm_policy_queue *pq = &pol->polq; - const struct sk_buff *fclone = skb + 1; - if (unlikely(skb->fclone == SKB_FCLONE_ORIG && - fclone->fclone == SKB_FCLONE_CLONE)) { + if (unlikely(skb_fclone_busy(skb))) { kfree_skb(skb); return 0; } @@ -2862,10 +2977,21 @@ static int __net_init xfrm_policy_init(struct net *net) if (!htab->table) goto out_bydst; htab->hmask = hmask; + htab->dbits4 = 32; + htab->sbits4 = 32; + htab->dbits6 = 128; + htab->sbits6 = 128; } + net->xfrm.policy_hthresh.lbits4 = 32; + net->xfrm.policy_hthresh.rbits4 = 32; + net->xfrm.policy_hthresh.lbits6 = 128; + net->xfrm.policy_hthresh.rbits6 = 128; + + seqlock_init(&net->xfrm.policy_hthresh.lock); INIT_LIST_HEAD(&net->xfrm.policy_all); INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); + INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); if (net_eq(net, &init_net)) register_netdevice_notifier(&xfrm_dev_notifier); return 0; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 0ab54134bb40b84cf825df6035a9acf7cb3cf760..de971b6d38c58310f744988bb970ccb5438ef39b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -97,8 +97,6 @@ static unsigned long xfrm_hash_new_size(unsigned int state_hmask) return ((state_hmask + 1) << 1) * sizeof(struct hlist_head); } -static DEFINE_MUTEX(hash_resize_mutex); - static void xfrm_hash_resize(struct work_struct *work) { struct net *net = container_of(work, struct net, xfrm.state_hash_work); @@ -107,22 +105,20 @@ static void xfrm_hash_resize(struct work_struct *work) unsigned int nhashmask, ohashmask; int i; - mutex_lock(&hash_resize_mutex); - nsize = 
xfrm_hash_new_size(net->xfrm.state_hmask); ndst = xfrm_hash_alloc(nsize); if (!ndst) - goto out_unlock; + return; nsrc = xfrm_hash_alloc(nsize); if (!nsrc) { xfrm_hash_free(ndst, nsize); - goto out_unlock; + return; } nspi = xfrm_hash_alloc(nsize); if (!nspi) { xfrm_hash_free(ndst, nsize); xfrm_hash_free(nsrc, nsize); - goto out_unlock; + return; } spin_lock_bh(&net->xfrm.xfrm_state_lock); @@ -148,9 +144,6 @@ static void xfrm_hash_resize(struct work_struct *work) xfrm_hash_free(odst, osize); xfrm_hash_free(osrc, osize); xfrm_hash_free(ospi, osize); - -out_unlock: - mutex_unlock(&hash_resize_mutex); } static DEFINE_SPINLOCK(xfrm_state_afinfo_lock); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index d4db6ebb089d0dc4ba2abefd44df0123fa3148dd..e812e988c111b7d83b4e52321f394aabc9a69036 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -333,8 +333,7 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, algo = xfrm_aalg_get_byname(ualg->alg_name, 1); if (!algo) return -ENOSYS; - if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || - ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) + if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) return -EINVAL; *props = algo->desc.sadb_alg_id; @@ -964,7 +963,9 @@ static inline size_t xfrm_spdinfo_msgsize(void) { return NLMSG_ALIGN(4) + nla_total_size(sizeof(struct xfrmu_spdinfo)) - + nla_total_size(sizeof(struct xfrmu_spdhinfo)); + + nla_total_size(sizeof(struct xfrmu_spdhinfo)) + + nla_total_size(sizeof(struct xfrmu_spdhthresh)) + + nla_total_size(sizeof(struct xfrmu_spdhthresh)); } static int build_spdinfo(struct sk_buff *skb, struct net *net, @@ -973,9 +974,11 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; struct xfrmu_spdhinfo sph; + struct xfrmu_spdhthresh spt4, spt6; struct nlmsghdr *nlh; int err; u32 *f; + unsigned lseq; nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... 
*/ @@ -993,9 +996,22 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, sph.spdhcnt = si.spdhcnt; sph.spdhmcnt = si.spdhmcnt; + do { + lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock); + + spt4.lbits = net->xfrm.policy_hthresh.lbits4; + spt4.rbits = net->xfrm.policy_hthresh.rbits4; + spt6.lbits = net->xfrm.policy_hthresh.lbits6; + spt6.rbits = net->xfrm.policy_hthresh.rbits6; + } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq)); + err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); if (!err) err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); + if (!err) + err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4); + if (!err) + err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6); if (err) { nlmsg_cancel(skb, nlh); return err; @@ -1004,6 +1020,51 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, return nlmsg_end(skb, nlh); } +static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, + struct nlattr **attrs) +{ + struct net *net = sock_net(skb->sk); + struct xfrmu_spdhthresh *thresh4 = NULL; + struct xfrmu_spdhthresh *thresh6 = NULL; + + /* selector prefixlen thresholds to hash policies */ + if (attrs[XFRMA_SPD_IPV4_HTHRESH]) { + struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH]; + + if (nla_len(rta) < sizeof(*thresh4)) + return -EINVAL; + thresh4 = nla_data(rta); + if (thresh4->lbits > 32 || thresh4->rbits > 32) + return -EINVAL; + } + if (attrs[XFRMA_SPD_IPV6_HTHRESH]) { + struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH]; + + if (nla_len(rta) < sizeof(*thresh6)) + return -EINVAL; + thresh6 = nla_data(rta); + if (thresh6->lbits > 128 || thresh6->rbits > 128) + return -EINVAL; + } + + if (thresh4 || thresh6) { + write_seqlock(&net->xfrm.policy_hthresh.lock); + if (thresh4) { + net->xfrm.policy_hthresh.lbits4 = thresh4->lbits; + net->xfrm.policy_hthresh.rbits4 = thresh4->rbits; + } + if (thresh6) { + net->xfrm.policy_hthresh.lbits6 = thresh6->lbits; + net->xfrm.policy_hthresh.rbits6 = thresh6->rbits; + } + write_sequnlock(&net->xfrm.policy_hthresh.lock); + + xfrm_policy_hash_rebuild(net); + } + + return 0; +} + static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { @@ -2274,6 +2335,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), + [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), }; @@ -2308,10 +2370,17 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, }; +static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { + [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, + [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, +}; + static const struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); + const struct nla_policy *nla_pol; + int nla_max; } xfrm_dispatch[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, @@ -2335,6 +2404,9 @@ static const struct xfrm_link { [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = 
xfrm_do_migrate }, [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, + [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo, + .nla_pol = xfrma_spd_policy, + .nla_max = XFRMA_SPD_MAX }, [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, }; @@ -2371,8 +2443,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } } - err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, - xfrma_policy); + err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, + link->nla_max ? : XFRMA_MAX, + link->nla_pol ? : xfrma_policy); if (err < 0) return err; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..634391797856243d609bb81f973b6f89a3712944 --- /dev/null +++ b/samples/bpf/Makefile @@ -0,0 +1,12 @@ +# kbuild trick to avoid linker error. Can be omitted if a module is built. +obj- := dummy.o + +# List of programs to build +hostprogs-y := test_verifier + +test_verifier-objs := test_verifier.o libbpf.o + +# Tell kbuild to always build the programs +always := $(hostprogs-y) + +HOSTCFLAGS += -I$(objtree)/usr/include diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c new file mode 100644 index 0000000000000000000000000000000000000000..ff65044207384b908f511f48567e1fb57ec72982 --- /dev/null +++ b/samples/bpf/libbpf.c @@ -0,0 +1,94 @@ +/* eBPF mini library */ +#include <stdlib.h> +#include <stdio.h> +#include <linux/unistd.h> +#include <unistd.h> +#include <string.h> +#include <linux/netlink.h> +#include <linux/bpf.h> +#include <errno.h> +#include "libbpf.h" + +static __u64 ptr_to_u64(void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + +int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, + int max_entries) +{ + union bpf_attr attr = { + .map_type = map_type, + .key_size = key_size, + .value_size = value_size, + .max_entries = max_entries + }; + + return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); +} + +int bpf_update_elem(int fd, void *key, void *value) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = ptr_to_u64(key), + .value = ptr_to_u64(value), + }; + + return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); +} + +int bpf_lookup_elem(int fd, void *key, void *value) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = ptr_to_u64(key), + .value = ptr_to_u64(value), + }; + + return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); +} + +int bpf_delete_elem(int fd, void *key) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = ptr_to_u64(key), + }; + + return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); +} + +int bpf_get_next_key(int fd, void *key, void *next_key) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = ptr_to_u64(key), + .next_key = ptr_to_u64(next_key), + }; + + return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); +} + +#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u)) + +char bpf_log_buf[LOG_BUF_SIZE]; + +int bpf_prog_load(enum bpf_prog_type prog_type, + const struct bpf_insn *insns, int prog_len, + const char *license) +{ + union bpf_attr attr = { + .prog_type = prog_type, + .insns = ptr_to_u64((void *) insns), + .insn_cnt = prog_len / sizeof(struct bpf_insn), + .license = ptr_to_u64((void *) license), + .log_buf = ptr_to_u64(bpf_log_buf), + .log_size = LOG_BUF_SIZE, + .log_level = 1, + }; + + bpf_log_buf[0] = 0; + + return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); +} diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h new file mode 100644 index
0000000000000000000000000000000000000000..8a31babeca5dc1878f87d4e400aedc2ca2b9388e --- /dev/null +++ b/samples/bpf/libbpf.h @@ -0,0 +1,172 @@ +/* eBPF mini library */ +#ifndef __LIBBPF_H +#define __LIBBPF_H + +struct bpf_insn; + +int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, + int max_entries); +int bpf_update_elem(int fd, void *key, void *value); +int bpf_lookup_elem(int fd, void *key, void *value); +int bpf_delete_elem(int fd, void *key); +int bpf_get_next_key(int fd, void *key, void *next_key); + +int bpf_prog_load(enum bpf_prog_type prog_type, + const struct bpf_insn *insns, int insn_len, + const char *license); + +#define LOG_BUF_SIZE 8192 +extern char bpf_log_buf[LOG_BUF_SIZE]; + +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ + +#define BPF_ALU64_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_ALU32_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ + +#define BPF_ALU64_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_ALU32_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Short form of mov, dst_reg = src_reg */ + +#define BPF_MOV64_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* Short form of mov, dst_reg = imm32 */ + +#define BPF_MOV64_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +#define BPF_PSEUDO_MAP_FD 1 + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + + +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ + +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ + +#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ + +#define BPF_JMP_REG(OP, DST, SRC, OFF) \ + 
((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ + +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Raw code statement block */ + +#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = CODE, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = IMM }) + +/* Program exit */ + +#define BPF_EXIT_INSN() \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_EXIT, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + +#endif diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c new file mode 100644 index 0000000000000000000000000000000000000000..f44ef11f65a787a26fe99791b24b2b2606321a0e --- /dev/null +++ b/samples/bpf/test_verifier.c @@ -0,0 +1,678 @@ +/* + * Testsuite for eBPF verifier + * + * Copyright (c) 2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include <stdio.h> +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <linux/unistd.h> +#include <linux/filter.h> +#include <linux/bpf.h> +#include "libbpf.h" + +#define MAX_INSNS 512 +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) + +struct bpf_test { + const char *descr; + struct bpf_insn insns[MAX_INSNS]; + int fixup[32]; + const char *errstr; + enum { + ACCEPT, + REJECT + } result; +}; + +static struct bpf_test tests[] = { + { + "add+sub+mul", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_2, 3), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "unreachable", + .insns = { + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .errstr = "unreachable", + .result = REJECT, + }, + { + "unreachable2", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unreachable", + .result = REJECT, + }, + { + "out of range jump", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "jump out of range", + .result = REJECT, + }, + { + "out of range jump2", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, -2), + BPF_EXIT_INSN(), + }, + .errstr = "jump out of range", + .result = REJECT, + }, + { + "test1 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_LD_IMM insn", + .result = REJECT, + }, + { + "test2 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_LD_IMM insn", + .result = REJECT, + }, + { + "test3 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .errstr =
"invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test4 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test5 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "no bpf_exit", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), + }, + .errstr = "jump out of range", + .result = REJECT, + }, + { + "loop (back-edge)", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, -1), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, + }, + { + "loop2 (back-edge)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, + }, + { + "conditional loop", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, + }, + { + "read uninitialized register", + .insns = { + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, + }, + { + "read invalid register", + .insns = { + BPF_MOV64_REG(BPF_REG_0, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R15 is invalid", + .result = REJECT, + }, + { + "program doesn't init R0 before exit", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, + }, + { + "stack out of bounds", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack", + .result = REJECT, + }, + { + "invalid call insn1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_CALL uses reserved", + .result = REJECT, + }, + { + "invalid call insn2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_CALL uses reserved", + .result = REJECT, + }, + { + "invalid function call", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567), + BPF_EXIT_INSN(), + }, + .errstr = "invalid func 1234567", + .result = REJECT, + }, + { + "uninitialized stack1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec), + BPF_EXIT_INSN(), + }, + .fixup = {2}, + .errstr = "invalid indirect read from stack", + .result = REJECT, + }, + { + "uninitialized stack2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid read from stack", + .result = REJECT, + }, + { + "check valid spill/fill", + .insns = { + /* spill R1(ctx) into stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + + /* fill it back into R2 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), + + /* should be able to access R0 = *(R2 + 8) */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "check corrupted spill/fill", + .insns = { + /* spill R1(ctx) into stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + + /* mess up with R1 pointer on stack */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), 
+ + /* fill back into R0 should fail */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + + BPF_EXIT_INSN(), + }, + .errstr = "corrupted spill", + .result = REJECT, + }, + { + "invalid src register in STX", + .insns = { + BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R15 is invalid", + .result = REJECT, + }, + { + "invalid dst register in STX", + .insns = { + BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R14 is invalid", + .result = REJECT, + }, + { + "invalid dst register in ST", + .insns = { + BPF_ST_MEM(BPF_B, 14, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R14 is invalid", + .result = REJECT, + }, + { + "invalid src register in LDX", + .insns = { + BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R12 is invalid", + .result = REJECT, + }, + { + "invalid dst register in LDX", + .insns = { + BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R11 is invalid", + .result = REJECT, + }, + { + "junk insn", + .insns = { + BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_LD_IMM", + .result = REJECT, + }, + { + "junk insn2", + .insns = { + BPF_RAW_INSN(1, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_LDX uses reserved fields", + .result = REJECT, + }, + { + "junk insn3", + .insns = { + BPF_RAW_INSN(-1, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_ALU opcode f0", + .result = REJECT, + }, + { + "junk insn4", + .insns = { + BPF_RAW_INSN(-1, -1, -1, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_ALU opcode f0", + .result = REJECT, + }, + { + "junk insn5", + .insns = { + BPF_RAW_INSN(0x7f, -1, -1, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_ALU uses reserved fields", + .result = REJECT, + }, + { + "misaligned read from stack", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "invalid map_fd for function call", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec), + BPF_EXIT_INSN(), + }, + .errstr = "fd 0 is not pointing to valid bpf_map", + .result = REJECT, + }, + { + "don't check return value before access", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup = {3}, + .errstr = "R0 invalid mem access 'map_value_or_null'", + .result = REJECT, + }, + { + "access memory with incorrect alignment", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), + BPF_EXIT_INSN(), + }, + .fixup = {3}, + .errstr = "misaligned access", + .result = REJECT, + }, + { + "sometimes access memory with incorrect alignment", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_unspec), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + }, + .fixup = {3}, + .errstr = "R0 invalid mem access", + .result = REJECT, + }, + { + "jump test 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "jump test 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 14), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 11), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 5), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "jump test 3", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 19), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_JMP_IMM(BPF_JA, 0, 0, 15), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32), + BPF_JMP_IMM(BPF_JA, 0, 0, 11), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40), + BPF_JMP_IMM(BPF_JA, 0, 0, 7), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_unspec), + BPF_EXIT_INSN(), + }, + .fixup = {24}, + .result = ACCEPT, + }, + { + "jump test 4", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, 
BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, +}; + +static int probe_filter_length(struct bpf_insn *fp) +{ + int len = 0; + + for (len = MAX_INSNS - 1; len > 0; --len) + if (fp[len].code != 0 || fp[len].imm != 0) + break; + + return len + 1; +} + +static int create_map(void) +{ + long long key, value = 0; + int map_fd; + + map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key), sizeof(value), 1024); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + } + + return map_fd; +} + +static int test(void) +{ + int prog_fd, i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct bpf_insn *prog = tests[i].insns; + int prog_len = probe_filter_length(prog); + int *fixup = tests[i].fixup; + int map_fd = -1; + + if (*fixup) { + map_fd = create_map(); + + do { + prog[*fixup].imm = map_fd; + fixup++; + } while (*fixup); + } + printf("#%d %s ", i, tests[i].descr); + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog, + prog_len * sizeof(struct bpf_insn), + "GPL"); + + if (tests[i].result == ACCEPT) { + if (prog_fd < 0) { + printf("FAIL\nfailed to load prog '%s'\n", + strerror(errno)); + printf("%s", bpf_log_buf); + goto fail; + } + } else { + if (prog_fd >= 0) { + printf("FAIL\nunexpected success to load\n"); + printf("%s", bpf_log_buf); + goto fail; + } + if (strstr(bpf_log_buf, tests[i].errstr) == 0) { + printf("FAIL\nunexpected error message: %s", + bpf_log_buf); + goto fail; + } + } + + printf("OK\n"); +fail: + if (map_fd >= 0) + close(map_fd); + close(prog_fd); + + } + + return 0; +} + +int main(void) +{ + return test(); +}
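
The helpers in samples/bpf/libbpf.c and the insn macros in libbpf.h can also be driven outside of test_verifier. The sketch below is illustrative only and not part of the patch: it loads a trivial "return 0" program and touches a map, just to show the calling convention. It assumes the sanitized kernel headers are on the include path (as the Makefile's HOSTCFLAGS arranges) and that the kernel side accepts the UNSPEC map/program types (e.g. via the patch set's test stubs); the file name minimal_user.c is made up.

/* minimal_user.c - hypothetical companion to test_verifier, not part of
 * the patch.  Demonstrates the calling convention of the helpers in
 * libbpf.c and of the insn macros in libbpf.h. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <linux/bpf.h>
#include "libbpf.h"

int main(void)
{
	/* smallest program the verifier accepts: r0 = 0; exit */
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	long long key = 1, value = 42;
	int map_fd, prog_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_UNSPEC, sizeof(key),
				sizeof(value), 16);
	if (map_fd < 0)
		printf("map create failed: %s\n", strerror(errno));
	else if (bpf_update_elem(map_fd, &key, &value) < 0)
		printf("map update failed: %s\n", strerror(errno));

	/* length is in bytes; bpf_prog_load() derives insn_cnt itself */
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_UNSPEC, prog, sizeof(prog), "GPL");
	if (prog_fd < 0) {
		printf("prog load failed: %s\n%s", strerror(errno), bpf_log_buf);
		return 1;
	}
	printf("program loaded, fd %d\n", prog_fd);

	close(prog_fd);
	if (map_fd >= 0)
		close(map_fd);
	return 0;
}

Note that bpf_prog_load() takes the program length in bytes and divides by sizeof(struct bpf_insn) itself, which is why sizeof(prog) is passed directly; on failure the verifier log is available in bpf_log_buf.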
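
Returning to the xfrm changes earlier in this patch: XFRM_MSG_NEWSPDINFO now accepts XFRMA_SPD_IPV4_HTHRESH / XFRMA_SPD_IPV6_HTHRESH attributes and schedules xfrm_policy_hash_rebuild(). As an illustration of the expected message layout (not part of the patch, requires CAP_NET_ADMIN), a minimal userspace sketch could look like the following; the helper name set_hthresh4() and the fixed-size request buffer are assumptions of the sketch.

/* set_spd_hthresh4.c - hypothetical, not part of the patch.  Asks the
 * kernel to hash IPv4 policies on the given local/remote selector prefix
 * lengths via XFRM_MSG_NEWSPDINFO. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

static int set_hthresh4(unsigned char lbits, unsigned char rbits)
{
	struct {
		struct nlmsghdr nlh;
		__u32 flags;		/* the message body is a single u32 */
		char attrbuf[64];	/* room for the threshold attribute */
	} req;
	struct xfrmu_spdhthresh thresh = { .lbits = lbits, .rbits = rbits };
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	struct nlattr *nla;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_type = XFRM_MSG_NEWSPDINFO;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(__u32));

	/* append XFRMA_SPD_IPV4_HTHRESH right after the u32 body */
	nla = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	nla->nla_type = XFRMA_SPD_IPV4_HTHRESH;
	nla->nla_len = NLA_HDRLEN + sizeof(thresh);
	memcpy((char *)nla + NLA_HDRLEN, &thresh, sizeof(thresh));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + NLA_ALIGN(nla->nla_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return ret < 0 ? -1 : 0;
}

Versions of iproute2 that picked up this interface expose the same knob as "ip xfrm policy set hthresh4 LBITS RBITS hthresh6 LBITS RBITS", and XFRM_MSG_GETSPDINFO reports the currently configured thresholds back via the same attributes.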